repo_name (string, lengths 5–92) | path (string, lengths 4–232) | copies (19 classes) | size (string, lengths 4–7) | content (string, lengths 721–1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
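Each row below is one source file together with the metadata columns above. A minimal sketch of how such a dump could be filtered, assuming it has been exported to a Parquet file with these columns (the file name is hypothetical):

```python
import pandas as pd

# Load the exported table (hypothetical local file).
df = pd.read_parquet("code_dump.parquet")

# Keep permissively licensed, human-written files with moderate line lengths.
subset = df[
    df["license"].isin(["mit", "bsd-3-clause", "apache-2.0"])
    & ~df["autogenerated"]
    & (df["line_max"] < 120)
]

# Show where each remaining file came from.
print(subset[["repo_name", "path", "size", "alpha_frac"]].head())
```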
drankye/kerb-token
|
krb5/src/tests/t_skew.py
|
1
|
2477
|
#!/usr/bin/python
from k5test import *
# Create a realm with the KDC one hour in the past.
realm = K5Realm(start_kdc=False)
realm.start_kdc(['-T', '-3600'])
# kinit (no preauth) should work, and should set a clock skew allowing
# kvno to work, with or without FAST.
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, realm.host_princ])
realm.kinit(realm.user_princ, password('user'), flags=['-T', realm.ccache])
realm.run([kvno, realm.host_princ])
realm.run([kdestroy])
# kinit (with preauth) should work, with or without FAST.
realm.run_kadminl('modprinc +requires_preauth user')
realm.kinit(realm.user_princ, password('user'))
realm.run([kvno, realm.host_princ])
realm.kinit(realm.user_princ, password('user'), flags=['-T', realm.ccache])
realm.run([kvno, realm.host_princ])
realm.run([kdestroy])
realm.stop()
# Repeat the above tests with kdc_timesync disabled.
conf = {'libdefaults': {'kdc_timesync': '0'}}
realm = K5Realm(start_kdc=False, krb5_conf=conf)
realm.start_kdc(['-T', '-3600'])
# Get tickets to use for FAST kinit tests. The start time offset is
# ignored by the KDC since we aren't getting postdatable tickets, but
# serves to suppress the client clock skew check on the KDC reply.
fast_cache = realm.ccache + '.fast'
realm.kinit(realm.user_princ, password('user'),
flags=['-s', '-3600s', '-c', fast_cache])
# kinit should detect too much skew in the KDC response. kinit with
# FAST should fail from the KDC since the armor AP-REQ won't be valid.
out = realm.kinit(realm.user_princ, password('user'), expected_code=1)
if 'Clock skew too great in KDC reply' not in out:
fail('Expected error message not seen in kinit skew case')
out = realm.kinit(realm.user_princ, password('user'), flags=['-T', fast_cache],
expected_code=1)
if 'Clock skew too great while' not in out:
fail('Expected error message not seen in kinit FAST skew case')
# kinit (with preauth) should fail from the KDC, with or without FAST.
realm.run_kadminl('modprinc +requires_preauth user')
out = realm.kinit(realm.user_princ, password('user'), expected_code=1)
if 'Clock skew too great while' not in out:
fail('Expected error message not seen in kinit skew case (preauth)')
out = realm.kinit(realm.user_princ, password('user'), flags=['-T', fast_cache],
expected_code=1)
if 'Clock skew too great while' not in out:
fail('Expected error message not seen in kinit FAST skew case (preauth)')
success('Clock skew tests')
|
apache-2.0
| 3,318,160,882,515,889,000 | 41.706897 | 79 | 0.70973 | false |
xorpd/idsearch
|
idsearch/tests/test_func_iter.py
|
1
|
2143
|
import unittest
from idsearch.func_iter import FuncIter
class TestSearchDB(unittest.TestCase):
def test_basic(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
self.assertEqual(list(fiter), [0,1,2,3,4])
def test_map(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.map(lambda x:x*x)
self.assertEqual(list(new_fiter), [0,1,4,9,16])
def test_all(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.all(lambda x:x>-1)
self.assertEqual(new_fiter, True)
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.all(lambda x:x>2)
self.assertEqual(new_fiter, False)
def test_any(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.any(lambda x:x>-1)
self.assertEqual(new_fiter, True)
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.any(lambda x:x>2)
self.assertEqual(new_fiter, True)
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.any(lambda x:x>6)
self.assertEqual(new_fiter, False)
def test_filter(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.filter(lambda x:(x%2)==0)
self.assertEqual(list(new_fiter), [0,2,4])
def test_unique(self):
my_iter = (i for i in [1,5,5,2,5,3,1])
fiter = FuncIter(my_iter)
new_fiter = fiter.unique(lambda x:x)
self.assertEqual(list(new_fiter), [1,5,2,3])
def test_double_map(self):
my_iter = (i for i in range(5))
fiter = FuncIter(my_iter)
new_fiter = fiter.map(lambda x:x*x).map(lambda x:-x)
self.assertEqual(list(new_fiter), [0,-1,-4,-9,-16])
def test_load_from_list(self):
my_iter = [0,1,2,3,4]
fiter = FuncIter(my_iter)
new_fiter = fiter.map(lambda x:x*x)
self.assertEqual(list(new_fiter), [0,1,4,9,16])
|
gpl-3.0
| -1,785,624,968,993,354,800 | 31.469697 | 60 | 0.569295 | false |
martinhoefling/fretutils
|
lib/FRETUtils/Ensemble.py
|
1
|
2936
|
'''
Created on 24.06.2010
@author: mhoefli
'''
import random
import re
def readProbabilities(pbfile):
"""Reads in probabilities from a specific probability file, 1st colum is the regexp to match, 2nd the name and 3rd the probability in the ensemble"""
with open(pbfile) as pfh:
probabilities = []
print
for line in pfh:
spl = line.split()
if len(spl) == 0:
continue
if len(spl) != 3:
print "Line split is :", spl
raise ValueError("Line in probability file does not have 3 entries.")
probabilities.append((spl[1], re.compile(spl[0]), float(spl[2])))
print "Found ensemble class", spl[1], "with probability %6.4f." % (float(spl[2]))
print
return probabilities
def assignTrajProbabilityClasses(trajs, probabilities):
"""applies classes to a trajectory dictionary with read in and compiled probabilities"""
keys = trajs.keys()
keys.sort()
for key in keys:
for pclass in probabilities:
myclass = None
if pclass[1].search(key):
myclass = pclass[0]
break
if not myclass:
raise ValueError("Cannot assign a probability class to simulation %s" % key)
trajs[key]["species"] = myclass
print "Assigned species", myclass, "to trajectory \"%s\"." % key
def getClassTrajCount(myclass, trajs):
"""counts trajectories of a distinct class"""
counter = 0
for trajk in trajs.keys():
if trajs[trajk]["species"] == myclass:
counter += 1
return counter
def getClassTrajSamples(myclass,trajs):
"""counts the samples from all trajectories of a distinct class"""
counter=0
for trajk in trajs.keys():
if trajs[trajk]["species"] == myclass:
counter+=trajs[trajk]["length"]
return counter
def getClassTrajWeight(key,trajs):
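"""returns the weight of a single trajectory within its class, i.e. its number of samples divided by the total samples of that class"""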
myclass = trajs[key]["species"]
classsamples = getClassTrajSamples(myclass,trajs)
return float(trajs[key]["length"])/classsamples
def cleanProbabilities(trajs, probs):
"""removes all probability classes from probability dictionary which do not have any trajectory"""
newprobs = []
for prob in probs:
if getClassTrajCount(prob[0], trajs) > 0:
newprobs.append(prob)
return newprobs
def pickFromEnsemble(eprob):
"""returns a class from ensemble according to its probability, this function (re-) normalizes the probability if necessary"""
epsum = 0.
for pr in eprob:
epsum += pr[2]
rnd = random.random() * epsum
epsum = 0
for pr in eprob:
epsum += pr[2]
if rnd < epsum:
return pr[0]
def getTrajClassProbability(traj, probabilities):
"""returns the class probability of a distinct trajectory"""
for pr in probabilities:
if pr[0] == traj["species"]:
return pr[2]
|
bsd-3-clause
| 5,670,931,972,478,937,000 | 30.913043 | 153 | 0.620913 | false |
jzmnd/fet-py-scripts
|
agilent4155_matlab_output_param.py
|
1
|
3478
|
#! /usr/bin/env python
"""
agilent4155_matlab_output_param.py
Prarmeter extractor for matlab generated .xlsx idvd files
Created by Jeremy Smith on 2015-10-29
University of California, Berkeley
j-smith@eecs.berkeley.edu
"""
import os
import sys
import xlrd
import numpy as np
import myfunctions as mf
from scipy import stats
__author__ = "Jeremy Smith"
__version__ = "1.0"
data_path = os.path.dirname(__file__) # Path name for location of script
files = os.listdir(data_path) # All files in directory
data_summary = []
summary_list_header = [["filename", "channelL", "channelW"]]
fitrange = 3
def main():
"""Main function"""
print "\nBatch importing .xlsx files..."
print data_path, '\n'
for f in files:
print f
# Loops through all transfer files
if "IDVD.xlsx" in f:
workbook = xlrd.open_workbook(f, logfile=open(os.devnull, 'w'))
for dev in workbook.sheet_names():
if "Sheet" in dev:
continue
print " - device {:s}".format(dev)
datasheet = workbook.sheet_by_name(dev)
run_numbers = [str(int(x)) for x in datasheet.row_values(2) if x]
stepvg = 0
for i, run in enumerate(run_numbers):
print " - run {:s}".format(run)
data = {}
gdlin = []
gdsat = []
vg_list = []
# File name for outputs
outname = f[:-5] + '_' + dev + '_' + run
# Constant parameters taken from header
vgmin = float(datasheet.cell_value(3, (stepvg + 2)*i + 1))
vgmax = float(datasheet.cell_value(4, (stepvg + 2)*i + 1))
stepvg_prev = stepvg
stepvg = int(datasheet.cell_value(5, (stepvg + 2)*i + 1))
chl = float(datasheet.cell_value(1, 1))
chw = float(datasheet.cell_value(0, 1))
tox = float(datasheet.cell_value(1, 3))
kox = float(datasheet.cell_value(0, 3))
ldr = float(datasheet.cell_value(1, 5))
lso = float(datasheet.cell_value(0, 5))
ci = 8.85418782e-7*kox/tox
colheads = ['VDS'] + ["ID{:d}".format(x + 1) for x in range(stepvg)]
for h in colheads:
data[h] = []
for row in range(datasheet.nrows - 11 - stepvg):
for col, h in enumerate(colheads):
if datasheet.cell_type(9 + row, (stepvg_prev + 2)*i + col) == 0:
continue
data[h].append(float(datasheet.cell_value(9 + row, (stepvg_prev + 2)*i + col)))
vds = np.array(data['VDS'])
output_list = [vds]
for j in range(stepvg):
ids = np.array(data["ID{:d}".format(j + 1)])
# Fits to first data points (given by fitrange) i.e. linear
slope, intercept, r_value, p_value, std_err = stats.linregress(vds[:fitrange], ids[:fitrange])
gdlin.append(slope)
# Fits to last data points (given by fitrange) i.e. saturation
slope, intercept, r_value, p_value, std_err = stats.linregress(vds[-fitrange:], ids[-fitrange:])
gdsat.append(slope)
# Update lists
output_list.append(ids)
vg_list.append(vgmin + j*(vgmax-vgmin)/(stepvg-1))
# Output data
data_summary.append([outname, chl, chw])
# Output files
mf.dataOutputGen(outname+"_output.txt", data_path, map(list, zip(*output_list)))
mf.dataOutputHead(outname+"_gm.txt", data_path, [vg_list, gdlin, gdsat], [['VG', 'GDlin', 'GDsat']],
format_d="%.2f\t %.5e\t %.5e\n",
format_h="%s\t")
mf.dataOutputHead("SUMMARY_OUT.txt", data_path, map(list, zip(*data_summary)), summary_list_header,
format_d="%s\t %.1f\t %.1f\n",
format_h="%s\t")
return
if __name__ == "__main__":
sys.exit(main())
|
mit
| 7,935,849,942,573,224,000 | 29.508772 | 106 | 0.616734 | false |
embray/numpy
|
numpy/core/tests/test_indexing.py
|
1
|
35320
|
from __future__ import division, absolute_import, print_function
import sys
import warnings
import functools
import numpy as np
from numpy.core.multiarray_tests import array_indexing
from itertools import product
from numpy.testing import *
try:
cdll = np.ctypeslib.load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
class TestIndexing(TestCase):
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
# Regression, it needs to fall through integer and fancy indexing
# cases, so need the with statement to ignore the non-integer error.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', '', DeprecationWarning)
a = np.array([1.])
assert_(isinstance(a[0.], np.float_))
a = np.array([np.array(1)], dtype=object)
assert_(isinstance(a[0.], np.ndarray))
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if
# that is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
# Ellipsis index does not create a view
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[...], a)
assert_(a[...].base is a) # `a[...]` was `a` in numpy <1.9.
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1<<30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1<<64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
# Python boolean converts to integer
# These are being deprecated (and tested in test_deprecations)
#assert_equal(a[True], a[1])
#assert_equal(a[False], a[0])
# Same with NumPy boolean scalar
# Before DEPRECATE, this is an error (as always, but telling about
# future change):
assert_raises(IndexError, a.__getitem__, np.array(True))
assert_raises(IndexError, a.__getitem__, np.array(False))
# After DEPRECATE, this behaviour can be enabled:
#assert_equal(a[np.array(True)], a[None])
#assert_equal(a[np.array(False), a[None][0:0]])
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcasted to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
# Numpy bools can be used as boolean index (python ones as of yet not)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero(object):
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike(object):
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in a separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
class TestFieldIndexing(TestCase):
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments(TestCase):
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
class TestSubclasses(TestCase):
def test_basic(self):
class SubClass(np.ndarray):
pass
s = np.arange(5).view(SubClass)
assert_(isinstance(s[:3], SubClass))
assert_(s[:3].base is s)
assert_(isinstance(s[[0, 1, 2]], SubClass))
assert_(isinstance(s[s > 0], SubClass))
def test_matrix_fancy(self):
# The matrix class messes with the shape. While this is always
# weird (getitem is not used, it does not have setitem nor knows
# about fancy indexing), this tests gh-3110
m = np.matrix([[1, 2], [3, 4]])
assert_(isinstance(m[[0,1,0], :], np.matrix))
# gh-3110. Note the transpose currently because matrices do *not*
# support dimension fixing for fancy indexing correctly.
x = np.asmatrix(np.arange(50).reshape(5,10))
assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
class TestFancyIndexingEquivalence(TestCase):
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
class TestMultiIndexingAutomated(TestCase):
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE: * This still lacks tests for complex item setting.
* If you change the behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
"""
def setUp(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array(False),
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty broadcastable
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation avoids a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing, this
is not guaranteed in general.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcasted fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # defined here mostly to replace all but the first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for e.g. np.take).
# (Note the importance of empty lists if changing this here)
indx = np.array(indx, dtype=np.intp)
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
# we can't take more dimensions than we have, not even for 0-d arrays.
# since a[()] makes sense, but not a[(),]. We will raise an error
# later on, unless a broadcasting error occurs first.
raise IndexError
if ndim == 0 and not None in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim)
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array anyway:
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
# This may be open for improvement in numpy.
# numpy should probably cast boolean lists to boolean indices
# instead of intp!
# Numpy supports a boolean index with
# non-matching shape as long as the True values are not
# out of bounds. Numpy should maybe not allow this
# (at least not arrays that are larger than the original one).
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
# Note that originally this could be interpreted as
# integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
# The index is a scalar. This used to be two fold, but if fancy
# indexing was active, the check was done later, possibly
# after broadcasting it away (1.7. or earlier). Now it is always
# done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while
# one may expect it to not trigger it, since a scalar
# would not be considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxes by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
if len(indx[1:]) != 1:
res = np.broadcast(*indx[1:]) # raises ValueError...
else:
res = indx[1]
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
if np.product(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise')
except:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
prev_refcount = sys.getrefcount(arr)
assert_raises(Exception, arr.__getitem__, index)
assert_raises(Exception, arr.__setitem__, index, 0)
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
prev_refcount = sys.getrefcount(arr)
assert_raises(Exception, arr.__getitem__, index)
assert_raises(Exception, arr.__setitem__, index, 0)
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
# Check if we got a view, unless it's a 0-sized or 0-d array.
# (then it's not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,]; also, no broadcasting
# is done at all
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if i != 'skip')
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
with warnings.catch_warnings():
warnings.filterwarnings('error', '', DeprecationWarning)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestCApiAccess(TestCase):
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
| 1,670,695,179,522,248,700 | 36.856377 | 95 | 0.526416 | false |
fcelda/cds-monitor
|
monitor.py
|
1
|
2054
|
#!/usr/bin/env python3
import logging
import sched
import time
import cdsmon.source
import cdsmon.fetch
import cdsmon.update
class Monitor:
def __init__(self, source, fetch, update):
self._source = source
self._fetch = fetch
self._update = update
self._next = 0
def exec(self):
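"""Runs one monitoring pass: compares each delegated zone's DS set against its published CDS set and sends a DS update when they differ."""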
time_start = time.time()
for zone, ds_list in self._source.get_delegations():
for ds in ds_list:
logging.debug("%s, DS '%s'" % (zone, ds))
cds_list = self._fetch.get_cds(zone)
for cds in cds_list:
logging.debug("%s, CDS '%s'" % (zone, cds))
if len(cds_list) == 0:
logging.info("%s, CDS not present" % zone)
continue
if sorted(ds_list) == sorted(cds_list):
logging.info("%s, is up-to-date" % zone)
else:
logging.info("%s, sending update" % zone)
self._update.update_ds(zone, cds_list)
self._next = time_start + self._source.refresh_seconds()
def next(self):
return self._next
# setup logging
logging_format = "%(asctime)s %(levelname)s: %(message)s"
logging.basicConfig(format=logging_format, level=logging.DEBUG)
# setup interfaces
axfr_source = cdsmon.source.AXFRSource("example.com", "::2")
#zone_source = cdsmon.source.ZonefileSource("example.com", "examples/zones/example.com.zone")
import dns.resolver
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = [ "::1" ]
resolver.port = 53000
cds_fetch = cdsmon.fetch.ResolverFetch(resolver)
ds_update = cdsmon.update.DDNSUpdate("::2", 53, "example.com", 10)
# execution loop
loop = sched.scheduler()
monitor = Monitor(axfr_source, cds_fetch, ds_update)
def run_and_reschedule():
monitor.exec()
next_abs = monitor.next()
next_rel = max(0, next_abs - time.time())
logging.debug("refresh in %.2f seconds" % next_rel)
loop.enter(next_rel, 0, run_and_reschedule)
loop.enter(0, 0, run_and_reschedule)
loop.run()
|
gpl-3.0
| -3,210,956,226,077,122,600 | 27.929577 | 93 | 0.615385 | false |
irvingprog/python-pilas-experimental
|
pilasengine/actores/bomba.py
|
1
|
1122
|
# -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine.actores.animacion import Animacion
class Bomba(Animacion):
"""Representa una bomba que puede explotar...
.. image:: images/actores/bomba.png
La bomba adquiere la habilidad explotar al momento de crearse, así
que puedes invocar a su método "explotar" y la bomba hará un
explosión en pantalla con sonido.
Este es un ejemplo de uso del actor:
>>> bomba = pilas.actores.Bomba()
>>> bomba.explotar()
"""
def __init__(self, pilas=None, x=0, y=0):
grilla = pilas.imagenes.cargar_grilla("bomba.png", 2)
Animacion.__init__(self, pilas, grilla, ciclica=True, x=x, y=y,
velocidad=10)
self.radio_de_colision = 25
self.aprender(pilas.habilidades.PuedeExplotar)
def explotar(self):
"""Hace explotar a la bomba y la elimina de la pantalla."""
self.eliminar()
|
lgpl-3.0
| -1,777,256,028,047,224,600 | 30.055556 | 71 | 0.644902 | false |
Som-Energia/switching
|
switching/input/messages/B1.py
|
1
|
3359
|
# -*- coding: utf-8 -*-
from message import Message, except_f1
import C1, C2
class B1(Message):
"""Classe que implementa B1."""
@property
def sollicitud(self):
"""Retorna l'objecte Sollicitud"""
return C1.Sollicitud(self.obj.BajaEnergia.DatosSolicitud)
@property
def client(self):
"""Retorna l'objecte Client"""
return C1.Client(self.obj.BajaEnergia.Cliente)
@property
def acceptacio(self):
"""Retorna l'objecte Acceptacio"""
obj = getattr(self.obj, self._header, False)
if obj and hasattr(obj, 'DatosAceptacion'):
return C1.Acceptacio(obj.DatosAceptacion)
return False
@property
def activacio(self):
"""Retorna l'objecte Activacio"""
return C1.Activacio(self.obj.NotificacionBajaEnergia)
@property
def anullacio(self):
"""Retorna l'object Anullacio"""
return C1.Anullacio(self.obj.AnulacionSolicitud)
@property
def rebuig(self):
"""Retorna una llista de Rebuig"""
data = []
for i in self.obj.RechazoATRDistribuidoras.Rechazo:
data.append(C1.Rebuig(i))
return data
@property
def rebuig_anullacio(self):
"""Retorna l'objecte Rebuig"""
data = []
for i in self.obj.RechazoDeAnulacion.RechazoAnulacion:
data.append(C1.Rebuig(i))
return data
@property
def contracte(self):
"""Retorna l'objecte Contracte"""
obj = getattr(self.obj, self._header)
try:
idcontrato = C1.Contracte(obj.IdContrato)
except AttributeError:
# Step 04 Acceptacio has the classic structure
idcontrato = C1.Contracte(obj.Contrato)
return idcontrato
@property
def direccio_correspondecia(self):
direccio = False
try:
direccio = DireccioAmbIndicador(self.obj.BajaEnergia.DireccionCorrespondencia)
except AttributeError:
pass
return direccio
@property
def header(self):
return self._header
@property
def punts_mesura(self):
"""Retorna una llista de punts de mesura"""
data = []
obj = getattr(self.obj, self._header)
for i in obj.PuntosDeMedida.PuntoDeMedida:
data.append(C1.PuntMesura(i))
return data
@property
def comentaris(self):
"""Retorna una llista de comentaris"""
data = []
obj = getattr(self.obj, self._header)
if (hasattr(obj, 'Comentarios') and
hasattr(obj.Comentarios, 'Comentario')):
for i in obj.Comentarios.Comentario:
data.append(C2.Comentari(i))
return data
class DireccioAmbIndicador(object):
"""Classe que implementa la direccio"""
def __init__(self, data):
self.direccio = data
@property
def indicador(self):
"""Retorna F/S/O"""
value = ''
try:
value = self.direccio.Indicador.text
except AttributeError:
pass
return value
@property
def direccio_correspondecia(self):
value = False
try:
value = C1.Direccio(self.direccio.Direccion)
except AttributeError:
pass
return value
|
gpl-3.0
| 3,862,654,646,224,733,000 | 26.768595 | 90 | 0.584102 | false |
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_05_01/operations/_virtual_network_gateways_operations.py
|
1
|
131134
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VirtualNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGateway"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VirtualNetworkGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
"""Creates or updates a virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual network gateway operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGateway"
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
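    # Usage sketch (comment only, not executed): how a caller might drive the
    # begin_delete long-running operation through NetworkManagementClient.
    # Assumes azure-identity is available; "my-rg", "my-gateway" and the
    # subscription id are placeholder values, not names defined in this module.
    # Later usage sketches in this file reuse the same `client`.
    #
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.network import NetworkManagementClient
    #
    #   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    #   poller = client.virtual_network_gateways.begin_delete("my-rg", "my-gateway")
    #   poller.result()  # block until the gateway deletion completes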
def _update_tags_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VirtualNetworkGateway"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to update virtual network gateway tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
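    # Usage sketch (comment only): updating gateway tags with the LRO above.
    # TagsObject is assumed to come from this package's versioned models; the
    # tag values shown are illustrative placeholders.
    #
    #   from azure.mgmt.network.v2020_05_01.models import TagsObject
    #
    #   poller = client.virtual_network_gateways.begin_update_tags(
    #       "my-rg", "my-gateway", TagsObject(tags={"env": "test"})
    #   )
    #   gateway = poller.result()  # VirtualNetworkGateway with the updated tags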
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkGatewayListResult"]
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'} # type: ignore
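    # Usage sketch (comment only): list() returns an ItemPaged that follows the
    # nextLink continuation transparently, so callers can simply iterate.
    #
    #   for gw in client.virtual_network_gateways.list("my-rg"):
    #       print(gw.name, gw.location)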
def list_connections(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkGatewayListConnectionsResult"]
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayListConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGatewayListConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayListConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_connections.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayListConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'} # type: ignore
def _reset_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
gateway_vip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VirtualNetworkGateway"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualNetworkGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'} # type: ignore
def begin_reset(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
gateway_vip=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkGateway"]
"""Resets the primary of the virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
        :param gateway_vip: Virtual network gateway VIP address supplied to the begin reset of the
active-active feature enabled gateway.
:type gateway_vip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'} # type: ignore
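    # Usage sketch (comment only): gateway_vip is only needed when resetting a
    # specific instance of an active-active gateway; the poller resolves to the
    # VirtualNetworkGateway resource once the reset has been accepted.
    #
    #   gateway = client.virtual_network_gateways.begin_reset(
    #       "my-rg", "my-gateway"
    #   ).result()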
def _reset_vpn_client_shared_key_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._reset_vpn_client_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_vpn_client_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'} # type: ignore
def begin_reset_vpn_client_shared_key(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Resets the VPN client shared key of the virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_vpn_client_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'} # type: ignore
def _generatevpnclientpackage_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generatevpnclientpackage_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generatevpnclientpackage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'} # type: ignore
def begin_generatevpnclientpackage(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Generates VPN client package for P2S client of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network gateway VPN client
package operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnClientParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'} # type: ignore
def _generate_vpn_profile_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generate_vpn_profile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generate_vpn_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'} # type: ignore
def begin_generate_vpn_profile(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnClientParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Generates VPN profile for P2S client of the virtual network gateway in the specified resource
group. Used for IKEV2 and radius based authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network gateway VPN client
package operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnClientParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'} # type: ignore
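    # Usage sketch (comment only): the poller resolves to a str containing a
    # download URL for the generated profile package. VpnClientParameters and
    # the authentication_method value shown are assumptions based on this
    # package's models, not values defined in this file.
    #
    #   from azure.mgmt.network.v2020_05_01.models import VpnClientParameters
    #
    #   params = VpnClientParameters(authentication_method="EAPTLS")
    #   profile_url = client.virtual_network_gateways.begin_generate_vpn_profile(
    #       "my-rg", "my-gateway", params
    #   ).result()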
def _get_vpn_profile_package_url_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpn_profile_package_url_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpn_profile_package_url_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'} # type: ignore
def begin_get_vpn_profile_package_url(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Gets pre-generated VPN profile for P2S client of the virtual network gateway in the specified
resource group. The profile needs to be generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'} # type: ignore
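    # Usage sketch (comment only): this only retrieves the URL of a package that
    # was already produced by begin_generate_vpn_profile above; calling it on a
    # gateway with no generated profile is expected to fail server-side.
    #
    #   package_url = client.virtual_network_gateways.begin_get_vpn_profile_package_url(
    #       "my-rg", "my-gateway"
    #   ).result()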
def _get_bgp_peer_status_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
peer=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.BgpPeerStatusListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.BgpPeerStatusListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_bgp_peer_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_bgp_peer_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'} # type: ignore
def begin_get_bgp_peer_status(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
peer=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.BgpPeerStatusListResult"]
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either BgpPeerStatusListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.BgpPeerStatusListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BgpPeerStatusListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'} # type: ignore
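    # Usage sketch (comment only): peer is optional; omitting it asks for the
    # status of all BGP peers. The .value list and the BgpPeerStatus attribute
    # names below are assumed from the models elsewhere in this package.
    #
    #   status = client.virtual_network_gateways.begin_get_bgp_peer_status(
    #       "my-rg", "my-gateway"
    #   ).result()
    #   for peer_status in status.value or []:
    #       print(peer_status.neighbor, peer_status.state)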
def supported_vpn_devices(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""Gets a xml format representation for supported vpn devices.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.supported_vpn_devices.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'} # type: ignore
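    # Usage sketch (comment only): unlike the begin_* operations above, this is
    # a plain synchronous call that returns the XML document as a string.
    #
    #   xml_text = client.virtual_network_gateways.supported_vpn_devices(
    #       "my-rg", "my-gateway"
    #   )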
def _get_learned_routes_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GatewayRouteListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_learned_routes_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_learned_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'} # type: ignore
def begin_get_learned_routes(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GatewayRouteListResult"]
"""This operation retrieves a list of routes the virtual network gateway has learned, including
routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.GatewayRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'} # type: ignore
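    # Usage sketch (comment only): same result shape as begin_get_advertised_routes
    # below, but no peer argument is taken since learned routes are gateway-wide.
    #
    #   learned = client.virtual_network_gateways.begin_get_learned_routes(
    #       "my-rg", "my-gateway"
    #   ).result()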
def _get_advertised_routes_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
peer, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GatewayRouteListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GatewayRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_advertised_routes_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_advertised_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'} # type: ignore
def begin_get_advertised_routes(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
peer, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.GatewayRouteListResult"]
"""This operation retrieves a list of routes the virtual network gateway is advertising to the
specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer.
:type peer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either GatewayRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.GatewayRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewayRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'} # type: ignore
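    # Usage sketch (not part of the generated operations class): assuming an
    # already-authenticated NetworkManagementClient named `client` and
    # placeholder resource names, the poller returned above is typically
    # driven like this:
    #
    #     poller = client.virtual_network_gateways.begin_get_advertised_routes(
    #         "my-resource-group", "my-gateway", peer="10.0.0.4")
    #     route_list = poller.result()   # GatewayRouteListResult
    #     for route in route_list.value or []:
    #         print(route.network, route.next_hop)
    #
    # "my-resource-group", "my-gateway" and the peer address are illustrative
    # values only.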
def _set_vpnclient_ipsec_parameters_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
vpnclient_ipsec_params, # type: "_models.VpnClientIPsecParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VpnClientIPsecParameters"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnClientIPsecParameters"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._set_vpnclient_ipsec_parameters_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'} # type: ignore
def begin_set_vpnclient_ipsec_parameters(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
vpnclient_ipsec_params, # type: "_models.VpnClientIPsecParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
"""The Set VpnclientIpsecParameters operation sets the vpnclient ipsec policy for P2S client of
virtual network gateway in the specified resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param vpnclient_ipsec_params: Parameters supplied to the Begin Set vpnclient ipsec parameters
of Virtual Network Gateway P2S client operation through Network resource provider.
:type vpnclient_ipsec_params: ~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnClientIPsecParameters"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._set_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
vpnclient_ipsec_params=vpnclient_ipsec_params,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'} # type: ignore
def _get_vpnclient_ipsec_parameters_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VpnClientIPsecParameters"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnClientIPsecParameters"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpnclient_ipsec_parameters_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'} # type: ignore
def begin_get_vpnclient_ipsec_parameters(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnClientIPsecParameters"]
"""The Get VpnclientIpsecParameters operation retrieves information about the vpnclient ipsec
policy for P2S client of virtual network gateway in the specified resource group through
Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The virtual network gateway name.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnClientIPsecParameters"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'} # type: ignore
def vpn_device_configuration_script(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.VpnDeviceScriptParameters"
**kwargs # type: Any
):
# type: (...) -> str
"""Gets a xml format representation for vpn device configuration script.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the virtual network gateway
connection for which the configuration script is generated.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the generate vpn device script operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnDeviceScriptParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.vpn_device_configuration_script.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'} # type: ignore
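    # Usage sketch (illustrative only): `client` is assumed to be an
    # authenticated NetworkManagementClient and all parameter values below are
    # placeholders; VpnDeviceScriptParameters describes the on-premises device
    # the script is generated for.
    #
    #     params = _models.VpnDeviceScriptParameters(
    #         vendor="SomeVendor", device_family="SomeFamily",
    #         firmware_version="1.0")
    #     script = client.virtual_network_gateways.vpn_device_configuration_script(
    #         "my-resource-group", "my-connection", params)
    #     # `script` is the device configuration as a plain string.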
def _start_packet_capture_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters=None, # type: Optional["_models.VpnPacketCaptureStartParameters"]
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnPacketCaptureStartParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
def begin_start_packet_capture(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters=None, # type: Optional["_models.VpnPacketCaptureStartParameters"]
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Starts packet capture on virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Virtual network gateway packet capture parameters supplied to start packet
capture on gateway.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnPacketCaptureStartParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_packet_capture_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
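    # Usage sketch (illustrative only): packet capture can be started without
    # a filter by leaving `parameters` as None, or with a
    # VpnPacketCaptureStartParameters instance carrying a capture filter.
    # `client` and the resource names below are assumed placeholders.
    #
    #     poller = client.virtual_network_gateways.begin_start_packet_capture(
    #         "my-resource-group", "my-gateway")
    #     result = poller.result()   # capture status information as a string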
def _stop_packet_capture_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnPacketCaptureStopParameters"
**kwargs # type: Any
):
# type: (...) -> Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnPacketCaptureStopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
def begin_stop_packet_capture(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
parameters, # type: "_models.VpnPacketCaptureStopParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[str]
"""Stops packet capture on virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Virtual network gateway packet capture parameters supplied to stop packet
capture on gateway.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnPacketCaptureStopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_packet_capture_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
def _get_vpnclient_connection_health_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.VpnClientConnectionHealthDetailListResult"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VpnClientConnectionHealthDetailListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpnclient_connection_health_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpnclient_connection_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
def begin_get_vpnclient_connection_health(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VpnClientConnectionHealthDetailListResult"]
"""Get VPN client connection health detail per P2S client connection of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VpnClientConnectionHealthDetailListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientConnectionHealthDetailListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnClientConnectionHealthDetailListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_vpnclient_connection_health_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_connection_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
def _disconnect_virtual_network_gateway_vpn_connections_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
request, # type: "_models.P2SVpnConnectionRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._disconnect_virtual_network_gateway_vpn_connections_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'P2SVpnConnectionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disconnect_virtual_network_gateway_vpn_connections_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
def begin_disconnect_virtual_network_gateway_vpn_connections(
self,
resource_group_name, # type: str
virtual_network_gateway_name, # type: str
request, # type: "_models.P2SVpnConnectionRequest"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Disconnect vpn connections of virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
        :param request: The parameters supplied to disconnect vpn connections.
:type request: ~azure.mgmt.network.v2020_05_01.models.P2SVpnConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._disconnect_virtual_network_gateway_vpn_connections_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disconnect_virtual_network_gateway_vpn_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
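    # Usage sketch (illustrative only): the request body lists the P2S vpn
    # connection ids to drop, and the poller resolves to None on success.
    # `client` and all names below are placeholder values.
    #
    #     request = _models.P2SVpnConnectionRequest(
    #         vpn_connection_ids=["vpn-connection-id-1"])
    #     client.virtual_network_gateways.begin_disconnect_virtual_network_gateway_vpn_connections(
    #         "my-resource-group", "my-gateway", request).result()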
|
mit
| 6,869,906,415,822,150,000 | 51.812727 | 293 | 0.653217 | false |
ww9rivers/pysnmp
|
examples/v3arch/agent/ntforg/trap-v2c-with-objects.py
|
1
|
3234
|
#
# Notification Originator
#
# Send SNMP TRAP notification using the following options:
#
# * SNMPv2c
# * with community name 'public'
# * over IPv4/UDP
# * send TRAP notification
# * to a Manager at 127.0.0.1:162
# * with TRAP ID IF-MIB::linkUp as MIB symbol
#
# The IF-MIB::linkUp NOTIFICATION-TYPE implies including four other
# var-binds in the notification message, describing the incident that
# occurred. These var-binds are:
# IF-MIB::ifIndex."x"
# IF-MIB::ifAdminStatus."x"
# IF-MIB::ifOperStatus."x"
# IF-MIB::ifDescr."x"
#
# Where "x" is MIB table index (instance index).
#
# To run this example make sure to have IF-MIB.py in search path.
#
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import ntforg
from pysnmp.smi import rfc1902, view
#
# Here we fill in some values for Managed Object Instances that are picked
# up later, while the TRAP message is built by NOTIFICATION-TYPE macro
# evaluation. In a real Agent app these values would already be initialized
# during Agent runtime.
#
instanceIndex = (1,)
objects = {
('IF-MIB', 'ifIndex'): instanceIndex[0],
('IF-MIB', 'ifAdminStatus'): 'up',
('IF-MIB', 'ifOperStatus'): 'down',
('IF-MIB', 'ifDescr'): 'eth0'
}
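# When the NotificationType below is resolved against the loaded MIBs, each
# ('IF-MIB', <symbol>) key above is turned into a var-bind for
# <symbol>.<instanceIndex> carrying the value given here -- e.g.
# IF-MIB::ifOperStatus.1 = 'down'.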
# Create SNMP engine instance
snmpEngine = engine.SnmpEngine()
# MIB view controller is used for MIB lookup purposes
mibViewController = view.MibViewController(snmpEngine.getMibBuilder())
# SecurityName <-> CommunityName mapping
config.addV1System(snmpEngine, 'my-area', 'public', transportTag='all-my-managers')
# Specify security settings per SecurityName (SNMPv2c -> 1)
config.addTargetParams(snmpEngine, 'my-creds', 'my-area', 'noAuthNoPriv', 1)
# Setup transport endpoints and bind it with security settings yielding
# a target name:
# UDP/IPv4
config.addTransport(
snmpEngine,
udp.domainName,
udp.UdpSocketTransport().openClientMode()
)
config.addTargetAddr(
snmpEngine, 'my-nms-1',
udp.domainName, ('127.0.0.1', 162),
'my-creds',
tagList='all-my-managers'
)
# Specify what kind of notification should be sent (TRAP or INFORM),
# to what targets (chosen by tag) and what filter should apply to
# the set of targets (selected by tag)
config.addNotificationTarget(
snmpEngine, 'my-notification', 'my-filter', 'all-my-managers', 'trap'
)
# Allow NOTIFY access to Agent's MIB by this SNMP model (2), securityLevel
# and SecurityName
config.addContext(snmpEngine, '')
config.addVacmUser(snmpEngine, 2, 'my-area', 'noAuthNoPriv', (), (), (1,3,6))
# *** SNMP engine configuration is complete by this line ***
# Create Notification Originator App instance.
ntfOrg = ntforg.NotificationOriginator()
# Build and submit notification message to dispatcher
ntfOrg.sendVarBinds(
snmpEngine,
'my-notification', # notification targets
None, '', # contextEngineId, contextName
rfc1902.NotificationType(
rfc1902.ObjectIdentity('IF-MIB', 'linkUp'),
instanceIndex=instanceIndex,
objects=objects
).resolveWithMib(mibViewController)
)
print('Notification is scheduled to be sent')
# Run I/O dispatcher which would send pending message and process response
snmpEngine.transportDispatcher.runDispatcher()
|
bsd-2-clause
| -1,604,053,862,149,648,000 | 30.096154 | 83 | 0.727891 | false |
huertatipografica/huertatipografica-fl-scripts
|
AT_Effects/AT-patternMaker.py
|
1
|
3589
|
#FLM: AT PatternMaker
###########################
#INSTRUCTIONS
# Create glyphs with patterns to apply to the font. Be sure the glyphs share the same
# suffix, followed by a point and consecutive 3-digit numbers starting from 001.
# Example: suffix.001, suffix.002, suffix.003, etc.
# Execute the script and enter the suffix of the glyphs that will work as patterns.
# Example:
# glyphs: pattern.001, pattern.002, pattern.003...
# the suffix will be «pattern»
# Please be sure the numbers after the suffix are 3 digits long and consecutive.
# The script will subtract all your patterns from the contours of the selected characters, alternating randomly between all of them.
###########################
#INSTRUCCIONES
# Cree glifos con patrones para aplicar a la fuente. Asegúrese que los glifos tienen el mismo sufijo
# seguido de un punto y 3 números que serán consecutivos en cada patrón, empezando por 001.
# Ejemplo: sufijo.001, sufijo.002, sufijo.003, etc.
# Ejecute el script e ingrese el sufijo de sus glifos patrones
# Ejemplo
# glifos: marca.001,marca.002,marca.003
# el sufijo será «marca»
# Asegúrese que los números de los patrones son consecutivos, empezando desde 001.
# El script sustraerá sus patrones del contorno de los caracteres seleccionados, alternando de forma aleatoria entre cada uno.
###########################
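# For example, entering the suffix «pattern» makes the script look for glyphs
# named "pattern." + str(n).zfill(3), i.e. pattern.001, pattern.002, ... up to
# pattern.020, stopping at the first name that does not exist in the font.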
class MyDialog:
def __init__(self):
self.d = Dialog(self)
self.d.size = Point(400, 100)
self.d.Center()
self.d.title = "Enter pattern glyphs suffix name"
self.d.AddControl(STATICCONTROL, Rect(aIDENT, aIDENT, aIDENT, aIDENT), "frame", STYLE_FRAME)
self.d.AddControl(STATICCONTROL, Rect(aIDENT2, aIDENT2, aAUTO, aAUTO), "label1", STYLE_LABEL, "")
self.d.AddControl(EDITCONTROL, Rect(100, aNEXT, aIDENT2, aAUTO), "suffix", STYLE_EDIT, "Enter Suffix:")
self.suffix = ""
def on_suffix(self, code):
self.d.GetValue("suffix")
def on_ok(self, code):
return 1
def Run(self):
return self.d.Run()
d = MyDialog()
if d.Run() != 1:
error="si"
else:
error="no"
suffix=d.suffix
import random
font = fl.font
glyphs = font.glyphs
patternList=[]
# search for the suffix glyphs
for index in range(len(glyphs)):
gl=glyphs[index].name
if gl == suffix+'.001':
for n in range(20):
newValue=suffix+"."+str(n+1).zfill(3)
if font[newValue] is not None:
patternList.append(newValue)
else:
break
patternCount=len(patternList)
if patternCount > 0:
for pat in patternList:
if font[pat] is None:
fl.Message(pat+" is not a valid and existing glyph pattern. You must declare valid patterns in patternList to make this script work")
error="si"
def process(g, index, error):
if g is not None and error=="no":
for pat in patternList:
if pat==g.name:
error="si"
print "Warning: "+pat+" is a pattern. Glyph patterns won't be applied to themselves."
if error=="no":
fl.SetUndo(index)
rand=fl.Random(0, patternCount)
patternName=patternList[rand]
pattern=font[patternName]
#pattern.mark=500
g.Bsubtract(pattern)
g.RemoveOverlap()
g.mark=200
fl.UpdateGlyph(index)
#print g.name+" < "+pattern.name
return 1
if suffix!="":
for index in range(len(glyphs)):
if fl.Selected(index) and index!=fl.iglyph:
process(glyphs[index], index, error)
elif index == fl.iglyph:
process(glyphs[fl.iglyph], fl.iglyph, error)
fl.UpdateGlyph(index)
else:
fl.Message("No suffix entered.")
elif suffix=="":
fl.Message("No sufix entered. No changes made.")
else:
fl.Message("There is no pattern glyphs starting from "+suffix+".001. No changes made.")
|
apache-2.0
| 2,225,459,793,113,449,700 | 28.908333 | 136 | 0.687099 | false |
tedlaz/pyted
|
pymiles/pymiles.old/u_txt_num.py
|
1
|
4761
|
# -*- coding: utf-8 -*-
import decimal
from collections import OrderedDict
def isNum(value):  # Is the value a number, or not?
""" use: Returns False if value is not a number , True otherwise
input parameters :
1.value : the value to check against.
output: True or False
"""
try:
float(value)
except ValueError:
return False
else:
return True
def dec(poso=0, decimals=2):
""" use : Given a number, it returns a decimal with a specific number
of decimals
input Parameters:
1.poso : The number for conversion in any format (e.g. string or
int ..)
2.decimals : The number of decimals (default 2)
output: A decimal number
"""
PLACES = decimal.Decimal(10) ** (-1 * decimals)
if isNum(poso):
tmp = decimal.Decimal(str(poso))
else:
tmp = decimal.Decimal('0')
return tmp.quantize(PLACES)
class Ddict(dict):
'''
Dictionary of decimals
'''
def __init__(self, *args, **kwargs):
# self.update(*args, **kwargs)
fkw = {}
for key in kwargs:
fkw[key] = dec(kwargs[key])
dict.__init__(self, *args, **fkw)
def __setitem__(self, key, val):
dval = dec(val)
dict.__setitem__(self, key, dval)
class Dordict(OrderedDict):
'''
Ordered Dictionary of decimals
'''
def __init__(self, *args, **kwargs):
# self.update(*args, **kwargs)
fkw = {}
for key in kwargs:
fkw[key] = dec(kwargs[key])
OrderedDict.__init__(self, *args, **fkw)
def __setitem__(self, key, val):
dval = dec(val)
OrderedDict.__setitem__(self, key, dval)
def triades(txt, separator='.'):
'''
    Helper function that splits digits into thousands groups (123456 becomes 123.456)
'''
ltxt = len(txt)
rem = ltxt % 3
precSpace = 3 - rem
stxt = ' ' * precSpace + txt
a = []
while len(stxt) > 0:
a.append(stxt[:3])
stxt = stxt[3:]
a[0] = a[0].strip()
fval = ''
for el in a:
fval += el + separator
return fval[:-1]
def strGrDec(poso, decimals=2):
'''
    Returns a string with a Greek-formatted decimal (12345.67 becomes 12.345,67)
'''
prosimo = ''
strposo = str(poso)
if len(strposo) > 0:
if strposo[0] in '-':
prosimo = '-'
strposo = strposo[1:]
timi = '%s' % dec(strposo, decimals)
intpart, decpart = timi.split('.')
final = triades(intpart) + ',' + decpart
if final[0] == '.':
final = final[1:]
return prosimo + final
def strGrToDec(poso):
'''
Returns decimal (12.345,67 becomes 12345.67)
'''
st = poso.replace('.', '')
ds = st.replace(',', '.')
return dec(ds)
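# Round-trip example (sketch): strGrDec('12345.67') returns '12.345,67' and
# strGrToDec('12.345,67') returns Decimal('12345.67').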
def nul2DecimalZero(val):
'''
Instead of null returns 0.
'''
return dec(val)
def distribute(val, distArray, decimals=2):
"""
input parameters:
val : Decimal value for distribution
distArray : Distribution Array
decimals : Number of decimal digits
"""
tmpArr = []
val = dec(val, decimals)
try:
tar = dec(sum(distArray), decimals)
    except Exception:
return 0
for el in distArray:
tmpArr.append(dec(val * dec(el, decimals) / tar, decimals))
nval = sum(tmpArr)
dif = val - nval # Get the possible difference to fix round problem
if dif == 0:
pass
else:
# Max number Element gets the difference
tmpArr[tmpArr.index(max(tmpArr))] += dif
return tmpArr
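# Example (sketch): distribute(10, [1, 1, 1]) returns
# [Decimal('3.34'), Decimal('3.33'), Decimal('3.33')] -- the 0.01 rounding
# remainder is added to the element holding the largest share, so the parts
# always sum back to the original value.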
def nul2z(val):
'''
Instead of null returns 0. For sqlite use.
'''
if val:
return val
else:
return 0
def grup(txtVal):
'''
Trasforms a string to uppercase special for Greek comparison
'''
ar1 = u"αάΆβγδεέΈζηήΉθιίϊΊκλμνξοόΌπρσςτυύΎφχψωώΏ"
ar2 = u"ΑΑΑΒΓΔΕΕΕΖΗΗΗΘΙΙΙΙΚΛΜΝΞΟΟΟΠΡΣΣΤΥΥΥΦΧΨΩΩΩ"
ftxt = u''
for letter in txtVal:
if letter in ar1:
ftxt += ar2[ar1.index(letter)]
else:
ftxt += letter.upper()
return ftxt
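# Example (sketch): grup(u'νίκη') returns u'ΝΙΚΗ' -- accented lowercase Greek
# letters are mapped to their unaccented capitals, which plain .upper() would
# not do, so accent-insensitive Greek comparisons work as expected.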
if __name__ == '__main__':
print(strGrDec('-123456789123456789.34'))
print(nul2DecimalZero(''))
ar = (distribute(620, [204, 159, 243, 120, 274]))
print(ar, sum(ar))
print(dec())
print(dec('1.3451'))
print(strGrToDec('-120.345,24'))
aa = Ddict(ted=100, popi=34)
aa['val'] = 12
aa['tim'] = 1.235
aa['pr'] = 'tape'
print(aa)
|
gpl-3.0
| 5,173,988,845,309,119,000 | 23.166667 | 76 | 0.531083 | false |
clearlinux/autospec
|
autospec/build.py
|
1
|
13247
|
#!/bin/true
#
# build.py - part of autospec
# Copyright (C) 2015 Intel Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Actually build the package
#
import os
import re
import shutil
import util
def cleanup_req(s: str) -> str:
"""Strip unhelpful strings from requirements."""
if "is wanted" in s:
s = ""
if "should be defined" in s:
s = ""
if "are broken" in s:
s = ""
if "is broken" in s:
s = ""
if s[0:4] == 'for ':
s = s[4:]
s = s.replace(" works as expected", "")
s = s.replace(" and usability", "")
s = s.replace(" usability", "")
s = s.replace(" argument", "")
s = s.replace(" environment variable", "")
s = s.replace(" environment var", "")
s = s.replace(" presence", "")
s = s.replace(" support", "")
s = s.replace(" implementation is broken", "")
s = s.replace(" is broken", "")
s = s.replace(" files can be found", "")
s = s.replace(" can be found", "")
s = s.replace(" is declared", "")
s = s.replace("whether to build ", "")
s = s.replace("whether ", "")
s = s.replace("library containing ", "")
s = s.replace("x86_64-generic-linux-gnu-", "")
s = s.replace("i686-generic-linux-gnu-", "")
s = s.replace("'", "")
s = s.strip()
return s
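# For example, a configure check such as
#   "whether pthread_mutex_lock is declared"
# is reduced by cleanup_req() to "pthread_mutex_lock" before it is looked up
# as a build requirement.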
def check_for_warning_pattern(line):
"""Print warning if a line matches against a warning list."""
warning_patterns = [
"march=native"
]
for pat in warning_patterns:
if pat in line:
util.print_warning("Build log contains: {}".format(pat))
def get_mock_cmd():
"""Set mock command to use sudo as needed."""
# Some distributions (e.g. Fedora) use consolehelper to run mock,
# while others (e.g. Clear Linux) expect the user run it via sudo.
if os.path.basename(os.path.realpath('/usr/bin/mock')) == 'consolehelper':
return '/usr/bin/mock'
return 'sudo /usr/bin/mock'
class Build(object):
"""Manage package builds."""
def __init__(self):
"""Initialize default build settings."""
self.success = 0
self.round = 0
self.must_restart = 0
self.uniqueext = ''
self.warned_about = set()
def simple_pattern_pkgconfig(self, line, pattern, pkgconfig, conf32, requirements):
"""Check for pkgconfig patterns and restart build as needed."""
pat = re.compile(pattern)
match = pat.search(line)
if match:
self.must_restart += requirements.add_pkgconfig_buildreq(pkgconfig, conf32, cache=True)
def simple_pattern(self, line, pattern, req, requirements):
"""Check for simple patterns and restart the build as needed."""
pat = re.compile(pattern)
match = pat.search(line)
if match:
self.must_restart += requirements.add_buildreq(req, cache=True)
def failed_pattern(self, line, config, requirements, pattern, verbose, buildtool=None):
"""Check against failed patterns to restart build as needed."""
pat = re.compile(pattern)
match = pat.search(line)
if not match:
return
s = match.group(1)
# standard configure cleanups
s = cleanup_req(s)
if s in config.ignored_commands:
return
try:
if not buildtool:
req = config.failed_commands[s]
if req:
self.must_restart += requirements.add_buildreq(req, cache=True)
elif buildtool == 'pkgconfig':
self.must_restart += requirements.add_pkgconfig_buildreq(s, config.config_opts.get('32bit'), cache=True)
elif buildtool == 'R':
if requirements.add_buildreq("R-" + s, cache=True) > 0:
self.must_restart += 1
requirements.add_requires("R-" + s, config.os_packages)
elif buildtool == 'perl':
s = s.replace('inc::', '')
self.must_restart += requirements.add_buildreq('perl(%s)' % s, cache=True)
elif buildtool == 'pypi':
s = util.translate(s)
if not s:
return
self.must_restart += requirements.add_buildreq(util.translate('%s-python' % s), cache=True)
elif buildtool == 'ruby':
if s in config.gems:
self.must_restart += requirements.add_buildreq(config.gems[s], cache=True)
else:
self.must_restart += requirements.add_buildreq('rubygem-%s' % s, cache=True)
elif buildtool == 'ruby table':
if s in config.gems:
self.must_restart += requirements.add_buildreq(config.gems[s], cache=True)
else:
print("Unknown ruby gem match", s)
elif buildtool == 'maven' or buildtool == 'gradle':
group_count = len(match.groups())
if group_count == 2:
# Add fully qualified versioned mvn() dependency
name = match.group(1)
# Hyphens are disallowed for version strings, so use dots instead
ver = match.group(2).replace('-', '.')
mvn_provide = f'mvn({name}) = {ver}'
self.must_restart += requirements.add_buildreq(mvn_provide, cache=True)
elif s in config.maven_jars:
# Overrides for dependencies with custom grouping
self.must_restart += requirements.add_buildreq(config.maven_jars[s], cache=True)
elif group_count == 3:
org = match.group(1)
name = match.group(2)
ver = match.group(3).replace('-', '.')
if re.search("-(parent|pom|bom)$", name):
mvn_provide = f'mvn({org}:{name}:pom) = {ver}'
else:
mvn_provide = f'mvn({org}:{name}:jar) = {ver}'
self.must_restart += requirements.add_buildreq(mvn_provide, cache=True)
else:
# Fallback to mvn-ARTIFACTID package name
self.must_restart += requirements.add_buildreq('mvn-%s' % s, cache=True)
elif buildtool == 'catkin':
self.must_restart += requirements.add_pkgconfig_buildreq(s, config.config_opts.get('32bit'), cache=True)
self.must_restart += requirements.add_buildreq(s, cache=True)
except Exception:
if s.strip() and s not in self.warned_about and s[:2] != '--':
util.print_warning(f"Unknown pattern match: {s}")
self.warned_about.add(s)
def parse_buildroot_log(self, filename, returncode):
"""Handle buildroot log contents."""
if returncode == 0:
return True
self.must_restart = 0
is_clean = True
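        # Flush pending writes so the buildroot log is complete before it is read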
util.call("sync")
with util.open_auto(filename, "r") as rootlog:
loglines = rootlog.readlines()
missing_pat = re.compile(r"^.*No matching package to install: '(.*)'$")
for line in loglines:
match = missing_pat.match(line)
if match is not None:
util.print_fatal("Cannot resolve dependency name: {}".format(match.group(1)))
is_clean = False
return is_clean
def parse_build_results(self, filename, returncode, filemanager, config, requirements, content):
"""Handle build log contents."""
requirements.verbose = 1
self.must_restart = 0
infiles = 0
# Flush the build-log to disk, before reading it
util.call("sync")
with util.open_auto(filename, "r") as buildlog:
loglines = buildlog.readlines()
for line in loglines:
for pat in config.pkgconfig_pats:
self.simple_pattern_pkgconfig(line, *pat, config.config_opts.get('32bit'), requirements)
for pat in config.simple_pats:
self.simple_pattern(line, *pat, requirements)
for pat in config.failed_pats:
self.failed_pattern(line, config, requirements, *pat)
check_for_warning_pattern(line)
# Search for files to add to the %files section.
# * infiles == 0 before we reach the files listing
# * infiles == 1 for the "Installed (but unpackaged) file(s) found" header
# and for the entirety of the files listing
# * infiles == 2 after the files listing has ended
if infiles == 1:
for search in ["RPM build errors", "Childreturncodewas",
"Child returncode", "Empty %files file"]:
if search in line:
infiles = 2
for start in ["Building", "Child return code was"]:
if line.startswith(start):
infiles = 2
if infiles == 0 and "Installed (but unpackaged) file(s) found:" in line:
infiles = 1
elif infiles == 1 and "not matching the package arch" not in line:
# exclude blank lines from consideration...
file = line.strip()
if file and file[0] == "/":
filemanager.push_file(file, content.name)
if line.startswith("Sorry: TabError: inconsistent use of tabs and spaces in indentation"):
print(line)
returncode = 99
nvr = f"{content.name}-{content.version}-{content.release}"
match = f"File not found: /builddir/build/BUILDROOT/{nvr}.x86_64/"
if match in line:
missing_file = "/" + line.split(match)[1].strip()
filemanager.remove_file(missing_file)
if line.startswith("Executing(%clean") and returncode == 0:
print("RPM build successful")
self.success = 1
def package(self, filemanager, mockconfig, mockopts, config, requirements, content, cleanup=False):
"""Run main package build routine."""
self.round += 1
self.success = 0
mock_cmd = get_mock_cmd()
print("Building package " + content.name + " round", self.round)
self.uniqueext = content.name
if cleanup:
cleanup_flag = "--cleanup-after"
else:
cleanup_flag = "--no-cleanup-after"
print("{} mock chroot at /var/lib/mock/clear-{}".format(content.name, self.uniqueext))
if self.round == 1:
shutil.rmtree('{}/results'.format(config.download_path), ignore_errors=True)
os.makedirs('{}/results'.format(config.download_path))
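        # First mock pass: build the source RPM from the spec file and local sources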
cmd_args = [
mock_cmd,
f"--root={mockconfig}",
"--buildsrpm",
"--sources=./",
f"--spec={content.name}.spec",
f"--uniqueext={self.uniqueext}",
"--result=results/",
cleanup_flag,
mockopts,
]
util.call(" ".join(cmd_args),
logfile=f"{config.download_path}/results/mock_srpm.log",
cwd=config.download_path)
# back up srpm mock logs
util.call("mv results/root.log results/srpm-root.log", cwd=config.download_path)
util.call("mv results/build.log results/srpm-build.log", cwd=config.download_path)
srcrpm = f"results/{content.name}-{content.version}-{content.release}.src.rpm"
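        # Second mock pass: rebuild the generated SRPM into binary RPMs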
cmd_args = [
mock_cmd,
f"--root={mockconfig}",
"--result=results/",
srcrpm,
"--enable-plugin=ccache",
f"--uniqueext={self.uniqueext}",
cleanup_flag,
mockopts,
]
ret = util.call(" ".join(cmd_args),
logfile=f"{config.download_path}/results/mock_build.log",
check=False,
cwd=config.download_path)
# sanity check the build log
if not os.path.exists(config.download_path + "/results/build.log"):
util.print_fatal("Mock command failed, results log does not exist. User may not have correct permissions.")
exit(1)
is_clean = self.parse_buildroot_log(config.download_path + "/results/root.log", ret)
if is_clean:
self.parse_build_results(config.download_path + "/results/build.log", ret, filemanager, config, requirements, content)
if filemanager.has_banned:
util.print_fatal("Content in banned paths found, aborting build")
exit(1)
|
gpl-3.0
| -6,898,562,381,102,616,000 | 40.012384 | 130 | 0.55673 | false |
connorbrinton/lcat
|
lcat/loading/annotations.py
|
1
|
5671
|
#!/usr/bin/env python
"""
BMI 260: Final Project
Load chest CT scan annotations from radiologist xml files.
"""
from collections import namedtuple
import os
import re
import xml.etree.ElementTree as ET
import numpy as np
import skimage
import skimage.measure
import skimage.segmentation
import lcat
# Nodule datatype
Nodule = namedtuple('Nodule', ['nodule_id', 'characteristics', 'origin', 'mask'])
# XML namespace abbreviations
XMLNS = {
'nih': 'http://www.nih.gov'
}
# Tag name regex
TAG_NAME_RE = re.compile('^{' + XMLNS['nih'] + '}' + '(.+)$')
def load_radiologist_annotations(dicom_folder, dimensions, sop_instance_uids):
"""
Load radiologist annotations (namely nodule characteristics and regions) from the xml files
present in `dicom_folder`. Returns an array of Nodule objects representing all nodules found in
the radiologist annotations.
"""
# Create nodules placeholder
nodules = []
# Look for XML files
for filename in os.listdir(dicom_folder):
if filename.endswith('.xml'):
# Reconstruct filepath
filepath = os.path.join(dicom_folder, filename)
# Load xml file
tree = ET.parse(filepath)
root = tree.getroot()
# Find all nodules
reads = root.findall('.//nih:readingSession//nih:unblindedReadNodule', XMLNS)
# For each read
for read in reads:
# Extract nodule information
nodule = get_nodule_information(read, dimensions, sop_instance_uids)
# Only include >3mm nodules
if any(dim > 1 for dim in nodule.mask.shape):
nodules.append(nodule)
return nodules
def get_nodule_information(read, dimensions, sop_instance_uids):
"""
Given an unblindedReadNodule element, create a Nodule object representing the nodule's
characteristics and vertices.
"""
# Get nodule ID
nodule_id = get_read_nodule_id(read)
# Get characteristics
characteristics = get_read_characteristics(read)
# Get mask
origin, mask = get_read_mask(read, dimensions, sop_instance_uids)
return Nodule(nodule_id, characteristics, origin, mask)
def get_read_nodule_id(read):
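    """Return the nodule ID string recorded in the given read element."""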
# Find nodule ID element
nodule_id_elem = read.find('.//nih:noduleID', XMLNS)
# Return text content
return nodule_id_elem.text
def get_read_characteristics(read):
"""
Get the characteristics from a read as recorded by the radiologist. Returns an empty dictionary
if no characteristics were recorded.
"""
# Extract characteristics
characteristics = {}
for attribute_elem in read.findall('.//nih:characteristics//*', XMLNS):
# Get attribute name (removing namespace)
match = TAG_NAME_RE.match(attribute_elem.tag)
assert match is not None
attribute_name = match.group(1)
# Get attribute value
attribute_value = int(attribute_elem.text)
characteristics[attribute_name] = attribute_value
return characteristics
def get_read_mask(read, dimensions, sop_instance_uids):
"""
Get a 3D array representing the region described by the specific read, prefaced by an origin
specifying its placement in the image (in index coordinates).
"""
# Get the full mask
mask = get_mask_region(read, dimensions, sop_instance_uids)
# Compress to small region with offset
origin, mask = lcat.util.compress_nodule_mask(mask)
return origin, mask
def get_mask_region(read, dimensions, sop_instance_uids):
"""
Returns a full representation of the region represented by the given nodule read as a mask.
"""
# Create mask output placeholder
mask = np.zeros(dimensions, dtype=bool)
# Create holes queue
holes = []
# Identify regions of interest
for roi_elem in read.findall('.//nih:roi', XMLNS):
# Check if it's a hole
if roi_elem.find('.//nih:inclusion', XMLNS).text.upper() == 'FALSE':
holes.append(roi_elem)
else:
mark_region(mask, roi_elem, sop_instance_uids)
# Create unincluded mask placeholder
unincluded = np.zeros(dimensions, dtype=bool)
# Identify hole regions
for roi_elem in holes:
mark_region(unincluded, roi_elem, sop_instance_uids)
# Remove unincluded regions
mask &= np.logical_not(unincluded)
return mask
def mark_region(mask, roi_elem, sop_instance_uids):
"""
Mark the region of interest encoded by `roi_elem` in `mask`. `sop_instance_uids` is used to
determine the slices referenced by `roi_elem`.
"""
# Create mask boundary placeholder
mask_boundary = np.zeros(mask.shape[:2], dtype=bool)
# Get Z index
sop_instance_uid = roi_elem.find('.//nih:imageSOP_UID', XMLNS).text
z_index = sop_instance_uids.index(sop_instance_uid)
# Mark boundary points
for edge_elem in roi_elem.findall('.//nih:edgeMap', XMLNS):
# Get x and y positions
x_position = int(edge_elem.find('.//nih:xCoord', XMLNS).text)
y_position = int(edge_elem.find('.//nih:yCoord', XMLNS).text)
# Mark boundary in mask
mask_boundary[x_position, y_position] = 1
# Fill in region
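    # Label connected regions (contour and background alike), then drop components touching the image border;
    # what remains is the filled contour (outline plus enclosed interior)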
mask_regions = skimage.measure.label(mask_boundary, background=-1, connectivity=1)
mask_center = skimage.segmentation.clear_border(mask_regions)
mask[:, :, z_index] |= mask_center != 0
def main():
"""
Command-line invocation routine.
"""
import scans
scan = scans.load_scan('../../data/LIDC-IDRI/LIDC-IDRI-0090')
import IPython
IPython.embed()
if __name__ == '__main__':
main()
|
gpl-3.0
| -7,819,591,577,144,248,000 | 28.082051 | 99 | 0.654911 | false |
google-research/graph-attribution
|
tests/test_training.py
|
1
|
3709
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for training GNN models."""
import numpy as np
import sonnet.v2 as snt
import tensorflow.compat.v2 as tf
from absl.testing import absltest, parameterized
from graph_attribution import experiments, featurization
from graph_attribution import graphnet_models as gnn_models
from graph_attribution import graphs as graph_utils
from graph_attribution import templates, training
class TrainingTests(parameterized.TestCase):
"""Basic tests for training a model."""
def _setup_graphs_labels(self, n_graphs):
"""Setup graphs and labels for a binary classification learning task."""
tensorizer = featurization.MolTensorizer()
smiles_pool = ['CO', 'CCC', 'CN1C=NC2=C1C(=O)N(C(=O)N2C)C', 'CCCO']
smiles = np.random.choice(smiles_pool, n_graphs)
graphs = graph_utils.smiles_to_graphs_tuple(smiles, tensorizer)
n_labels = len(graphs.nodes) if n_graphs == 1 else n_graphs
labels = np.random.choice([0, 1], n_labels).reshape(-1, 1)
return graphs, labels
def _setup_model(self, n_graphs):
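        """Build a small GCN-based model whose target type matches the labels from _setup_graphs_labels."""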
target_type = templates.TargetType.globals if n_graphs > 1 else templates.TargetType.nodes
model = experiments.GNN(10, 10, 10, 1, gnn_models.BlockType('gcn'), 'relu',
target_type, 3)
return model
@parameterized.named_parameters(('constant', 1024, 256, 4),
('droplast', 1000, 256, 3))
def test_get_batch_indices(self, n, batch_size, expected_n_batches):
batch_indices = training.get_batch_indices(n, batch_size)
self.assertEqual(batch_indices.shape, (expected_n_batches, batch_size))
@parameterized.parameters([0.2, 1.0])
def test_augment_binary_task(self, fraction):
"""Check that data augmention sizes are correct."""
initial_n = 10
x, y = self._setup_graphs_labels(initial_n)
node_vec = np.zeros_like(x.nodes[0])
edge_vec = np.zeros_like(x.edges[0])
initial_positive = int(np.sum(y == 1))
aug_n = int(np.floor(fraction * initial_positive))
expected_n = initial_n + aug_n * 2
x_aug, y_aug = training.augment_binary_task(x, y, node_vec, edge_vec,
fraction)
self.assertEqual(graph_utils.get_num_graphs(x_aug), expected_n)
self.assertLen(y_aug, expected_n)
# Make sure half of the augmented examples are positive labels.
aug_positive = np.sum(y_aug == 1) - initial_positive
self.assertEqual(aug_positive, aug_n)
@parameterized.named_parameters(('onegraph', 1),
('minibatch', 25))
def test_make_tf_opt_epoch_fn(self, batch_size):
"""Make sure tf-optimized epoch gives a valid loss."""
x, y = self._setup_graphs_labels(batch_size)
model = self._setup_model(batch_size)
opt = snt.optimizers.Adam()
loss_fn = tf.keras.losses.BinaryCrossentropy()
opt_fn = training.make_tf_opt_epoch_fn(x, y, batch_size, model, opt,
loss_fn)
loss = opt_fn(x, y).numpy()
self.assertTrue(np.isfinite(loss))
if __name__ == '__main__':
tf.config.experimental_run_functions_eagerly(True)
absltest.main()
|
apache-2.0
| -2,763,162,494,523,120,600 | 42.127907 | 94 | 0.672418 | false |
jbpoline/nidm-results_fsl
|
nidmfsl/fsl_exporter/fsl_exporter.py
|
1
|
44126
|
"""
Export neuroimaging results created with feat in FSL following NIDM-Results
specification.
@author: Camille Maumet <c.m.j.maumet@warwick.ac.uk>
@copyright: University of Warwick 2013-2014
"""
import re
import os
import sys
import glob
import json
import numpy as np
import subprocess
import warnings
# If "nidmresults" code is available locally work on the source code (used
# only for development)
FSL_EXPORTER_DIR = os.path.dirname(os.path.realpath(__file__))
NIDM_FSL_DIR = os.path.dirname(FSL_EXPORTER_DIR)
NIDM_RESULTS_FSL_DIR = os.path.dirname(NIDM_FSL_DIR)
NIDM_RESULTS_SRC_DIR = os.path.join(
os.path.dirname(NIDM_RESULTS_FSL_DIR), "nidmresults")
if os.path.isdir(NIDM_RESULTS_SRC_DIR):
sys.path.append(NIDM_RESULTS_SRC_DIR)
from nidmresults.exporter import NIDMExporter
from nidmresults.objects.constants import *
from nidmresults.objects.modelfitting import *
from nidmresults.objects.contrast import *
from nidmresults.objects.inference import *
from nidmfsl.fsl_exporter.objects.fsl_objects import *
class FSLtoNIDMExporter(NIDMExporter, object):
"""
    Parse an FSL result directory to extract the pieces of information to be
stored in NIDM-Results and generate a NIDM-Results export.
"""
def __init__(self, *args, **kwargs):
super(FSLtoNIDMExporter, self).__init__()
self.feat_dir = kwargs.pop('feat_dir')
nidm_dirs = glob.glob(os.path.join(self.feat_dir, 'nidm****'))
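        # Choose a fresh export directory: plain 'nidm' if unused, otherwise 'nidm_NNNN' with an incremented counter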
if nidm_dirs:
if nidm_dirs[-1] == os.path.join(self.feat_dir, 'nidm'):
export_dir_num = 1
else:
m = re.search('(?<=nidm_).*', nidm_dirs[-1])
export_dir_num = int(m.group(0)) + 1
self.export_dir = os.path.join(
self.feat_dir, 'nidm' + "_{0:0>4}".format(export_dir_num))
else:
self.export_dir = os.path.join(self.feat_dir, 'nidm')
self.design_file = os.path.join(self.feat_dir, 'design.fsf')
# FIXME: maybe not always "4"?
feat_post_log_file = os.path.join(self.feat_dir, 'logs', 'feat4_post')
# FIXME: this file is sometimes missing, can the connectivity info
        # be retrieved from somewhere else??
if os.path.isfile(feat_post_log_file):
self.feat_post_log = open(feat_post_log_file, 'r')
else:
self.feat_post_log = None
self.version = kwargs.pop('version')
self.coord_space = None
self.contrast_names_by_num = dict()
def parse(self):
"""
        Parse an FSL result directory to extract the pieces of information to be
stored in NIDM-Results.
"""
# Load design.fsf file
design_file_open = open(self.design_file, 'r')
self.design_txt = design_file_open.read()
# Load feat post log file
if self.feat_post_log is not None:
self.feat_post_log = self.feat_post_log.read()
fmri_level_re = r'.*set fmri\(level\) (?P<info>\d+).*'
fmri_level = int(self._search_in_fsf(fmri_level_re))
self.first_level = (fmri_level == 1)
# FIXME cope1
if self.first_level:
# stat_dir = list([os.path.join(self.feat_dir, 'stats')])
self.analysis_dirs = list([self.feat_dir])
else:
# If feat was called with the GUI then the analysis directory is in
# the nested cope folder
self.analysis_dirs = glob.glob(
os.path.join(self.feat_dir, 'cope*.feat'))
if not self.analysis_dirs:
self.analysis_dirs = list([self.feat_dir])
# cope_dirs
# print cope_dirs
# stat_dir = os.path.join(self.feat_dir, 'cope1.feat', 'stats')
# analysis_dir = os.path.join(self.feat_dir, 'cope1.feat')
super(FSLtoNIDMExporter, self).parse()
def _add_namespaces(self):
"""
Overload of parent _add_namespaces to add FSL namespace.
"""
super(FSLtoNIDMExporter, self)._add_namespaces()
self.doc.add_namespace(FSL)
def _find_software(self):
"""
Return an object of type Software describing the version of FSL used to
compute the current analysis.
"""
version_re = r'.*set fmri\(version\) (?P<info>\d+\.?\d+).*'
feat_version = self._search_in_fsf(version_re)
software = Software(feat_version=feat_version)
return software
def _find_model_fitting(self):
"""
        Parse FSL result directory to retrieve model fitting information.
Return a list of objects of type ModelFitting.
"""
self.model_fittings = dict()
for analysis_dir in self.analysis_dirs:
stat_dir = os.path.join(analysis_dir, 'stats')
design_matrix = self._get_design_matrix(analysis_dir)
data = self._get_data()
error_model = self._get_error_model()
rms_map = self._get_residual_mean_squares_map(stat_dir)
param_estimates = self._get_param_estimate_maps(stat_dir)
mask_map = self._get_mask_map(analysis_dir)
grand_mean_map = self._get_grand_mean(mask_map.file, analysis_dir)
activity = self._get_model_parameters_estimations(error_model)
model_fitting = ModelFitting(
activity, design_matrix, data,
error_model, param_estimates, rms_map, mask_map,
grand_mean_map)
self.model_fittings[analysis_dir] = model_fitting
return self.model_fittings
def _find_contrasts(self):
"""
        Parse FSL result directory to retrieve information about contrasts.
Return a dictionary of (key, value) pairs where key is a tuple
containing the identifier of a ModelParametersEstimation object and a
tuple of identifiers of ParameterEstimateMap objects, and value is an
object of type Contrast.
"""
for analysis_dir in self.analysis_dirs:
            # Retrieve the Model Parameters Estimations activity corresponding
            # to the current analysis directory.
mf_id = self.model_fittings[analysis_dir].activity.id
stat_dir = os.path.join(analysis_dir, 'stats')
# Degrees of freedom
# FIXME: check what happens when more than one contrast is
# performed
dof_file = open(os.path.join(stat_dir, 'dof'), 'r')
dof = float(dof_file.read())
exc_sets = glob.glob(os.path.join(analysis_dir,
'thresh_z*.nii.gz'))
contrasts = dict()
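            # One excursion set per contrast: recover the statistic type (T or F) and contrast number from the file name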
for filename in exc_sets:
s = re.compile('zf?stat\d+')
zstatnum = s.search(filename)
zstatnum = zstatnum.group()
if zstatnum.startswith("zstat"):
stat_type = "T"
con_num = zstatnum.replace('zstat', '')
elif zstatnum.startswith("zfstat"):
stat_type = "F"
con_num = zstatnum.replace('zfstat', '')
                # If more than one excursion set is reported, we need to use
                # an index in the file names of the files exported in NIDM
if len(exc_sets) > 1:
stat_num = "_" + \
stat_type.upper() + "{0:0>3}".format(con_num)
else:
stat_num = ""
# Contrast name
name_re = r'.*set fmri\(conname_real\.' + con_num +\
'\) "(?P<info>[^"]+)".*'
contrast_name = self._search_in_fsf(name_re)
self.contrast_names_by_num[con_num] = contrast_name
# Contrast estimation activity
estimation = ContrastEstimation(con_num, contrast_name)
# Contrast weights
weights_re = r'.*set fmri\(con_real' + con_num +\
'\.\d+\) (?P<info>-?\d+)'
weight_search = re.compile(weights_re)
contrast_weights = str(
re.findall(weight_search,
self.design_txt)).replace("'", '')
weights = ContrastWeights(stat_num, contrast_name,
contrast_weights, stat_type)
# Find which parameter estimates were used to compute the
# contrast
pe_ids = list()
pe_index = 1
contrast_weights = contrast_weights.replace(' ', '')
contrast_weights = contrast_weights.replace('[', '')
contrast_weights = contrast_weights.replace(']', '')
contrast_weights = contrast_weights.split(',')
# Whenever a "1" is found in contrast_weights, the
# parameter estimate map identified by the corresponding
# index is in use
for beta_index in contrast_weights:
if int(beta_index) == 1:
for model_fitting in self.model_fittings.values():
for pe in model_fitting.param_estimates:
s = re.compile('pe\d+')
pe_num = s.search(pe.file)
pe_num = pe_num.group()
pe_num = pe_num.replace('pe', '')
                                if int(pe_num) == pe_index:
pe_ids.append(pe.id)
pe_index += 1
# Convert to immutable tuple to be used as key
pe_ids = tuple(pe_ids)
# Statistic Map
stat_file = os.path.join(
stat_dir,
stat_type.lower() + 'stat' + str(con_num) + '.nii.gz')
stat_map = StatisticMap(
stat_file, stat_type, stat_num,
contrast_name, dof, self.coord_space,
self.export_dir)
# Z-Statistic Map
z_stat_file = os.path.join(
stat_dir,
'zstat' + str(con_num) + '.nii.gz')
z_stat_map = StatisticMap(
z_stat_file, 'Z', stat_num,
contrast_name, dof, self.coord_space,
self.export_dir)
if stat_type is "T":
# Contrast Map
con_file = os.path.join(stat_dir,
'cope' + str(con_num) + '.nii.gz')
contrast_map = ContrastMap(con_file, stat_num,
contrast_name, self.coord_space,
self.export_dir)
# Contrast Variance and Standard Error Maps
varcontrast_file = os.path.join(
stat_dir, 'varcope' + str(con_num) + '.nii.gz')
is_variance = True
std_err_map = ContrastStdErrMap(
stat_num,
varcontrast_file, is_variance, self.coord_space,
self.coord_space, self.export_dir)
std_err_map_or_mean_sq_map = std_err_map
elif stat_type is "F":
contrast_map = None
sigma_sq_file = os.path.join(
stat_dir, 'sigmasquareds.nii.gz')
expl_mean_sq_map = ContrastExplainedMeanSquareMap(
stat_file, sigma_sq_file, stat_num,
self.coord_space, self.export_dir)
std_err_map_or_mean_sq_map = expl_mean_sq_map
else:
raise Exception("Unknown statistic type: "+stat_type)
con = Contrast(
con_num, contrast_name, weights, estimation,
contrast_map, std_err_map_or_mean_sq_map, stat_map,
z_stat_map)
contrasts.setdefault((mf_id, pe_ids), list()).append(con)
return contrasts
def _find_inferences(self):
"""
        Parse FSL result directory to retrieve information about inference
along with peaks and clusters. Return a dictionary of (key, value)
pairs where key is the identifier of a ContrastEstimation object and
value is an object of type Inference.
"""
inferences = dict()
for analysis_dir in self.analysis_dirs:
exc_sets = glob.glob(os.path.join(analysis_dir,
'thresh_z*.nii.gz'))
# Find excursion sets (in a given feat directory we have one
# excursion set per contrast)
for filename in exc_sets:
s = re.compile('zf?stat\d+')
zstatnum = s.search(filename)
zstatnum = zstatnum.group()
if zstatnum.startswith("zstat"):
stat_type = "T"
stat_num = zstatnum.replace('zstat', '')
elif zstatnum.startswith("zfstat"):
stat_type = "F"
stat_num = zstatnum.replace('zfstat', '')
                # If more than one excursion set is reported, we need to use
                # an index in the file names of the files exported in NIDM
if len(exc_sets) > 1:
stat_num_t = "_" + \
stat_type.upper() + "{0:0>3}".format(stat_num)
else:
stat_num_t = ""
# Find corresponding contrast estimation activity
con_id = None
for contrasts in self.contrasts.values():
for contrast in contrasts:
s = re.compile('zf?stat\d+')
con_num = s.search(contrast.z_stat_map.file)
con_num = con_num.group()
con_num = con_num.replace('zstat', '')\
.replace('zfstat', '')\
.replace('.nii.gz', '')
if con_num == stat_num:
con_id = contrast.estimation.id
assert con_id is not None
# Inference activity
inference_act = InferenceActivity(
stat_num,
self.contrast_names_by_num[stat_num])
# Excursion set
visualisation = os.path.join(
analysis_dir,
'rendered_thresh_zstat' + stat_num + '.png')
zFileImg = os.path.join(analysis_dir,
'thresh_zstat' + stat_num + '.nii.gz')
exc_set = ExcursionSet(zFileImg, stat_num_t, visualisation,
self.coord_space, self.export_dir)
# Height Threshold
prob_re = r'.*set fmri\(prob_thresh\) (?P<info>\d+\.?\d+).*'
z_re = r'.*set fmri\(z_thresh\) (?P<info>\d+\.?\d+).*'
type_re = r'.*set fmri\(thresh\) (?P<info>\d+).*'
prob_thresh = float(self._search_in_fsf(prob_re))
z_thresh = float(self._search_in_fsf(z_re))
thresh_type = int(self._search_in_fsf(type_re))
# FIXME: deal with 0 = no thresh?
voxel_uncorr = (thresh_type == 1)
voxel_corr = (thresh_type == 2)
cluster_thresh = (thresh_type == 3)
stat_threshold = None
extent_p_corr = None
p_corr_threshold = None
p_uncorr_threshold = None
if voxel_uncorr:
p_uncorr_threshold = prob_thresh
elif voxel_corr:
p_corr_threshold = prob_thresh
else:
stat_threshold = z_thresh
extent_p_corr = prob_thresh
height_thresh = HeightThreshold(
stat_threshold,
p_corr_threshold, p_uncorr_threshold)
# Extent Threshold
extent_thresh = ExtentThreshold(p_corr=extent_p_corr)
# There is not table display listing peaks and clusters for
# voxelwise correction
if cluster_thresh:
# Clusters (and associated peaks)
clusters = self._get_clusters_peaks(stat_num)
# Peak and Cluster Definition Criteria
peak_criteria = PeakCriteria(
stat_num,
self._get_num_peaks(), self._get_peak_dist())
clus_criteria = ClusterCriteria(
stat_num,
self._get_connectivity())
else:
clusters = None
peak_criteria = None
clus_criteria = None
# FIXME: for now only based on conmask1_1
m = re.search(
r"set fmri\(conmask1_1\) (?P<con_maskg>\d+)",
self.design_txt)
assert m is not None
contrast_masking = bool(int(m.group("con_maskg")))
if contrast_masking:
# Display mask
# FIXME deal with the case in which we are contrast masking by
# more than one contrast
# contrast_masking_search = re.compile(r'.*set
# fmri\(conmask'+contrast_num+'_(?P<maskingconnum>\d+)\)
# (?P<domask>\d+).*')
# contrast_masking_found =
# contrast_masking_search.search(self.design_txt)
# do_contrast_masking =
# float(contrast_masking_found.group('domask'))
# if do_contrast_masking:
# contrast_masking_num =
# contrast_masking_found.group('maskingconnum')
# contrast_masking_file =
# else:
# contrast_masking_num = None
# FIXME: We need an example with more than one contrast to code
# contrast masking
contrast_masking_file = self._get_display_mask()
display_mask = DisplayMaskMap(
stat_num,
contrast_masking_file, self.coord_space,
self.export_dir)
else:
display_mask = None
# Search space
search_space = self._get_search_space(analysis_dir)
inference = Inference(
self.version,
inference_act, height_thresh,
extent_thresh, peak_criteria, clus_criteria,
display_mask, exc_set, clusters, search_space,
self.software.id)
inferences.setdefault(con_id, list()).append(inference)
return inferences
def _get_design_matrix(self, analysis_dir):
"""
        Parse FSL result directory to retrieve information about the design
matrix. Return an object of type DesignMatrix.
"""
design_mat_file = os.path.join(analysis_dir, 'design.mat')
design_mat_fid = open(design_mat_file, 'r')
design_mat_values = np.loadtxt(design_mat_fid, skiprows=5, ndmin=2)
design_mat_image = os.path.join(analysis_dir, 'design.png')
# Regressor names (not taking into account HRF model)
regnames_re = r'.*set fmri\(evtitle\d+\).*'
ev_names = re.findall(regnames_re, self.design_txt)
orig_ev = dict()
for ev_name in ev_names:
regname_re = r'.*set fmri\(evtitle(?P<num>\d+)\)\s*"(?P<name>.*)"'
info_search = re.compile(regname_re)
info_found = info_search.search(ev_name)
num = info_found.group('num')
name = info_found.group('name')
orig_ev[int(num)] = name
# For first-level fMRI only
if self.first_level:
# Design-type: event, mixed or block
# FIXME: deal with other options than "custom"
onsets_re = r'.*set fmri\(custom(?P<num>\d+)\)\s*"(?P<file>.*)".*'
r = re.compile(onsets_re)
onsets = [m.groupdict() for m in r.finditer(self.design_txt)]
max_duration = 0
min_duration = 36000
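            # Classify the design from onset durations: events (<= 1s), blocks (> 1s) or mixed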
for onset in onsets:
if os.path.isfile(onset['file']):
aa = np.loadtxt(onset['file'])
max_duration = max(
max_duration, np.amax(aa[:, 2], axis=None))
min_duration = min(
min_duration, np.amin(aa[:, 2], axis=None))
else:
missing_onset_file = onset['file']
max_duration = None
if max_duration is not None:
if max_duration <= 1:
design_type = NIDM_EVENT_RELATED_DESIGN
elif min_duration > 1:
design_type = NIDM_BLOCK_BASED_DESIGN
else:
design_type = NIDM_MIXED_DESIGN
else:
warnings.warn(
"Onset file " + missing_onset_file + " not found, " +
"design type will not be reported")
design_type = None
# HRF model (only look at first ev)
m = re.search(
r"set fmri\(convolve1\) (?P<hrf>\d)", self.design_txt)
assert m is not None
hrf = int(m.group("hrf"))
if hrf == 1: # 1: Gaussian
hrf_model = NIDM_GAUSSIAN_HRF
elif hrf == 2: # 2 : Gamma
hrf_model = NIDM_GAMMA_HRF
elif hrf == 3: # 3 : Double-Gamma HRF
hrf_model = FSL_FSLS_GAMMA_DIFFERENCE_HRF
elif hrf == 4: # 4 : Gamma basis functions
hrf_model = NIDM_GAMMA_HRB
elif hrf == 5: # 5 : Sine basis functions
hrf_model = NIDM_SINE_BASIS_SET
elif hrf == 6: # 6 : FIR basis functions
hrf_model = NIDM_FINITE_IMPULSE_RESPONSE_HRB
# Drift model
m = re.search(
r"set fmri\(paradigm_hp\) (?P<cut_off>\d+)", self.design_txt)
assert m is not None
cut_off = float(m.group("cut_off"))
drift_model = DriftModel(
FSL_GAUSSIAN_RUNNING_LINE_DRIFT_MODEL, cut_off)
else:
design_type = None
hrf_model = None
drift_model = None
real_ev = list()
for ev_num, ev_name in orig_ev.items():
real_ev.append(ev_name)
# Add one regressor name if there is an extra column for a temporal
# derivative
tempo_deriv_re = \
r'.*set fmri\(deriv_yn'+str(ev_num)+'\) (?P<info>[\d]+).*'
tempo_deriv = bool(self._search_in_fsf(tempo_deriv_re))
if tempo_deriv:
real_ev.append(ev_name+'*temporal_derivative')
# FIXME: other hrf models (FIR...)
design_matrix = DesignMatrix(design_mat_values, design_mat_image,
self.export_dir, real_ev, design_type,
hrf_model, drift_model)
return design_matrix
def _get_data(self):
"""
        Parse FSL result directory to retrieve information about the data.
Return an object of type Data.
"""
grand_mean_scaling = True
target_intensity = 10000.0
data = Data(grand_mean_scaling, target_intensity)
return data
def _get_error_model(self):
"""
        Parse FSL result directory to retrieve information about the error
model. Return an object of type ErrorModel.
"""
if self.first_level:
variance_homo = True
dependance = SERIALLY_CORR
variance_spatial = SPATIALLY_LOCAL
dependance_spatial = SPATIALLY_REGUL
else:
variance_homo = False
dependance = INDEPEDENT_CORR
variance_spatial = SPATIALLY_LOCAL
dependance_spatial = None
error_distribution = NIDM_GAUSSIAN_DISTRIBUTION
error_model = ErrorModel(
error_distribution, variance_homo,
variance_spatial, dependance, dependance_spatial)
return error_model
def _get_residual_mean_squares_map(self, stat_dir):
"""
        Parse FSL result directory to retrieve information about the residual
mean squares map. Return an object of type ResidualMeanSquares.
"""
if self.first_level:
residuals_file = os.path.join(stat_dir, 'sigmasquareds.nii.gz')
else:
# FIXME cope num enter here
sigma2_group_file = os.path.join(stat_dir,
'mean_random_effects_var1.nii.gz')
sigma2_sub_file = os.path.join(stat_dir,
'varcope1.nii.gz')
# Create residual mean squares map
sigma2_group_img = nib.load(sigma2_group_file)
sigma2_group = sigma2_group_img.get_data()
sigma2_sub_img = nib.load(sigma2_sub_file)
sigma2_sub = sigma2_sub_img.get_data()
residuals_file = os.path.join(stat_dir,
'calculated_sigmasquareds.nii.gz')
residuals_img = nib.Nifti1Image(sigma2_group + sigma2_sub,
sigma2_sub_img.get_qform())
nib.save(residuals_img, residuals_file)
# In FSL all files will be in the same coordinate space
self.coord_space = CoordinateSpace(self._get_coordinate_system(),
residuals_file)
rms_map = ResidualMeanSquares(self.export_dir, residuals_file,
self.coord_space)
# FIXME: does not work
# if not self.first_level:
# Delete calculated rms file (a copy is now in the NIDM export)
# FIXME we need to add the wasDerivedFrom maps
# os.remove(residuals_file)
return rms_map
def _get_param_estimate_maps(self, stat_dir):
"""
        Parse FSL result directory to retrieve information about the parameter
estimates. Return a list of objects of type ParameterEstimateMap.
"""
param_estimates = list()
for filename in os.listdir(stat_dir):
if filename.startswith("pe"):
if filename.endswith(".nii.gz"):
s = re.compile('pe\d+')
penum = s.search(filename)
penum = penum.group()
penum = penum.replace('pe', '')
full_path_file = os.path.join(stat_dir, filename)
param_estimate = ParameterEstimateMap(
full_path_file,
penum, self.coord_space)
param_estimates.append(param_estimate)
return param_estimates
def _get_mask_map(self, analysis_dir):
"""
        Parse FSL result directory to retrieve information about the mask
created as part of Model Parameters Estimation. Return an object of
type MaskMap.
"""
mask_file = os.path.join(analysis_dir, 'mask.nii.gz')
mask_map = MaskMap(self.export_dir, mask_file,
self.coord_space, False)
return mask_map
def _get_grand_mean(self, mask_file, analysis_dir):
"""
        Parse FSL result directory to retrieve information about the grand
mean map. Return an object of type GrandMeanMap.
"""
grand_mean_file = os.path.join(analysis_dir, 'mean_func.nii.gz')
# FIXME: Check if there is an alternative file to use here (maybe)
# depending on FSL version
if not os.path.isfile(grand_mean_file):
grand_mean = None
else:
grand_mean = GrandMeanMap(grand_mean_file, mask_file,
self.coord_space, self.export_dir)
return grand_mean
def _get_coordinate_system(self):
"""
        Parse FSL result directory to retrieve information about the
coordinate system used in the current analysis (dependent on the
template).
"""
space_re = r'.*set fmri\(regstandard_yn\) (?P<info>[\d]+).*'
standard_space = bool(self._search_in_fsf(space_re))
if standard_space:
custom_re = \
r'.*set fmri\(alternateReference_yn\) (?P<info>[\d]+).*'
            custom_space = self._search_in_fsf(custom_re, True)
            if custom_space is not None:
                custom_standard = (custom_space == "1")
            else:
                custom_standard = False
else:
custom_re = r'.*set fmri\(regstandard\) (?P<info>.+).*'
custom_space = self._search_in_fsf(custom_re)
if custom_space is not None:
custom_standard = True
else:
custom_standard = False
# TODO check if first level is always performed in subject space?
if not standard_space or self.first_level:
coordinate_system = NIDM_SUBJECT_COORDINATE_SYSTEM
else:
if not custom_standard:
coordinate_system = \
NIDM_ICBM_MNI152_NON_LINEAR6TH_GENERATION_COORDINATE_SYSTEM
else:
coordinate_system = NIDM_STANDARDIZED_COORDINATE_SYSTEM
return coordinate_system
def _search_in_fsf(self, regexp, return_not_found=False):
"""
Look for information matching regular expression 'regexp' in the design
file of the current study.
"""
info_search = re.compile(regexp)
info_found = info_search.search(self.design_txt)
if not info_found and return_not_found:
info = None
else:
info = info_found.group('info')
return info
def _get_display_mask(self):
"""
        Parse FSL result directory to retrieve information about the display mask.
"""
# FIXME this should be updated with actual contrast masking file
mask_file = os.path.join(self.feat_dir, 'mask.nii.gz')
return mask_file
def _get_num_peaks(self):
if self.feat_post_log is not None:
num_peak_search = re.compile(r'.* --num=(?P<numpeak>\d+)+ .*')
num_peak_found = num_peak_search.search(self.feat_post_log)
if num_peak_found:
num_peak = int(num_peak_found.group('numpeak'))
else:
num_peak_search = re.compile(r'.* -n=(?P<numpeak>\d+)+ .*')
num_peak_found = num_peak_search.search(self.feat_post_log)
if num_peak_found:
num_peak = int(num_peak_found.group('numpeak'))
else:
# If not specified, default value is inf?
# (cf. http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster)
# Is it ok to say no limit with -1 (as for Inf
# we would need float...)
                    # FIXME: for now omitted if not explicitly defined
num_peak = None
else:
num_peak = None
return num_peak
def _get_peak_dist(self):
if self.feat_post_log is not None:
peak_dist_search = re.compile(
r'.* --peakdist=(?P<peakdist>\d+)+ .*')
peak_dist_found = peak_dist_search.search(self.feat_post_log)
if peak_dist_found:
peak_dist = float(peak_dist_found.group('peakdist'))
else:
# If not specified, default value is zero (cf.
# http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Cluster)
peak_dist = 0.0
else:
peak_dist = 0.0
return peak_dist
def _get_connectivity(self):
"""
        Parse FSL result directory to retrieve peak connectivity within a
cluster.
"""
if self.feat_post_log is not None:
conn_re = r'.* --connectivity=(?P<connectivity>\d+)+ .*'
connectivity_search = re.compile(conn_re)
connectivity = int(
connectivity_search.search(
self.feat_post_log).group('connectivity'))
else:
connectivity = None
return connectivity
def _get_search_space(self, analysis_dir):
"""
        Parse FSL result directory to retrieve information about the search
space. Return an object of type SearchSpace.
"""
# FIXME this needs to be estimated
search_space_file = os.path.join(analysis_dir, 'mask.nii.gz')
smoothness_file = os.path.join(analysis_dir, 'stats', 'smoothness')
# Load DLH, VOLUME, RESELS and noise FWHM
with open(smoothness_file, "r") as fp:
smoothness_txt = fp.read()
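        # Regex over the 'smoothest -V' output: per-axis FWHM in voxels and mm, then DLH, volume and voxels per resel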
sm_reg = \
r"FWHMx = (?P<FWHMx_vx>\d+\.?\d*) voxels, " + \
r"FWHMy = (?P<FWHMy_vx>\d+\.?\d*) voxels, " + \
r"FWHMz = (?P<FWHMz_vx>\d+\.?\d*) voxels\n" + \
r"FWHMx = (?P<FWHMx_mm>\d+\.?\d*) mm, " + \
r"FWHMy = (?P<FWHMy_mm>\d+\.?\d*) mm, " + \
r"FWHMz = (?P<FWHMz_mm>\d+\.?\d*) mm\n" + \
r"DLH (?P<DLH>\d+\.?\d*) voxels\^\-3\n" + \
r"VOLUME (?P<volume>\d+) voxels\n" + \
r"RESELS (?P<vox_per_resels>\d+\.?\d*) voxels per resel"
sm_match = re.search(sm_reg, smoothness_txt, re.DOTALL)
if sm_match:
d = sm_match.groupdict()
else:
# smoothness was estimated without the "-V" option, recompute
log_file = os.path.join(self.feat_dir, 'logs', 'feat3_stats')
if not os.path.isfile(log_file):
log_file = os.path.join(self.feat_dir, 'logs', 'feat3_film')
if not os.path.isfile(log_file):
warnings.warn(
"Log file feat3_stats/feat3_film not found, " +
"noise FWHM will not be reported")
noise_fwhm_in_voxels = None
noise_fwhm_in_units = None
# Load DLH, VOLUME and RESELS
d = dict()
d['DLH'], d['volume'], d['vox_per_resels'] = \
np.loadtxt(smoothness_file, usecols=[1])
else:
with open(log_file, "r") as fp:
log_txt = fp.read()
cmd_match = re.search(r"(?P<cmd>smoothest.*)\n", log_txt)
cmd = cmd_match.group("cmd")
cmd = cmd.replace("stats/smoothness", "stats/smoothness_v")
cmd = cmd.replace("smoothest", "smoothest -V")
subprocess.call("cd "+analysis_dir+";"+cmd, shell=True)
with open(smoothness_file+"_v", "r") as fp:
smoothness_txt = fp.read()
sm_match = re.search(sm_reg, smoothness_txt, re.DOTALL)
d = sm_match.groupdict()
vol_in_units = float(d['volume'])*np.prod(
json.loads(self.coord_space.voxel_size))
vol_in_resels = float(d['volume'])/float(d['vox_per_resels'])
if 'FWHMx_vx' in d:
noise_fwhm_in_voxels = json.dumps(
[float(d['FWHMx_vx']), float(d['FWHMy_vx']),
float(d['FWHMz_vx'])])
noise_fwhm_in_units = json.dumps(
[float(d['FWHMx_mm']), float(d['FWHMy_mm']),
float(d['FWHMz_mm'])])
search_space = SearchSpace(
search_space_file=search_space_file,
vol_in_voxels=int(d['volume']),
vol_in_units=vol_in_units,
vol_in_resels=vol_in_resels,
resel_size_in_voxels=float(d['vox_per_resels']),
dlh=float(d['DLH']),
random_field_stationarity=True,
noise_fwhm_in_voxels=noise_fwhm_in_voxels,
noise_fwhm_in_units=noise_fwhm_in_units,
coord_space=self.coord_space,
export_dir=self.export_dir)
return search_space
def _get_clusters_peaks(self, stat_num):
"""
        Parse FSL result directory to retrieve information about the clusters
and peaks declared significant for statistic 'stat_num'. Return a list
of Cluster objects.
"""
clusters = list()
for analysis_dir in self.analysis_dirs:
# Cluster list (positions in voxels)
cluster_file = os.path.join(analysis_dir,
'cluster_zstat' + stat_num + '.txt')
if not os.path.isfile(cluster_file):
cluster_file = None
else:
cluster_table = np.loadtxt(cluster_file, skiprows=1, ndmin=2)
# Cluster list (positions in mm)
cluster_std_file = os.path.join(
analysis_dir,
'cluster_zstat' + stat_num + '_std.txt')
if not os.path.isfile(cluster_std_file):
cluster_std_file = None
# cluster_std_table = np.zeros_like(cluster_table)*float('nan')
else:
cluster_std_table = np.loadtxt(cluster_std_file, skiprows=1,
ndmin=2)
# Peaks
peak_file = os.path.join(
analysis_dir, 'lmax_zstat' + stat_num + '.txt')
if not os.path.isfile(peak_file):
peak_file = None
else:
peak_table = np.loadtxt(peak_file, skiprows=1, ndmin=2)
peak_std_file = os.path.join(analysis_dir,
'lmax_zstat' + stat_num + '_std.txt')
if not os.path.isfile(peak_std_file):
peak_std_file = None
else:
peak_std_table = np.loadtxt(peak_std_file, skiprows=1, ndmin=2)
peaks = dict()
prev_cluster = -1
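            # Build Peak objects grouped by cluster, joining voxel- and mm-space tables when both are available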
if (peak_file is not None) and (peak_std_file is not None):
peaks_join_table = np.column_stack(
(peak_table, peak_std_table))
for peak_row in peaks_join_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
# First peak in this cluster
peakIndex = 1
# Though peak coordinates in voxels are integer, we use a
# float type to comply with the rdfs:range
peak = Peak(
peak_index=int(peakIndex), x=int(peak_row[2]),
y=int(peak_row[3]), z=int(peak_row[4]),
x_std=peak_row[7], y_std=peak_row[8],
z_std=peak_row[9],
equiv_z=float(peak_row[1]),
cluster_index=cluster_id, stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
elif (peak_file is not None):
for peak_row in peak_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
peakIndex = 1
peak = Peak(
peak_index=int(peakIndex), x=int(peak_row[2]),
y=int(peak_row[3]), z=int(peak_row[4]),
equiv_z=float(peak_row[1]), cluster_index=cluster_id,
stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
elif (peak_std_file is not None):
for peak_row in peak_std_table:
cluster_id = int(peak_row[0])
if not cluster_id == prev_cluster:
peakIndex = 1
peak = Peak(
peak_index=int(peakIndex),
x_std=peak_row[2],
y_std=peak_row[3],
z_std=peak_row[4],
equiv_z=float(peak_row[1]), cluster_index=cluster_id,
stat_num=stat_num)
if cluster_id in peaks:
peaks[cluster_id].append(peak)
else:
peaks[cluster_id] = list([peak])
prev_cluster = cluster_id
peakIndex = peakIndex + 1
if (cluster_file is not None) and (cluster_std_file is not None):
clusters_join_table = np.column_stack((cluster_table,
cluster_std_table))
for cluster_row in clusters_join_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x = float(cluster_row[8])
y = float(cluster_row[9])
z = float(cluster_row[10])
x_std = float(cluster_row[24])
y_std = float(cluster_row[25])
z_std = float(cluster_row[26])
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
elif (cluster_file is not None):
for cluster_row in cluster_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x = float(cluster_row[8])
y = float(cluster_row[9])
z = float(cluster_row[10])
x_std = None
y_std = None
z_std = None
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
elif (cluster_std_file is not None):
for cluster_row in cluster_std_table:
cluster_id = int(cluster_row[0])
size = int(cluster_row[1])
pFWER = float(cluster_row[2])
x_std = float(cluster_row[8])
y_std = float(cluster_row[9])
z_std = float(cluster_row[10])
x = None
y = None
z = None
clusters.append(
Cluster(cluster_num=cluster_id, size=size,
pFWER=pFWER, peaks=peaks[
cluster_id], x=x, y=y, z=z,
x_std=x_std, y_std=y_std, z_std=z_std))
return clusters
|
mit
| 2,310,649,534,921,080,300 | 39.971216 | 83 | 0.498051 | false |
Antrek/NDNProject
|
docs/redmine_issue.py
|
1
|
2560
|
# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# Based on http://doughellmann.com/2010/05/09/defining-custom-roles-in-sphinx.html
"""Integration of Sphinx with Redmine.
"""
from docutils import nodes, utils
from docutils.parsers.rst.roles import set_classes
def redmine_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""Link to a Redmine issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
"""
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'Redmine issue number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
app = inliner.document.settings.env.app
node = make_link_node(rawtext, app, 'issues', str(issue_num), options)
return [node], []
def make_link_node(rawtext, app, type, slug, options):
"""Create a link to a Redmine resource.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param type: Link type (issue, changeset, etc.)
:param slug: ID of the thing to link to
:param options: Options dictionary passed to role func.
"""
#
try:
base = app.config.redmine_project_url
if not base:
raise AttributeError
except AttributeError, err:
raise ValueError('redmine_project_url configuration value is not set (%s)' % str(err))
#
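    # Build the issue URL, making sure exactly one slash separates the base URL from the path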
slash = '/' if base[-1] != '/' else ''
ref = base + slash + type + '/' + slug + '/'
set_classes(options)
node = nodes.reference(rawtext, 'Issue #' + utils.unescape(slug), refuri=ref,
**options)
return node
def setup(app):
"""Install the plugin.
:param app: Sphinx application context.
"""
app.add_role('issue', redmine_role)
app.add_config_value('redmine_project_url', None, 'env')
return
|
gpl-3.0
| -1,619,331,055,285,040,400 | 35.585714 | 94 | 0.647266 | false |
p-morais/rl
|
rl/algos/dagger.py
|
1
|
2489
|
"""CURRENTLY OUTDATED. WILL UPDATE IN FUTURE"""
"""
import torch
from torch import Tensor
from torch.autograd import Variable as Var
from torch.utils.data import DataLoader
from rl.utils import ProgBar, RealtimePlot
from rl.envs import controller
class DAgger():
def __init__(self, env, learner, expert):
self.env = env
self.expert = expert
self.learner = learner
self.rtplot = RealtimePlot()
self.rtplot.config("MSE Loss", "Epoch")
def train(self, dagger_itr, epochs, trj_len):
env = self.env
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
dagger_init = True
X, y = Tensor(trj_len, obs_dim), Tensor(trj_len, action_dim)
obs = env.reset()
for t in range(trj_len):
expert_action = controller(env.cstate, 0, 1, 0)
obs, _, done, _ = env.step(expert_action)
X[t, :], y[t, :] = Tensor(obs), Tensor(expert_action)
for d in range(dagger_itr):
X_new, y_new = Tensor(trj_len, obs_dim), Tensor(trj_len, action_dim)
obs = env.reset()
for t in range(trj_len):
if dagger_init:
dagger_init = False
continue
obs_torch = Var(Tensor(obs[None, :]), requires_grad=False)
action = self.learner(obs_torch).data.numpy()[0]
expert_action = controller(env.cstate, 0, 1, 0)
obs, _, done, _ = env.step(action)
if done or t == trj_len - 1:
X_new, y_new = X_new[0:t, :], y_new[0:t, :]
X, y = torch.cat((X, X_new), 0), torch.cat((y, y_new), 0)
break
X_new[t, :], y_new[t, :] = Tensor(obs), Tensor(expert_action)
dataset = SplitDataset(X.numpy(), y.numpy())
dataloader = DataLoader(dataset, batch_size=100, shuffle=True)
bar = ProgBar(len(dataloader) * epochs)
for e in range(epochs):
running_loss = 0
for batch_idx, (X_batch, y_batch) in enumerate(dataloader):
X_batch, y_batch = Var(Tensor(X_batch)), Var(Tensor(y_batch))
running_loss += self.learner.fit(X_batch, y_batch)[0]
bar.next("DAgger iteration: %s / %s" % (d + 1, dagger_itr))
self.rtplot.plot(running_loss / len(dataloader))
self.rtplot.done()
"""
|
mit
| 2,729,575,054,047,871,500 | 34.070423 | 81 | 0.532744 | false |
uwcirg/true_nth_usa_portal
|
portal/migrations/versions/7ad0da0d1b72_.py
|
1
|
1909
|
"""empty message
Revision ID: 7ad0da0d1b72
Revises: af193c376724
Create Date: 2017-07-06 17:42:35.513647
"""
# revision identifiers, used by Alembic.
revision = '7ad0da0d1b72'
down_revision = 'af193c376724'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('recurs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('days_to_start', sa.Integer(), nullable=False),
sa.Column('days_in_cycle', sa.Integer(), nullable=False),
sa.Column('days_till_termination',
sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'questionnaire_bank_questionnaire_recurs',
sa.Column(
'id',
sa.Integer(),
nullable=False),
sa.Column(
'questionnaire_bank_questionnaire_id',
sa.Integer(),
nullable=False),
sa.Column(
'recur_id',
sa.Integer(),
nullable=False),
sa.ForeignKeyConstraint(
['questionnaire_bank_questionnaire_id'],
['questionnaire_bank_questionnaires.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['recur_id'],
['recurs.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint(
'questionnaire_bank_questionnaire_id',
'recur_id',
name='_questionnaire_bank_questionnaire_recure'))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('questionnaire_bank_questionnaire_recurs')
op.drop_table('recurs')
# ### end Alembic commands ###
|
bsd-3-clause
| 2,077,013,238,323,017,500 | 30.295082 | 77 | 0.55736 | false |
googleapis/python-channel
|
google/cloud/channel_v1/services/cloud_channel_service/transports/base.py
|
1
|
22245
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.channel_v1.types import channel_partner_links
from google.cloud.channel_v1.types import customers
from google.cloud.channel_v1.types import entitlements
from google.cloud.channel_v1.types import offers
from google.cloud.channel_v1.types import service
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-channel",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class CloudChannelServiceTransport(abc.ABC):
"""Abstract transport class for CloudChannelService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/apps.order",)
DEFAULT_HOST: str = "cloudchannel.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_customers: gapic_v1.method.wrap_method(
self.list_customers, default_timeout=None, client_info=client_info,
),
self.get_customer: gapic_v1.method.wrap_method(
self.get_customer, default_timeout=None, client_info=client_info,
),
self.check_cloud_identity_accounts_exist: gapic_v1.method.wrap_method(
self.check_cloud_identity_accounts_exist,
default_timeout=None,
client_info=client_info,
),
self.create_customer: gapic_v1.method.wrap_method(
self.create_customer, default_timeout=None, client_info=client_info,
),
self.update_customer: gapic_v1.method.wrap_method(
self.update_customer, default_timeout=None, client_info=client_info,
),
self.delete_customer: gapic_v1.method.wrap_method(
self.delete_customer, default_timeout=None, client_info=client_info,
),
self.provision_cloud_identity: gapic_v1.method.wrap_method(
self.provision_cloud_identity,
default_timeout=60.0,
client_info=client_info,
),
self.list_entitlements: gapic_v1.method.wrap_method(
self.list_entitlements, default_timeout=None, client_info=client_info,
),
self.list_transferable_skus: gapic_v1.method.wrap_method(
self.list_transferable_skus,
default_timeout=None,
client_info=client_info,
),
self.list_transferable_offers: gapic_v1.method.wrap_method(
self.list_transferable_offers,
default_timeout=None,
client_info=client_info,
),
self.get_entitlement: gapic_v1.method.wrap_method(
self.get_entitlement, default_timeout=None, client_info=client_info,
),
self.create_entitlement: gapic_v1.method.wrap_method(
self.create_entitlement, default_timeout=60.0, client_info=client_info,
),
self.change_parameters: gapic_v1.method.wrap_method(
self.change_parameters, default_timeout=60.0, client_info=client_info,
),
self.change_renewal_settings: gapic_v1.method.wrap_method(
self.change_renewal_settings,
default_timeout=60.0,
client_info=client_info,
),
self.change_offer: gapic_v1.method.wrap_method(
self.change_offer, default_timeout=60.0, client_info=client_info,
),
self.start_paid_service: gapic_v1.method.wrap_method(
self.start_paid_service, default_timeout=60.0, client_info=client_info,
),
self.suspend_entitlement: gapic_v1.method.wrap_method(
self.suspend_entitlement, default_timeout=60.0, client_info=client_info,
),
self.cancel_entitlement: gapic_v1.method.wrap_method(
self.cancel_entitlement, default_timeout=60.0, client_info=client_info,
),
self.activate_entitlement: gapic_v1.method.wrap_method(
self.activate_entitlement,
default_timeout=60.0,
client_info=client_info,
),
self.transfer_entitlements: gapic_v1.method.wrap_method(
self.transfer_entitlements,
default_timeout=60.0,
client_info=client_info,
),
self.transfer_entitlements_to_google: gapic_v1.method.wrap_method(
self.transfer_entitlements_to_google,
default_timeout=60.0,
client_info=client_info,
),
self.list_channel_partner_links: gapic_v1.method.wrap_method(
self.list_channel_partner_links,
default_timeout=None,
client_info=client_info,
),
self.get_channel_partner_link: gapic_v1.method.wrap_method(
self.get_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.create_channel_partner_link: gapic_v1.method.wrap_method(
self.create_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.update_channel_partner_link: gapic_v1.method.wrap_method(
self.update_channel_partner_link,
default_timeout=None,
client_info=client_info,
),
self.lookup_offer: gapic_v1.method.wrap_method(
self.lookup_offer, default_timeout=None, client_info=client_info,
),
self.list_products: gapic_v1.method.wrap_method(
self.list_products, default_timeout=None, client_info=client_info,
),
self.list_skus: gapic_v1.method.wrap_method(
self.list_skus, default_timeout=None, client_info=client_info,
),
self.list_offers: gapic_v1.method.wrap_method(
self.list_offers, default_timeout=None, client_info=client_info,
),
self.list_purchasable_skus: gapic_v1.method.wrap_method(
self.list_purchasable_skus,
default_timeout=None,
client_info=client_info,
),
self.list_purchasable_offers: gapic_v1.method.wrap_method(
self.list_purchasable_offers,
default_timeout=None,
client_info=client_info,
),
self.register_subscriber: gapic_v1.method.wrap_method(
self.register_subscriber, default_timeout=None, client_info=client_info,
),
self.unregister_subscriber: gapic_v1.method.wrap_method(
self.unregister_subscriber,
default_timeout=None,
client_info=client_info,
),
self.list_subscribers: gapic_v1.method.wrap_method(
self.list_subscribers, default_timeout=None, client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
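    # Each RPC below is exposed as a property returning the callable for that
    # method. The base transport raises NotImplementedError; concrete transports
    # (e.g. gRPC or gRPC-asyncio) override these, which is why the return types
    # allow both synchronous results and Awaitables.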
@property
def list_customers(
self,
) -> Callable[
[service.ListCustomersRequest],
Union[service.ListCustomersResponse, Awaitable[service.ListCustomersResponse]],
]:
raise NotImplementedError()
@property
def get_customer(
self,
) -> Callable[
[service.GetCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def check_cloud_identity_accounts_exist(
self,
) -> Callable[
[service.CheckCloudIdentityAccountsExistRequest],
Union[
service.CheckCloudIdentityAccountsExistResponse,
Awaitable[service.CheckCloudIdentityAccountsExistResponse],
],
]:
raise NotImplementedError()
@property
def create_customer(
self,
) -> Callable[
[service.CreateCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def update_customer(
self,
) -> Callable[
[service.UpdateCustomerRequest],
Union[customers.Customer, Awaitable[customers.Customer]],
]:
raise NotImplementedError()
@property
def delete_customer(
self,
) -> Callable[
[service.DeleteCustomerRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def provision_cloud_identity(
self,
) -> Callable[
[service.ProvisionCloudIdentityRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_entitlements(
self,
) -> Callable[
[service.ListEntitlementsRequest],
Union[
service.ListEntitlementsResponse,
Awaitable[service.ListEntitlementsResponse],
],
]:
raise NotImplementedError()
@property
def list_transferable_skus(
self,
) -> Callable[
[service.ListTransferableSkusRequest],
Union[
service.ListTransferableSkusResponse,
Awaitable[service.ListTransferableSkusResponse],
],
]:
raise NotImplementedError()
@property
def list_transferable_offers(
self,
) -> Callable[
[service.ListTransferableOffersRequest],
Union[
service.ListTransferableOffersResponse,
Awaitable[service.ListTransferableOffersResponse],
],
]:
raise NotImplementedError()
@property
def get_entitlement(
self,
) -> Callable[
[service.GetEntitlementRequest],
Union[entitlements.Entitlement, Awaitable[entitlements.Entitlement]],
]:
raise NotImplementedError()
@property
def create_entitlement(
self,
) -> Callable[
[service.CreateEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_parameters(
self,
) -> Callable[
[service.ChangeParametersRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_renewal_settings(
self,
) -> Callable[
[service.ChangeRenewalSettingsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def change_offer(
self,
) -> Callable[
[service.ChangeOfferRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def start_paid_service(
self,
) -> Callable[
[service.StartPaidServiceRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def suspend_entitlement(
self,
) -> Callable[
[service.SuspendEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_entitlement(
self,
) -> Callable[
[service.CancelEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def activate_entitlement(
self,
) -> Callable[
[service.ActivateEntitlementRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def transfer_entitlements(
self,
) -> Callable[
[service.TransferEntitlementsRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def transfer_entitlements_to_google(
self,
) -> Callable[
[service.TransferEntitlementsToGoogleRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def list_channel_partner_links(
self,
) -> Callable[
[service.ListChannelPartnerLinksRequest],
Union[
service.ListChannelPartnerLinksResponse,
Awaitable[service.ListChannelPartnerLinksResponse],
],
]:
raise NotImplementedError()
@property
def get_channel_partner_link(
self,
) -> Callable[
[service.GetChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def create_channel_partner_link(
self,
) -> Callable[
[service.CreateChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def update_channel_partner_link(
self,
) -> Callable[
[service.UpdateChannelPartnerLinkRequest],
Union[
channel_partner_links.ChannelPartnerLink,
Awaitable[channel_partner_links.ChannelPartnerLink],
],
]:
raise NotImplementedError()
@property
def lookup_offer(
self,
) -> Callable[
[service.LookupOfferRequest], Union[offers.Offer, Awaitable[offers.Offer]]
]:
raise NotImplementedError()
@property
def list_products(
self,
) -> Callable[
[service.ListProductsRequest],
Union[service.ListProductsResponse, Awaitable[service.ListProductsResponse]],
]:
raise NotImplementedError()
@property
def list_skus(
self,
) -> Callable[
[service.ListSkusRequest],
Union[service.ListSkusResponse, Awaitable[service.ListSkusResponse]],
]:
raise NotImplementedError()
@property
def list_offers(
self,
) -> Callable[
[service.ListOffersRequest],
Union[service.ListOffersResponse, Awaitable[service.ListOffersResponse]],
]:
raise NotImplementedError()
@property
def list_purchasable_skus(
self,
) -> Callable[
[service.ListPurchasableSkusRequest],
Union[
service.ListPurchasableSkusResponse,
Awaitable[service.ListPurchasableSkusResponse],
],
]:
raise NotImplementedError()
@property
def list_purchasable_offers(
self,
) -> Callable[
[service.ListPurchasableOffersRequest],
Union[
service.ListPurchasableOffersResponse,
Awaitable[service.ListPurchasableOffersResponse],
],
]:
raise NotImplementedError()
@property
def register_subscriber(
self,
) -> Callable[
[service.RegisterSubscriberRequest],
Union[
service.RegisterSubscriberResponse,
Awaitable[service.RegisterSubscriberResponse],
],
]:
raise NotImplementedError()
@property
def unregister_subscriber(
self,
) -> Callable[
[service.UnregisterSubscriberRequest],
Union[
service.UnregisterSubscriberResponse,
Awaitable[service.UnregisterSubscriberResponse],
],
]:
raise NotImplementedError()
@property
def list_subscribers(
self,
) -> Callable[
[service.ListSubscribersRequest],
Union[
service.ListSubscribersResponse, Awaitable[service.ListSubscribersResponse]
],
]:
raise NotImplementedError()
__all__ = ("CloudChannelServiceTransport",)
|
apache-2.0
| -669,878,659,928,609,400 | 33.488372 | 103 | 0.616768 | false |
nocarryr/plotly-system-stats
|
plotly_system_stats/plotting/plot_config.py
|
1
|
1381
|
import plotly.tools as pl_tools
from plotly_system_stats.config import config
def build_config():
d = dict(
all_stream_ids=[],
streams={},
figures={},
plots={},
)
config.set('plotly', d)
if config.get('plotly') is None:
build_config()
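# PlotConfig is a thin wrapper around the 'plotly' section of the shared config:
# it tracks known stream ids, which streams are in use, and figure/plot metadata,
# persisting changes through config.write_all().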
class PlotConfig(object):
@property
def data(self):
return config.get('plotly')
def write_all(self):
config.write_all()
def set(self, key, item):
self.data[key] = item
config.write_all()
def get(self, key, default=None):
return self.data.get(key, default)
def update(self, d):
self.data.update(d)
config.write_all()
def refresh_stream_ids(self):
changed = False
stream_ids = pl_tools.get_credentials_file()['stream_ids']
for stream_id in stream_ids:
if stream_id in self.data['all_stream_ids']:
continue
self.data['all_stream_ids'].append(stream_id)
changed = True
if changed:
self.write_all()
def get_available_stream_id(self):
self.refresh_stream_ids()
for stream_id in self.data['all_stream_ids']:
if stream_id in self.data['streams']:
continue
return stream_id
return None
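# Module-level singleton used by the rest of the plotting package, e.g.
# (illustrative): stream_id = plot_config.get_available_stream_id()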
plot_config = PlotConfig()
|
gpl-2.0
| -1,374,831,903,278,039,800 | 25.557692 | 66 | 0.55105 | false |
djfan/wifind
|
pipeline/v1.0/etl.py
|
1
|
4488
|
import csv
import configparser
import logging
from builtins import range
from carto.auth import APIKeyAuthClient
from carto.sql import SQLClient
from carto.sql import BatchSQLClient
logger = logging.getLogger('carto-etl')
config = configparser.RawConfigParser()
config.read("etl.conf")
CARTO_BASE_URL = config.get('carto', 'base_url')
CARTO_API_KEY = config.get('carto', 'api_key')
CARTO_TABLE_NAME = config.get('carto', 'table_name')
CARTO_COLUMNS = config.get('carto', 'columns')
CHUNK_SIZE = int(config.get('etl', 'chunk_size'))
MAX_ATTEMPTS = int(config.get('etl', 'max_attempts'))
api_auth = APIKeyAuthClient(base_url=CARTO_BASE_URL, api_key=CARTO_API_KEY, organization='nyu')
sql = SQLClient(api_auth)
bsql = BatchSQLClient(api_auth)
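# SQLClient issues synchronous SQL API requests; BatchSQLClient submits
# long-running jobs to the Batch SQL API (used below for overview regeneration).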
def ClearContent(table):
sql.send("TRUNCATE TABLE " + table)
def chunks(full_list, chunk_size, start_chunk=1, end_chunk=None):
finished = False
while finished is False:
chunk = []
for chunk_num in range(chunk_size):
if chunk_num < (start_chunk - 1):
continue
if end_chunk is not None and chunk_num >= end_chunk:
return
try:
chunk.append(next(full_list))
except StopIteration:
finished = True
if len(chunk) > 0:
continue
else:
return
yield chunk
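# Illustrative example: chunks(iter(range(10)), 4) yields [0, 1, 2, 3],
# [4, 5, 6, 7] and finally [8, 9], so CSV rows can be grouped into one
# multi-row INSERT per chunk.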
class UploadJob(object):
def __init__(self, csv_file_path, table, x_column="longitude", y_column="latitude", srid=4326):
self.csv_file_path = csv_file_path
self.x_column = x_column
self.y_column = y_column
self.srid = srid
self.table = table
def run(self):
        raise NotImplementedError()
def regenerate_overviews(self):
        query = 'select CDB_CreateOverviews(\'{table}\'::regclass)'.format(table=self.table)
job_result = bsql.create(query)
return job_result['job_id']
def check_job(self, job_id):
return bsql.read(job_id)
class InsertJob(UploadJob):
def __init__(self, csv_file_path, table, x_column="longitude", y_column="latitude", srid=4326):
self.csv_file_path = csv_file_path
#self.x_column = x_column
#self.y_column = y_column
self.srid = srid
self.table = table
def run(self, start_chunk=1, end_chunk=None):
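        # Builds one multi-row INSERT per chunk: each record's geometry is read
        # as WKT from the 'geometry' column and reprojected to EPSG:4326, numeric
        # values are inlined unquoted, everything else is single-quoted, and each
        # statement is retried up to MAX_ATTEMPTS times.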
with open(self.csv_file_path) as f:
a=''
csv_reader = csv.DictReader(f)
for chunk_num, record_chunk in enumerate(chunks(csv_reader, CHUNK_SIZE, start_chunk, end_chunk)):
query = "insert into {table_name} (the_geom,{columns}) values".format(table_name=self.table, columns=CARTO_COLUMNS.lower())
for record in record_chunk:
# query += " (st_transform(st_setsrid(st_makepoint({longitude}, {latitude}), {srid}), 4326),".format(longitude=record[self.x_column], latitude=record[self.y_column], srid=self.srid)
query += " (st_transform(st_setsrid(st_geomfromtext('{geometry}'),{srid}), 4326),".format(geometry=record['geometry'], srid=self.srid)
for column in CARTO_COLUMNS.split(","):
try:
float(record[column])
except ValueError:
query += "'{value}',".format(value=record[column])
else:
query += "{value},".format(value=record[column])
query = query[:-1] + "),"
query = query[:-1]
#query = query.replace("'", "''")
logger.debug("Chunk #{chunk_num}: {query}".format(chunk_num=(chunk_num + 1), query=query))
for retry in range(MAX_ATTEMPTS):
try:
sql.send(query)
a = a + 'send'
except Exception as e:
logger.warning("Chunk #{chunk_num}: Retrying ({error_msg})".format(chunk_num=(chunk_num + 1), error_msg=e))
a=a+'error'
else:
logger.info("Chunk #{chunk_num}: Success!".format(chunk_num=(chunk_num + 1)))
a=a+'end'
break
else:
logger.error("Chunk #{chunk_num}: Failed!)".format(chunk_num=(chunk_num + 1)))
a=a+'fail'
return query[:20] + a + query[-10:]
|
mit
| -6,131,026,039,456,346,000 | 38.368421 | 201 | 0.548797 | false |
evilhero/mylar
|
mylar/importer.py
|
1
|
80815
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import time
import os, errno
import sys
import shlex
import datetime
import re
import json
import urllib
import urllib2
import shutil
import imghdr
import sqlite3
import cherrypy
import requests
import mylar
from mylar import logger, filers, helpers, db, mb, cv, parseit, filechecker, search, updater, moveit, comicbookdb
def is_exists(comicid):
myDB = db.DBConnection()
# See if the artist is already in the database
comiclist = myDB.select('SELECT ComicID, ComicName from comics WHERE ComicID=?', [comicid])
if any(comicid in x for x in comiclist):
logger.info(comiclist[0][1] + ' is already in the database.')
        return True
else:
return False
def addComictoDB(comicid, mismatch=None, pullupd=None, imported=None, ogcname=None, calledfrom=None, annload=None, chkwant=None, issuechk=None, issuetype=None, latestissueinfo=None, csyear=None, fixed_type=None):
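    # Adds a new series (or refreshes an existing one) from ComicVine data:
    # validates/creates the comic folder, grabs cover art and the full issue
    # list, writes issue/annual rows, and optionally triggers pullist checks
    # and searches for issues marked Wanted.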
myDB = db.DBConnection()
controlValueDict = {"ComicID": comicid}
dbcomic = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
if dbcomic is None:
newValueDict = {"ComicName": "Comic ID: %s" % (comicid),
"Status": "Loading"}
if all([imported is not None, mylar.CONFIG.IMP_PATHS is True]):
comlocation = os.path.dirname(imported['filelisting'][0]['comiclocation'])
else:
comlocation = None
oldcomversion = None
series_status = 'Loading'
lastissueid = None
aliases = None
else:
if chkwant is not None:
logger.fdebug('ComicID: ' + str(comicid) + ' already exists. Not adding from the future pull list at this time.')
return 'Exists'
if dbcomic['Status'] == 'Active':
series_status = 'Active'
elif dbcomic['Status'] == 'Paused':
series_status = 'Paused'
else:
series_status = 'Loading'
newValueDict = {"Status": "Loading"}
comlocation = dbcomic['ComicLocation']
lastissueid = dbcomic['LatestIssueID']
aliases = dbcomic['AlternateSearch']
logger.info('aliases currently: %s' % aliases)
if not latestissueinfo:
latestissueinfo = []
latestissueinfo.append({"latestiss": dbcomic['LatestIssue'],
"latestdate": dbcomic['LatestDate']})
if mylar.CONFIG.CREATE_FOLDERS is True:
checkdirectory = filechecker.validateAndCreateDirectory(comlocation, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
oldcomversion = dbcomic['ComicVersion'] #store the comicversion and chk if it exists before hammering.
myDB.upsert("comics", newValueDict, controlValueDict)
#run the re-sortorder here in order to properly display the page
if all([pullupd is None, calledfrom != 'maintenance']):
helpers.ComicSort(comicorder=mylar.COMICSORT, imported=comicid)
# we need to lookup the info for the requested ComicID in full now
comic = cv.getComic(comicid, 'comic')
if not comic:
logger.warn('Error fetching comic. ID for : ' + comicid)
if dbcomic is None:
newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid),
"Status": "Active"}
else:
if series_status == 'Active' or series_status == 'Loading':
newValueDict = {"Status": "Active"}
myDB.upsert("comics", newValueDict, controlValueDict)
return
if comic['ComicName'].startswith('The '):
sortname = comic['ComicName'][4:]
else:
sortname = comic['ComicName']
comic['Corrected_Type'] = fixed_type
if fixed_type is not None and fixed_type != comic['Type']:
logger.info('Forced Comic Type to : %s' % comic['Corrected_Type'])
logger.info('Now adding/updating: ' + comic['ComicName'])
#--Now that we know ComicName, let's try some scraping
#--Start
# gcd will return issue details (most importantly publishing date)
if not mylar.CONFIG.CV_ONLY:
if mismatch == "no" or mismatch is None:
gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid)
#print ("gcdinfo: " + str(gcdinfo))
mismatch_com = "no"
if gcdinfo == "No Match":
updater.no_searchresults(comicid)
nomatch = "true"
logger.info('There was an error when trying to add ' + comic['ComicName'] + ' (' + comic['ComicYear'] + ')')
return nomatch
else:
mismatch_com = "yes"
#print ("gcdinfo:" + str(gcdinfo))
elif mismatch == "yes":
CV_EXcomicid = myDB.selectone("SELECT * from exceptions WHERE ComicID=?", [comicid])
if CV_EXcomicid['variloop'] is None: pass
else:
vari_loop = CV_EXcomicid['variloop']
NewComicID = CV_EXcomicid['NewComicID']
gcomicid = CV_EXcomicid['GComicID']
resultURL = "/series/" + str(NewComicID) + "/"
#print ("variloop" + str(CV_EXcomicid['variloop']))
#if vari_loop == '99':
gcdinfo = parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=comicid, TotalIssues=0, issvariation="no", resultPublished=None)
# print ("Series Published" + parseit.resultPublished)
CV_NoYearGiven = "no"
#if the SeriesYear returned by CV is blank or none (0000), let's use the gcd one.
if any([comic['ComicYear'] is None, comic['ComicYear'] == '0000', comic['ComicYear'][-1:] == '-']):
if mylar.CONFIG.CV_ONLY:
#we'll defer this until later when we grab all the issues and then figure it out
logger.info('Uh-oh. I cannot find a Series Year for this series. I am going to try analyzing deeper.')
SeriesYear = cv.getComic(comicid, 'firstissue', comic['FirstIssueID'])
if SeriesYear == '0000':
logger.info('Ok - I could not find a Series Year at all. Loading in the issue data now and will figure out the Series Year.')
CV_NoYearGiven = "yes"
issued = cv.getComic(comicid, 'issue')
SeriesYear = issued['firstdate'][:4]
else:
SeriesYear = gcdinfo['SeriesYear']
else:
SeriesYear = comic['ComicYear']
if any([int(SeriesYear) > int(datetime.datetime.now().year) + 1, int(SeriesYear) == 2099]) and csyear is not None:
logger.info('Corrected year of ' + str(SeriesYear) + ' to corrected year for series that was manually entered previously of ' + str(csyear))
SeriesYear = csyear
logger.info('Successfully retrieved details for ' + comic['ComicName'])
#since the weekly issue check could return either annuals or issues, let's initialize it here so it carries through properly.
weeklyissue_check = []
if any([oldcomversion is None, oldcomversion == "None"]):
logger.info('Previous version detected as None - seeing if update required')
if comic['ComicVersion'].isdigit():
comicVol = 'v' + comic['ComicVersion']
logger.info('Updated version to :' + str(comicVol))
if all([mylar.CONFIG.SETDEFAULTVOLUME is False, comicVol == 'v1']):
comicVol = None
else:
if mylar.CONFIG.SETDEFAULTVOLUME is True:
comicVol = 'v1'
else:
comicVol = None
else:
comicVol = oldcomversion
if all([mylar.CONFIG.SETDEFAULTVOLUME is True, comicVol is None]):
comicVol = 'v1'
# setup default location here
u_comicnm = comic['ComicName']
# let's remove the non-standard characters here that will break filenaming / searching.
comicname_filesafe = helpers.filesafe(u_comicnm)
if comlocation is None:
comic_values = {'ComicName': comic['ComicName'],
'ComicPublisher': comic['ComicPublisher'],
'ComicYear': SeriesYear,
'ComicVersion': comicVol,
'Type': comic['Type'],
'Corrected_Type': comic['Corrected_Type']}
dothedew = filers.FileHandlers(comic=comic_values)
comlocation = dothedew.folder_create()
    #moved this out of the above loop so it will chk for existence of comlocation in case moved
#if it doesn't exist - create it (otherwise will bugger up later on)
if os.path.isdir(comlocation):
logger.info('Directory (' + comlocation + ') already exists! Continuing...')
else:
if mylar.CONFIG.CREATE_FOLDERS is True:
checkdirectory = filechecker.validateAndCreateDirectory(comlocation, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
#try to account for CV not updating new issues as fast as GCD
#seems CV doesn't update total counts
#comicIssues = gcdinfo['totalissues']
comicIssues = comic['ComicIssues']
if not mylar.CONFIG.CV_ONLY:
if gcdinfo['gcdvariation'] == "cv":
comicIssues = str(int(comic['ComicIssues']) + 1)
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is False:
cimage = os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg')
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
if os.path.isfile(cimage) is True:
logger.fdebug('Cover already exists for series. Not redownloading.')
else:
covercheck = helpers.getImage(comicid, comic['ComicImage'])
if covercheck == 'retry':
logger.info('Attempting to retrieve alternate comic image for the series.')
covercheck = helpers.getImage(comicid, comic['ComicImageALT'])
#if the comic cover local is checked, save a cover.jpg to the series folder.
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(os.path.join(comlocation, 'cover.jpg')) is False]):
try:
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copyfile(cimage, comiclocal)
if mylar.CONFIG.ENFORCE_PERMS:
filechecker.setperms(comiclocal)
except IOError as e:
logger.error('Unable to save cover (%s) into series directory (%s) at this time.' % (cimage, comiclocal))
else:
ComicImage = None
#for description ...
#Cdesc = helpers.cleanhtml(comic['ComicDescription'])
#cdes_find = Cdesc.find("Collected")
#cdes_removed = Cdesc[:cdes_find]
#logger.fdebug('description: ' + cdes_removed)
#dynamic-name generation here.
as_d = filechecker.FileChecker(watchcomic=comic['ComicName'])
as_dinfo = as_d.dynamic_replace(comic['ComicName'])
tmpseriesname = as_dinfo['mod_seriesname']
dynamic_seriesname = re.sub('[\|\s]','', tmpseriesname.lower()).strip()
if comic['Issue_List'] != 'None':
issue_list = json.dumps(comic['Issue_List'])
else:
issue_list = None
if comic['Aliases'] != 'None':
if all([aliases is not None, aliases != 'None']):
for x in aliases.split('##'):
aliaschk = [x for y in comic['Aliases'].split('##') if y == x]
if aliaschk and x not in aliases.split('##'):
aliases += '##' + ''.join(x)
else:
if x not in aliases.split('##'):
aliases += '##' + x
else:
aliases = comic['Aliases']
else:
aliases = aliases
logger.fdebug('comicIssues: %s' % comicIssues)
logger.fdebug('seriesyear: %s / currentyear: %s' % (SeriesYear, helpers.today()[:4]))
logger.fdebug('comicType: %s' % comic['Type'])
if all([int(comicIssues) == 1, SeriesYear < helpers.today()[:4], comic['Type'] != 'One-Shot', comic['Type'] != 'TPB']):
logger.info('Determined to be a one-shot issue. Forcing Edition to One-Shot')
booktype = 'One-Shot'
else:
booktype = comic['Type']
controlValueDict = {"ComicID": comicid}
newValueDict = {"ComicName": comic['ComicName'],
"ComicSortName": sortname,
"ComicName_Filesafe": comicname_filesafe,
"DynamicComicName": dynamic_seriesname,
"ComicYear": SeriesYear,
"ComicImage": ComicImage,
"ComicImageURL": comic.get("ComicImage", ""),
"ComicImageALTURL": comic.get("ComicImageALT", ""),
"Total": comicIssues,
"ComicVersion": comicVol,
"ComicLocation": comlocation,
"ComicPublisher": comic['ComicPublisher'],
# "Description": Cdesc, #.dencode('utf-8', 'replace'),
"DetailURL": comic['ComicURL'],
"AlternateSearch": aliases,
# "ComicPublished": gcdinfo['resultPublished'],
"ComicPublished": "Unknown",
"Type": booktype,
"Corrected_Type": comic['Corrected_Type'],
"Collects": issue_list,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
#comicsort here...
#run the re-sortorder here in order to properly display the page
if all([pullupd is None, calledfrom != 'maintenance']):
helpers.ComicSort(sequence='update')
if CV_NoYearGiven == 'no':
#if set to 'no' then we haven't pulled down the issues, otherwise we did it already
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Unable to retrieve data from ComicVine. Get your own API key already!')
return
    logger.info('Successfully retrieved issue details for ' + comic['ComicName'])
#move to own function so can call independently to only refresh issue data
#issued is from cv.getComic, comic['ComicName'] & comicid would both be already known to do independent call.
updateddata = updateissuedata(comicid, comic['ComicName'], issued, comicIssues, calledfrom, SeriesYear=SeriesYear, latestissueinfo=latestissueinfo)
issuedata = updateddata['issuedata']
anndata = updateddata['annualchk']
nostatus = updateddata['nostatus']
importantdates = updateddata['importantdates']
if issuedata is None:
logger.warn('Unable to complete Refreshing / Adding issue data - this WILL create future problems if not addressed.')
return {'status': 'incomplete'}
if any([calledfrom is None, calledfrom == 'maintenance']):
issue_collection(issuedata, nostatus='False')
#need to update annuals at this point too....
if anndata:
manualAnnual(annchk=anndata)
if mylar.CONFIG.ALTERNATE_LATEST_SERIES_COVERS is True: #, lastissueid != importantdates['LatestIssueID']]):
if os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg') is True:
cover_modtime = datetime.datetime.utcfromtimestamp(os.path.getmtime(os.path.join(mylar.CONFIG.CACHE_DIR, comicid + '.jpg')))
cover_mtime = datetime.datetime.strftime(cover_modtime, '%Y-%m-%d')
if importantdates['LatestStoreDate'] != '0000-00-00':
lsd = re.sub('-', '', importantdates['LatestStoreDate']).strip()
else:
lsd = re.sub('-', '', importantdates['LatestDate']).strip()
if re.sub('-', '', cover_mtime).strip() < lsd:
logger.info('Attempting to retrieve new issue cover for display')
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
else:
image_it(comicid, importantdates['LatestIssueID'], comlocation, comic['ComicImage'])
else:
logger.fdebug('no update required - lastissueid [%s] = latestissueid [%s]' % (lastissueid, importantdates['LatestIssueID']))
if (mylar.CONFIG.CVINFO or (mylar.CONFIG.CV_ONLY and mylar.CONFIG.CVINFO)) and os.path.isdir(comlocation):
if os.path.isfile(os.path.join(comlocation, "cvinfo")) is False:
with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
text_file.write(str(comic['ComicURL']))
if calledfrom == 'weekly':
logger.info('Successfully refreshed ' + comic['ComicName'] + ' (' + str(SeriesYear) + '). Returning to Weekly issue comparison.')
logger.info('Update issuedata for ' + str(issuechk) + ' of : ' + str(weeklyissue_check))
return {'status': 'complete',
'issuedata': issuedata} # this should be the weeklyissue_check data from updateissuedata function
elif calledfrom == 'dbupdate':
logger.info('returning to dbupdate module')
return {'status': 'complete',
'issuedata': issuedata,
'anndata': anndata } # this should be the issuedata data from updateissuedata function
elif calledfrom == 'weeklycheck':
logger.info('Successfully refreshed ' + comic['ComicName'] + ' (' + str(SeriesYear) + '). Returning to Weekly issue update.')
return #no need to return any data here.
logger.info('Updating complete for: ' + comic['ComicName'])
#if it made it here, then the issuedata contains dates, let's pull the data now.
latestiss = importantdates['LatestIssue']
latestdate = importantdates['LatestDate']
lastpubdate = importantdates['LastPubDate']
series_status = importantdates['SeriesStatus']
#move the files...if imported is not empty & not futurecheck (meaning it's not from the mass importer.)
#logger.info('imported is : ' + str(imported))
if imported is None or imported == 'None' or imported == 'futurecheck':
pass
else:
if mylar.CONFIG.IMP_MOVE:
logger.info('Mass import - Move files')
moveit.movefiles(comicid, comlocation, imported)
else:
logger.info('Mass import - Moving not Enabled. Setting Archived Status for import.')
moveit.archivefiles(comicid, comlocation, imported)
#check for existing files...
statbefore = myDB.selectone("SELECT Status FROM issues WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(latestiss)]).fetchone()
logger.fdebug('issue: ' + latestiss + ' status before chk :' + str(statbefore['Status']))
updater.forceRescan(comicid)
statafter = myDB.selectone("SELECT Status FROM issues WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(latestiss)]).fetchone()
logger.fdebug('issue: ' + latestiss + ' status after chk :' + str(statafter['Status']))
logger.fdebug('pullupd: ' + str(pullupd))
logger.fdebug('lastpubdate: ' + str(lastpubdate))
logger.fdebug('series_status: ' + str(series_status))
if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
# do this for only Present comics....
if mylar.CONFIG.AUTOWANT_UPCOMING and lastpubdate == 'Present' and series_status == 'Active': #and 'Present' in gcdinfo['resultPublished']:
logger.fdebug('latestissue: #' + str(latestiss))
chkstats = myDB.selectone("SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(latestiss)]).fetchone()
if chkstats is None:
if mylar.CONFIG.ANNUALS_ON:
chkstats = myDB.selectone("SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?", [comicid, helpers.issuedigits(latestiss)]).fetchone()
if chkstats:
logger.fdebug('latestissue status: ' + chkstats['Status'])
if chkstats['Status'] == 'Skipped' or chkstats['Status'] == 'Wanted' or chkstats['Status'] == 'Snatched':
logger.info('Checking this week pullist for new issues of ' + comic['ComicName'])
if comic['ComicName'] != comicname_filesafe:
cn_pull = comicname_filesafe
else:
cn_pull = comic['ComicName']
updater.newpullcheck(ComicName=cn_pull, ComicID=comicid, issue=latestiss)
#here we grab issues that have been marked as wanted above...
if calledfrom != 'maintenance':
results = []
issresults = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
if issresults:
for issr in issresults:
results.append({'IssueID': issr['IssueID'],
'Issue_Number': issr['Issue_Number'],
'Status': issr['Status']
})
if mylar.CONFIG.ANNUALS_ON:
an_results = myDB.select("SELECT * FROM annuals WHERE ComicID=? AND Status='Wanted'", [comicid])
if an_results:
for ar in an_results:
results.append({'IssueID': ar['IssueID'],
'Issue_Number': ar['Issue_Number'],
'Status': ar['Status']
})
if results:
logger.info('Attempting to grab wanted issues for : ' + comic['ComicName'])
for result in results:
logger.fdebug('Searching for : ' + str(result['Issue_Number']))
logger.fdebug('Status of : ' + str(result['Status']))
search.searchforissue(result['IssueID'])
else: logger.info('No issues marked as wanted for ' + comic['ComicName'])
logger.info('Finished grabbing what I could.')
else:
logger.info('Already have the latest issue : #' + str(latestiss))
if chkwant is not None:
#if this isn't None, this is being called from the futureupcoming list
#a new series was added automagically, but it has more than 1 issue (probably because it was a back-dated issue)
#the chkwant is a tuple containing all the data for the given series' issues that were marked as Wanted for futureupcoming dates.
chkresults = myDB.select("SELECT * FROM issues WHERE ComicID=? AND Status='Skipped'", [comicid])
if chkresults:
logger.info('[FROM THE FUTURE CHECKLIST] Attempting to grab wanted issues for : ' + comic['ComicName'])
for result in chkresults:
for chkit in chkwant:
logger.fdebug('checking ' + chkit['IssueNumber'] + ' against ' + result['Issue_Number'])
if chkit['IssueNumber'] == result['Issue_Number']:
logger.fdebug('Searching for : ' + result['Issue_Number'])
logger.fdebug('Status of : ' + str(result['Status']))
search.searchforissue(result['IssueID'])
else: logger.info('No issues marked as wanted for ' + comic['ComicName'])
logger.info('Finished grabbing what I could.')
if imported == 'futurecheck':
logger.info('Returning to Future-Check module to complete the add & remove entry.')
return
elif all([imported is not None, imported != 'None']):
logger.info('Successfully imported : ' + comic['ComicName'])
return
if calledfrom == 'addbyid':
        logger.info('Successfully added %s (%s) to the watchlist by directly using the ComicVine ID' % (comic['ComicName'], SeriesYear))
return {'status': 'complete'}
elif calledfrom == 'maintenance':
        logger.info('Successfully added %s (%s) to the watchlist' % (comic['ComicName'], SeriesYear))
return {'status': 'complete',
'comicname': comic['ComicName'],
'year': SeriesYear}
else:
        logger.info('Successfully added %s (%s) to the watchlist' % (comic['ComicName'], SeriesYear))
return {'status': 'complete'}
# if imported['Volume'] is None or imported['Volume'] == 'None':
# results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume IS NULL",[imported['DynamicName']])
# else:
# if not imported['Volume'].lower().startswith('v'):
# volume = 'v' + str(imported['Volume'])
# results = myDB.select("SELECT * FROM importresults WHERE (WatchMatch is Null OR WatchMatch LIKE 'C%') AND DynamicName=? AND Volume=?",[imported['DynamicName'],imported['Volume']])
#
# if results is not None:
# for result in results:
# controlValue = {"DynamicName": imported['DynamicName'],
# "Volume": imported['Volume']}
# newValue = {"Status": "Imported",
# "SRID": result['SRID'],
# "ComicID": comicid}
# myDB.upsert("importresults", newValue, controlValue)
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
# this is for importing via GCD only and not using CV.
# used when volume spanning is discovered for a Comic (and can't be added using CV).
# Issue Counts are wrong (and can't be added).
# because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
# CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
gcdcomicid = gcomicid
myDB = db.DBConnection()
# We need the current minimal info in the database instantly
# so we don't throw a 500 error when we redirect to the artistPage
controlValueDict = {"ComicID": gcdcomicid}
comic = myDB.selectone('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
ComicName = comic[0]
ComicYear = comic[1]
ComicIssues = comic[2]
ComicPublished = comic[3]
comlocation = comic[5]
ComicPublisher = comic[6]
#ComicImage = comic[4]
#print ("Comic:" + str(ComicName))
newValueDict = {"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
# we need to lookup the info for the requested ComicID in full now
#comic = cv.getComic(comicid,'comic')
if not comic:
logger.warn('Error fetching comic. ID for : ' + gcdcomicid)
if dbcomic is None:
newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
"Status": "Active"}
else:
newValueDict = {"Status": "Active"}
myDB.upsert("comics", newValueDict, controlValueDict)
return
#run the re-sortorder here in order to properly display the page
if pullupd is None:
helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)
if ComicName.startswith('The '):
sortname = ComicName[4:]
else:
sortname = ComicName
logger.info(u"Now adding/updating: " + ComicName)
#--Now that we know ComicName, let's try some scraping
#--Start
# gcd will return issue details (most importantly publishing date)
comicid = gcomicid[1:]
resultURL = "/series/" + str(comicid) + "/"
gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
if gcdinfo == "No Match":
logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
updater.no_searchresults(gcomicid)
nomatch = "true"
return nomatch
logger.info(u"Sucessfully retrieved details for " + ComicName)
# print ("Series Published" + parseit.resultPublished)
#--End
ComicImage = gcdinfo['ComicImage']
#comic book location on machine
# setup default location here
if comlocation is None:
# let's remove the non-standard characters here.
u_comicnm = ComicName
u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
comicdir = u_comicname
if ':' in comicdir:
comicdir = comicdir.replace(':', '')
if '/' in comicdir:
comicdir = comicdir.replace('/', '-')
if ',' in comicdir:
comicdir = comicdir.replace(',', '')
else: comicdir = u_comicname
series = comicdir
publisher = ComicPublisher
year = ComicYear
#do work to generate folder path
values = {'$Series': series,
'$Publisher': publisher,
'$Year': year,
'$series': series.lower(),
'$publisher': publisher.lower(),
'$Volume': year
}
if mylar.CONFIG.FOLDER_FORMAT == '':
comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
else:
comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + helpers.replace_all(mylar.CONFIG.FOLDER_FORMAT, values)
#comlocation = mylar.CONFIG.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
if mylar.CONFIG.DESTINATION_DIR == "":
logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
return
if mylar.CONFIG.REPLACE_SPACES:
#mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
comlocation = comlocation.replace(' ', mylar.CONFIG.REPLACE_CHAR)
#if it doesn't exist - create it (otherwise will bugger up later on)
if os.path.isdir(comlocation):
logger.info(u"Directory (" + comlocation + ") already exists! Continuing...")
else:
if mylar.CONFIG.CREATE_FOLDERS is True:
checkdirectory = filechecker.validateAndCreateDirectory(comlocation, True)
if not checkdirectory:
logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
return
comicIssues = gcdinfo['totalissues']
#let's download the image...
if os.path.exists(mylar.CONFIG.CACHE_DIR): pass
else:
#let's make the dir.
try:
os.makedirs(str(mylar.CONFIG.CACHE_DIR))
logger.info(u"Cache Directory successfully created at: " + str(mylar.CONFIG.CACHE_DIR))
except OSError:
logger.error(u"Could not create cache dir : " + str(mylar.CONFIG.CACHE_DIR))
coverfile = os.path.join(mylar.CONFIG.CACHE_DIR, str(gcomicid) + ".jpg")
#new CV API restriction - one api request / second.
if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
time.sleep(2)
else:
time.sleep(mylar.CONFIG.CVAPI_RATE)
urllib.urlretrieve(str(ComicImage), str(coverfile))
try:
with open(str(coverfile)) as f:
ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")
#this is for Firefox when outside the LAN...it works, but I don't know how to implement it
#without breaking the normal flow for inside the LAN (above)
#ComicImage = "http://" + str(mylar.CONFIG.HTTP_HOST) + ":" + str(mylar.CONFIG.HTTP_PORT) + "/cache/" + str(comi$
logger.info(u"Sucessfully retrieved cover for " + ComicName)
#if the comic cover local is checked, save a cover.jpg to the series folder.
if mylar.CONFIG.COMIC_COVER_LOCAL and os.path.isdir(comlocation):
                comiclocal = os.path.join(comlocation, "cover.jpg")
shutil.copy(ComicImage, comiclocal)
except IOError as e:
logger.error(u"Unable to save cover locally at this time.")
#if comic['ComicVersion'].isdigit():
# comicVol = "v" + comic['ComicVersion']
#else:
# comicVol = None
controlValueDict = {"ComicID": gcomicid}
newValueDict = {"ComicName": ComicName,
"ComicSortName": sortname,
"ComicYear": ComicYear,
"Total": comicIssues,
"ComicLocation": comlocation,
#"ComicVersion": comicVol,
"ComicImage": ComicImage,
"ComicImageURL": comic.get('ComicImage', ''),
"ComicImageALTURL": comic.get('ComicImageALT', ''),
#"ComicPublisher": comic['ComicPublisher'],
#"ComicPublished": comicPublished,
"DateAdded": helpers.today(),
"Status": "Loading"}
myDB.upsert("comics", newValueDict, controlValueDict)
#comicsort here...
#run the re-sortorder here in order to properly display the page
if pullupd is None:
helpers.ComicSort(sequence='update')
logger.info(u"Sucessfully retrieved issue details for " + ComicName)
n = 0
iscnt = int(comicIssues)
issnum = []
issname = []
issdate = []
int_issnum = []
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
latestiss = "0"
latestdate = "0000-00-00"
#print ("total issues:" + str(iscnt))
#---removed NEW code here---
logger.info(u"Now adding/updating issues for " + ComicName)
bb = 0
while (bb <= iscnt):
#---NEW.code
try:
gcdval = gcdinfo['gcdchoice'][bb]
#print ("gcdval: " + str(gcdval))
except IndexError:
#account for gcd variation here
if gcdinfo['gcdvariation'] == 'gcd':
#print ("gcd-variation accounted for.")
issdate = '0000-00-00'
int_issnum = int (issis / 1000)
break
if 'nn' in str(gcdval['GCDIssue']):
#no number detected - GN, TP or the like
logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
updater.no_searchresults(comicid)
return
elif '.' in str(gcdval['GCDIssue']):
issst = str(gcdval['GCDIssue']).find('.')
issb4dec = str(gcdval['GCDIssue'])[:issst]
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(gcdval['GCDIssue'])[issst +1:]
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
if len(decis) == 2:
decisval = int(decis)
issaftdec = str(decisval)
if int(issaftdec) == 0: issaftdec = "00"
gcd_issue = issb4dec + "." + issaftdec
gcdis = (int(issb4dec) * 1000) + decisval
else:
gcdis = int(str(gcdval['GCDIssue'])) * 1000
gcd_issue = str(gcdval['GCDIssue'])
#get the latest issue / date using the date.
int_issnum = int(gcdis / 1000)
issdate = str(gcdval['GCDDate'])
issid = "G" + str(gcdval['IssueID'])
if gcdval['GCDDate'] > latestdate:
latestiss = str(gcd_issue)
latestdate = str(gcdval['GCDDate'])
#print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
#---END.NEW.
# check if the issue already exists
iss_exists = myDB.selectone('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()
# Only change the status & add DateAdded if the issue is not already in the database
if iss_exists is None:
newValueDict['DateAdded'] = helpers.today()
#adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
if "?" in str(issdate):
issdate = "0000-00-00"
controlValueDict = {"IssueID": issid}
newValueDict = {"ComicID": gcomicid,
"ComicName": ComicName,
"Issue_Number": gcd_issue,
"IssueDate": issdate,
"Int_IssueNumber": int_issnum
}
#print ("issueid:" + str(controlValueDict))
#print ("values:" + str(newValueDict))
if mylar.CONFIG.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
elif issdate > helpers.today() and mylar.CONFIG.AUTOWANT_UPCOMING:
newValueDict['Status'] = "Wanted"
else:
newValueDict['Status'] = "Skipped"
if iss_exists:
#print ("Existing status : " + str(iss_exists['Status']))
newValueDict['Status'] = iss_exists['Status']
myDB.upsert("issues", newValueDict, controlValueDict)
bb+=1
# logger.debug(u"Updating comic cache for " + ComicName)
# cache.getThumb(ComicID=issue['issueid'])
# logger.debug(u"Updating cache for: " + ComicName)
# cache.getThumb(ComicIDcomicid)
controlValueStat = {"ComicID": gcomicid}
newValueStat = {"Status": "Active",
"LatestIssue": latestiss,
"LatestDate": latestdate,
"LastUpdated": helpers.now()
}
myDB.upsert("comics", newValueStat, controlValueStat)
if mylar.CONFIG.CVINFO and os.path.isdir(comlocation):
if not os.path.exists(comlocation + "/cvinfo"):
with open(comlocation + "/cvinfo", "w") as text_file:
text_file.write("http://comicvine.gamespot.com/volume/49-" + str(comicid))
logger.info(u"Updating complete for: " + ComicName)
#move the files...if imported is not empty (meaning it's not from the mass importer.)
if imported is None or imported == 'None':
pass
else:
if mylar.CONFIG.IMP_MOVE:
logger.info("Mass import - Move files")
moveit.movefiles(gcomicid, comlocation, ogcname)
else:
logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
moveit.archivefiles(gcomicid, ogcname)
#check for existing files...
updater.forceRescan(gcomicid)
if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
if mylar.CONFIG.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
logger.info(u"Checking this week's pullist for new issues of " + ComicName)
updater.newpullcheck(comic['ComicName'], gcomicid)
#here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
if results:
logger.info(u"Attempting to grab wanted issues for : " + ComicName)
for result in results:
foundNZB = "none"
if (mylar.CONFIG.NZBSU or mylar.CONFIG.DOGNZB or mylar.CONFIG.EXPERIMENTAL or mylar.CONFIG.NEWZNAB) and (mylar.CONFIG.SAB_HOST):
foundNZB = search.searchforissue(result['IssueID'])
if foundNZB == "yes":
updater.foundsearch(result['ComicID'], result['IssueID'])
else: logger.info(u"No issues marked as wanted for " + ComicName)
logger.info(u"Finished grabbing what I could.")
def issue_collection(issuedata, nostatus):
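    # Writes the assembled issue records to the 'issues' table. When nostatus is
    # 'False', new rows get an initial Wanted/Skipped status based on the release
    # week and the autowant settings; rows that already exist keep their status.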
myDB = db.DBConnection()
nowdate = datetime.datetime.now()
now_week = datetime.datetime.strftime(nowdate, "%Y%U")
if issuedata:
for issue in issuedata:
controlValueDict = {"IssueID": issue['IssueID']}
newValueDict = {"ComicID": issue['ComicID'],
"ComicName": issue['ComicName'],
"IssueName": issue['IssueName'],
"Issue_Number": issue['Issue_Number'],
"IssueDate": issue['IssueDate'],
"ReleaseDate": issue['ReleaseDate'],
"DigitalDate": issue['DigitalDate'],
"Int_IssueNumber": issue['Int_IssueNumber'],
"ImageURL": issue['ImageURL'],
"ImageURL_ALT": issue['ImageURL_ALT']
#"Status": "Skipped" #set this to Skipped by default to avoid NULL entries.
}
# check if the issue already exists
iss_exists = myDB.selectone('SELECT * from issues WHERE IssueID=?', [issue['IssueID']]).fetchone()
dbwrite = "issues"
#if iss_exists is None:
# iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issue['IssueID']]).fetchone()
# if iss_exists:
# dbwrite = "annuals"
if nostatus == 'False':
# Only change the status & add DateAdded if the issue is already in the database
if iss_exists is None:
newValueDict['DateAdded'] = helpers.today()
if issue['ReleaseDate'] == '0000-00-00':
dk = re.sub('-', '', issue['IssueDate']).strip()
else:
dk = re.sub('-', '', issue['ReleaseDate']).strip() # converts date to 20140718 format
if dk == '00000000':
logger.warn('Issue Data is invalid for Issue Number %s. Marking this issue as Skipped' % issue['Issue_Number'])
newValueDict['Status'] = "Skipped"
else:
datechk = datetime.datetime.strptime(dk, "%Y%m%d")
issue_week = datetime.datetime.strftime(datechk, "%Y%U")
if mylar.CONFIG.AUTOWANT_ALL:
newValueDict['Status'] = "Wanted"
#logger.fdebug('autowant all')
elif issue_week >= now_week and mylar.CONFIG.AUTOWANT_UPCOMING:
#logger.fdebug(str(datechk) + ' >= ' + str(nowtime))
newValueDict['Status'] = "Wanted"
else:
newValueDict['Status'] = "Skipped"
#logger.fdebug('status is : ' + str(newValueDict))
else:
#logger.fdebug('Existing status for issue #%s : %s' % (issue['Issue_Number'], iss_exists['Status']))
if any([iss_exists['Status'] is None, iss_exists['Status'] == 'None']):
is_status = 'Skipped'
else:
is_status = iss_exists['Status']
newValueDict['Status'] = is_status
else:
#logger.fdebug("Not changing the status at this time - reverting to previous module after to re-append existing status")
pass #newValueDict['Status'] = "Skipped"
try:
myDB.upsert(dbwrite, newValueDict, controlValueDict)
except sqlite3.InterfaceError, e:
#raise sqlite3.InterfaceError(e)
logger.error('Something went wrong - I cannot add the issue information into my DB.')
myDB.action("DELETE FROM comics WHERE ComicID=?", [issue['ComicID']])
return
def manualAnnual(manual_comicid=None, comicname=None, comicyear=None, comicid=None, annchk=None, manualupd=False):
#called when importing/refreshing an annual that was manually added.
myDB = db.DBConnection()
if annchk is None:
nowdate = datetime.datetime.now()
now_week = datetime.datetime.strftime(nowdate, "%Y%U")
annchk = []
issueid = manual_comicid
logger.fdebug(str(issueid) + ' added to series list as an Annual')
sr = cv.getComic(manual_comicid, 'comic')
logger.fdebug('Attempting to integrate ' + sr['ComicName'] + ' (' + str(issueid) + ') to the existing series of ' + comicname + '(' + str(comicyear) + ')')
        if sr is None or len(sr) == 0:
logger.fdebug('Could not find any information on the series indicated : ' + str(manual_comicid))
return
else:
n = 0
issued = cv.getComic(re.sub('4050-', '', manual_comicid).strip(), 'issue')
if int(sr['ComicIssues']) == 0 and len(issued['issuechoice']) == 1:
noissues = 1
else:
noissues = sr['ComicIssues']
logger.fdebug('there are ' + str(noissues) + ' annuals within this series.')
while (n < int(noissues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
if firstval['Store_Date'] == '0000-00-00':
dk = re.sub('-', '', firstval['Issue_Date']).strip()
else:
dk = re.sub('-', '', firstval['Store_Date']).strip() # converts date to 20140718 format
if dk == '00000000':
logger.warn('Issue Data is invalid for Issue Number %s. Marking this issue as Skipped' % firstval['Issue_Number'])
astatus = "Skipped"
else:
datechk = datetime.datetime.strptime(dk, "%Y%m%d")
issue_week = datetime.datetime.strftime(datechk, "%Y%U")
if mylar.CONFIG.AUTOWANT_ALL:
astatus = "Wanted"
elif issue_week >= now_week and mylar.CONFIG.AUTOWANT_UPCOMING is True:
astatus = "Wanted"
else:
astatus = "Skipped"
annchk.append({'IssueID': str(firstval['Issue_ID']),
'ComicID': comicid,
'ReleaseComicID': re.sub('4050-', '', manual_comicid).strip(),
'ComicName': comicname,
'Issue_Number': str(firstval['Issue_Number']),
'IssueName': cleanname,
'IssueDate': str(firstval['Issue_Date']),
'ReleaseDate': str(firstval['Store_Date']),
'DigitalDate': str(firstval['Digital_Date']),
'Status': astatus,
'ReleaseComicName': sr['ComicName']})
n+=1
if manualupd is True:
return annchk
for ann in annchk:
newCtrl = {"IssueID": ann['IssueID']}
newVals = {"Issue_Number": ann['Issue_Number'],
"Int_IssueNumber": helpers.issuedigits(ann['Issue_Number']),
"IssueDate": ann['IssueDate'],
"ReleaseDate": ann['ReleaseDate'],
"DigitalDate": ann['DigitalDate'],
"IssueName": ann['IssueName'],
"ComicID": ann['ComicID'], #this is the series ID
"ReleaseComicID": ann['ReleaseComicID'], #this is the series ID for the annual(s)
"ComicName": ann['ComicName'], #series ComicName
"ReleaseComicName": ann['ReleaseComicName'], #series ComicName for the manual_comicid
"Status": ann['Status']}
#need to add in the values for the new series to be added.
#"M_ComicName": sr['ComicName'],
#"M_ComicID": manual_comicid}
myDB.upsert("annuals", newVals, newCtrl)
if len(annchk) > 0:
logger.info('Successfully integrated ' + str(len(annchk)) + ' annuals into the series: ' + annchk[0]['ComicName'])
return
def updateissuedata(comicid, comicname=None, issued=None, comicIssues=None, calledfrom=None, issuechk=None, issuetype=None, SeriesYear=None, latestissueinfo=None):
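    # Pulls the issue list from ComicVine (when not supplied), refreshes annuals,
    # and builds the issuedata records consumed by issue_collection(), including
    # the Int_IssueNumber sort key and the latest issue/date bookkeeping.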
annualchk = []
weeklyissue_check = []
logger.fdebug('issuedata call references...')
logger.fdebug('comicid: %s' % comicid)
logger.fdebug('comicname: %s' % comicname)
logger.fdebug('comicissues: %s' % comicIssues)
logger.fdebug('calledfrom: %s' % calledfrom)
logger.fdebug('issuechk: %s' % issuechk)
logger.fdebug('latestissueinfo: %s' % latestissueinfo)
logger.fdebug('issuetype: %s' % issuetype)
#to facilitate independent calls to updateissuedata ONLY, account for data not available and get it.
#chkType comes from the weeklypulllist - either 'annual' or not to distinguish annuals vs. issues
if comicIssues is None:
comic = cv.getComic(comicid, 'comic')
if comic is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
if comicIssues is None:
comicIssues = comic['ComicIssues']
if SeriesYear is None:
SeriesYear = comic['ComicYear']
if comicname is None:
comicname = comic['ComicName']
if issued is None:
issued = cv.getComic(comicid, 'issue')
if issued is None:
logger.warn('Error retrieving from ComicVine - either the site is down or you are not using your own CV API key')
return
# poll against annuals here - to make sure annuals are uptodate.
annualchk = annual_check(comicname, SeriesYear, comicid, issuetype, issuechk, annualchk)
if annualchk is None:
annualchk = []
logger.fdebug('Finished Annual checking.')
n = 0
iscnt = int(comicIssues)
issid = []
issnum = []
issname = []
issdate = []
issuedata = []
#let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
latestiss = "0"
latestdate = "0000-00-00"
latest_stdate = "0000-00-00"
latestissueid = None
firstiss = "10000000"
firstdate = "2099-00-00"
#print ("total issues:" + str(iscnt))
logger.info('Now adding/updating issues for ' + comicname)
if iscnt > 0: #if a series is brand new, it wont have any issues/details yet so skip this part
while (n <= iscnt):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])
issnum = firstval['Issue_Number']
issname = cleanname
issdate = str(firstval['Issue_Date'])
storedate = str(firstval['Store_Date'])
digitaldate = str(firstval['Digital_Date'])
int_issnum = None
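            # Int_IssueNumber encoding: the numeric part is multiplied by 1000 and
            # letter suffixes (AU, INH, NOW, MU, HU), fractions (1/4, 1/2, 3/4) and
            # decimals are folded into the lower digits so issues sort numerically.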
if issnum.isdigit():
int_issnum = int(issnum) * 1000
else:
if 'a.i.' in issnum.lower() or 'ai' in issnum.lower():
issnum = re.sub('\.', '', issnum)
#int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('i')
if 'au' in issnum.lower():
int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('u')
elif 'inh' in issnum.lower():
int_issnum = (int(issnum[:-4]) * 1000) + ord('i') + ord('n') + ord('h')
elif 'now' in issnum.lower():
int_issnum = (int(issnum[:-4]) * 1000) + ord('n') + ord('o') + ord('w')
elif 'mu' in issnum.lower():
int_issnum = (int(issnum[:-3]) * 1000) + ord('m') + ord('u')
elif 'hu' in issnum.lower():
int_issnum = (int(issnum[:-3]) * 1000) + ord('h') + ord('u')
elif u'\xbd' in issnum:
tmpiss = re.sub('[^0-9]', '', issnum).strip()
if len(tmpiss) > 0:
int_issnum = (int(tmpiss) + .5) * 1000
else:
int_issnum = .5 * 1000
logger.fdebug('1/2 issue detected :' + issnum + ' === ' + str(int_issnum))
elif u'\xbc' in issnum:
int_issnum = .25 * 1000
elif u'\xbe' in issnum:
int_issnum = .75 * 1000
elif u'\u221e' in issnum:
#issnum = utf-8 will encode the infinity symbol without any help
int_issnum = 9999999999 * 1000 # set 9999999999 for integer value of issue
elif '.' in issnum or ',' in issnum:
if ',' in issnum: issnum = re.sub(',', '.', issnum)
issst = str(issnum).find('.')
#logger.fdebug("issst:" + str(issst))
if issst == 0:
issb4dec = 0
else:
issb4dec = str(issnum)[:issst]
#logger.fdebug("issb4dec:" + str(issb4dec))
#if the length of decimal is only 1 digit, assume it's a tenth
decis = str(issnum)[issst +1:]
#logger.fdebug("decis:" + str(decis))
if len(decis) == 1:
decisval = int(decis) * 10
issaftdec = str(decisval)
elif len(decis) == 2:
decisval = int(decis)
issaftdec = str(decisval)
else:
decisval = decis
issaftdec = str(decisval)
#if there's a trailing decimal (ie. 1.50.) and it's either intentional or not, blow it away.
if issaftdec[-1:] == '.':
logger.fdebug('Trailing decimal located within issue number. Irrelevant to numbering. Obliterating.')
issaftdec = issaftdec[:-1]
try:
# int_issnum = str(issnum)
int_issnum = (int(issb4dec) * 1000) + (int(issaftdec) * 10)
except ValueError:
logger.error('This has no issue # for me to get - Either a Graphic Novel or one-shot.')
updater.no_searchresults(comicid)
return
else:
try:
x = float(issnum)
#validity check
if x < 0:
logger.fdebug('I have encountered a negative issue #: ' + str(issnum) + '. Trying to accomodate.')
logger.fdebug('value of x is : ' + str(x))
int_issnum = (int(x) *1000) - 1
else: raise ValueError
except ValueError, e:
x = 0
tstord = None
issno = None
invchk = "false"
if issnum.lower() != 'preview':
while (x < len(issnum)):
if issnum[x].isalpha():
#take first occurance of alpha in string and carry it through
tstord = issnum[x:].rstrip()
tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
issno = issnum[:x].rstrip()
issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
try:
isschk = float(issno)
except ValueError, e:
if len(issnum) == 1 and issnum.isalpha():
logger.fdebug('detected lone alpha issue. Attempting to figure this out.')
break
logger.fdebug('[' + issno + '] invalid numeric for issue - cannot be found. Ignoring.')
issno = None
tstord = None
invchk = "true"
break
x+=1
if all([tstord is not None, issno is not None, int_issnum is None]):
a = 0
ordtot = 0
if len(issnum) == 1 and issnum.isalpha():
int_issnum = ord(tstord.lower())
else:
while (a < len(tstord)):
ordtot += ord(tstord[a].lower()) #lower-case the letters for simplicty
a+=1
int_issnum = (int(issno) * 1000) + ordtot
elif invchk == "true":
if any([issnum.lower() == 'fall 2005', issnum.lower() == 'spring 2005', issnum.lower() == 'summer 2006', issnum.lower() == 'winter 2009']):
issnum = re.sub('[0-9]+', '', issnum).strip()
inu = 0
ordtot = 0
while (inu < len(issnum)):
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
inu+=1
int_issnum = ordtot
else:
logger.fdebug('this does not have an issue # that I can parse properly.')
return
else:
if int_issnum is not None:
pass
elif issnum == '9-5':
issnum = u'9\xbd'
logger.fdebug('issue: 9-5 is an invalid entry. Correcting to : ' + issnum)
int_issnum = (9 * 1000) + (.5 * 1000)
elif issnum == '112/113':
int_issnum = (112 * 1000) + (.5 * 1000)
elif issnum == '14-16':
int_issnum = (15 * 1000) + (.5 * 1000)
elif issnum.lower() == 'preview':
inu = 0
ordtot = 0
while (inu < len(issnum)):
ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
inu+=1
int_issnum = ordtot
else:
logger.error(issnum + ' this has an alpha-numeric in the issue # which I cannot account for.')
return
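# Worked examples of the integer encoding built above (illustrative values, not
# part of the original logic): a plain "14" becomes 14 * 1000 = 14000; "15.5"
# splits into 15 and 5, the single decimal digit scales to 50, giving
# 15 * 1000 + 50 * 10 = 15500; "1.25" gives 1 * 1000 + 25 * 10 = 1250; "7AU"
# gives 7 * 1000 + ord('a') + ord('u') = 7214; and a half issue such as
# u'2\xbd' becomes (2 + .5) * 1000 = 2500.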
#get the latest issue / date using the date.
#logger.fdebug('issue : ' + str(issnum))
#logger.fdebug('latest date: ' + str(latestdate))
#logger.fdebug('first date: ' + str(firstdate))
#logger.fdebug('issue date: ' + str(firstval['Issue_Date']))
#logger.fdebug('issue date: ' + storedate)
if any([firstval['Issue_Date'] >= latestdate, storedate >= latestdate]):
#logger.fdebug('date check hit for issue date > latestdate')
if int_issnum > helpers.issuedigits(latestiss):
#logger.fdebug('assigning latest issue to : ' + str(issnum))
latestiss = issnum
latestissueid = issid
if firstval['Issue_Date'] != '0000-00-00':
latestdate = str(firstval['Issue_Date'])
latest_stdate = storedate
else:
latestdate = storedate
latest_stdate = storedate
if firstval['Issue_Date'] < firstdate and firstval['Issue_Date'] != '0000-00-00':
firstiss = issnum
firstdate = str(firstval['Issue_Date'])
if issuechk is not None and issuetype == 'series':
logger.fdebug('comparing ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
if issuechk == int_issnum:
weeklyissue_check.append({"Int_IssueNumber": int_issnum,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": storedate,
"ComicID": comicid,
"IssueID": issid})
issuedata.append({"ComicID": comicid,
"IssueID": issid,
"ComicName": comicname,
"IssueName": issname,
"Issue_Number": issnum,
"IssueDate": issdate,
"ReleaseDate": storedate,
"DigitalDate": digitaldate,
"Int_IssueNumber": int_issnum,
"ImageURL": firstval['Image'],
"ImageURL_ALT": firstval['ImageALT']})
n+=1
if calledfrom == 'futurecheck' and len(issuedata) == 0:
logger.fdebug('This is a NEW series with no issue data - skipping issue updating for now, and assigning generic information so things don\'t break')
latestdate = latestissueinfo[0]['latestdate'] # if it's from futurecheck, issuechk holds the latestdate for the given issue
latestiss = latestissueinfo[0]['latestiss']
lastpubdate = 'Present'
publishfigure = str(SeriesYear) + ' - ' + str(lastpubdate)
else:
#if calledfrom == 'weeklycheck':
if len(issuedata) >= 1 and not calledfrom == 'dbupdate':
logger.fdebug('initiating issue updating - info & status')
issue_collection(issuedata, nostatus='False')
else:
logger.fdebug('initiating issue updating - just the info')
issue_collection(issuedata, nostatus='True')
styear = str(SeriesYear)
if firstdate is not None:
if SeriesYear != firstdate[:4]:
if firstdate[:4] == '2099':
logger.fdebug('Series start date (%s) differs from First Issue start date as First Issue date is unknown - assuming Series Year as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear))
else:
logger.fdebug('Series start date (%s) cannot be properly determined and/or it might cross over into different year (%s) - assuming store date of first issue (%s) as Start Year (even though CV might say previous year - it\'s all gravy).' % (SeriesYear, firstdate[:4], firstdate))
if firstdate == '2099-00-00':
firstdate = '%s-01-01' % SeriesYear
styear = str(firstdate[:4])
if firstdate[5:7] == '00':
stmonth = "?"
else:
stmonth = helpers.fullmonth(firstdate[5:7])
        ltyear = re.sub(r'\s', '', latestdate[:4])
if latestdate[5:7] == '00':
ltmonth = "?"
else:
ltmonth = helpers.fullmonth(latestdate[5:7])
#try to determine if it's an 'actively' published comic from above dates
#threshold is if it's within a month (<55 days) let's assume it's recent.
try:
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
except:
logger.error('Cannot determine Latest Date for given series. This is most likely due to an issue having a date of : 0000-00-00')
latestdate = str(SeriesYear) + '-01-01'
logger.error('Setting Latest Date to be ' + str(latestdate) + '. You should inform CV that the issue data is stale.')
c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
n_date = datetime.date.today()
recentchk = (n_date - c_date).days
if recentchk <= 55:
lastpubdate = 'Present'
else:
if ltmonth == '?':
if ltyear == '0000':
lastpubdate = '?'
else:
lastpubdate = str(ltyear)
elif ltyear == '0000':
lastpubdate = '?'
else:
lastpubdate = str(ltmonth) + ' ' + str(ltyear)
if stmonth == '?' and ('?' in lastpubdate and '0000' in lastpubdate):
lastpubdate = 'Present'
newpublish = True
publishfigure = str(styear) + ' - ' + str(lastpubdate)
else:
newpublish = False
publishfigure = str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)
if stmonth == '?' and styear == '?' and lastpubdate =='0000' and comicIssues == '0':
logger.info('No available issue data - I believe this is a NEW series.')
latestdate = latestissueinfo[0]['latestdate']
latestiss = latestissueinfo[0]['latestiss']
lastpubdate = 'Present'
publishfigure = str(SeriesYear) + ' - ' + str(lastpubdate)
controlValueStat = {"ComicID": comicid}
newValueStat = {"Status": "Active",
"Total": comicIssues,
"ComicPublished": publishfigure,
"NewPublish": newpublish,
"LatestIssue": latestiss,
"LatestIssueID": latestissueid,
"LatestDate": latestdate,
"LastUpdated": helpers.now()
}
myDB = db.DBConnection()
myDB.upsert("comics", newValueStat, controlValueStat)
importantdates = {}
importantdates['LatestIssue'] = latestiss
importantdates['LatestIssueID'] = latestissueid
importantdates['LatestDate'] = latestdate
importantdates['LatestStoreDate'] = latest_stdate
importantdates['LastPubDate'] = lastpubdate
importantdates['SeriesStatus'] = 'Active'
if calledfrom == 'weeklycheck':
return weeklyissue_check
elif len(issuedata) >= 1 and not calledfrom == 'dbupdate':
return {'issuedata': issuedata,
'annualchk': annualchk,
'importantdates': importantdates,
'nostatus': False}
elif calledfrom == 'dbupdate':
return {'issuedata': issuedata,
'annualchk': annualchk,
'importantdates': importantdates,
'nostatus': True}
else:
return importantdates
def annual_check(ComicName, SeriesYear, comicid, issuetype, issuechk, annualslist):
annualids = [] #to be used to make sure an ID isn't double-loaded
annload = []
anncnt = 0
nowdate = datetime.datetime.now()
now_week = datetime.datetime.strftime(nowdate, "%Y%U")
myDB = db.DBConnection()
annual_load = myDB.select('SELECT * FROM annuals WHERE ComicID=?', [comicid])
logger.fdebug('checking annual db')
for annthis in annual_load:
if not any(d['ReleaseComicID'] == annthis['ReleaseComicID'] for d in annload):
#print 'matched on annual'
annload.append({
'ReleaseComicID': annthis['ReleaseComicID'],
'ReleaseComicName': annthis['ReleaseComicName'],
'ComicID': annthis['ComicID'],
'ComicName': annthis['ComicName']
})
if annload is None:
pass
else:
for manchk in annload:
            if manchk['ReleaseComicID'] is not None: #if it exists, then it's a pre-existing add
#print str(manchk['ReleaseComicID']), comic['ComicName'], str(SeriesYear), str(comicid)
annualslist += manualAnnual(manchk['ReleaseComicID'], ComicName, SeriesYear, comicid, manualupd=True)
annualids.append(manchk['ReleaseComicID'])
annualcomicname = re.sub('[\,\:]', '', ComicName)
if annualcomicname.lower().startswith('the'):
annComicName = annualcomicname[4:] + ' annual'
else:
annComicName = annualcomicname + ' annual'
mode = 'series'
annualyear = SeriesYear # no matter what, the year won't be less than this.
logger.fdebug('[IMPORTER-ANNUAL] - Annual Year:' + str(annualyear))
sresults = mb.findComic(annComicName, mode, issue=None)
type='comic'
annual_types_ignore = {'paperback', 'collecting', 'reprints', 'collected edition', 'print edition', 'tpb', 'available in print', 'collects'}
if len(sresults) > 0:
logger.fdebug('[IMPORTER-ANNUAL] - there are ' + str(len(sresults)) + ' results.')
num_res = 0
while (num_res < len(sresults)):
sr = sresults[num_res]
#logger.fdebug("description:" + sr['description'])
for x in annual_types_ignore:
if x in sr['description'].lower():
test_id_position = sr['description'].find(comicid)
if test_id_position >= sr['description'].lower().find(x) or test_id_position == -1:
logger.fdebug('[IMPORTER-ANNUAL] - tradeback/collected edition detected - skipping ' + str(sr['comicid']))
continue
if comicid in sr['description']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
issueid = sr['comicid']
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' added to series list as an Annual')
if issueid in annualids:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(issueid) + ' already exists within current annual list for series.')
num_res+=1 # need to manually increment since not a for-next loop
continue
issued = cv.getComic(issueid, 'issue')
                if issued is None or len(issued) == 0:
logger.fdebug('[IMPORTER-ANNUAL] - Could not find any annual information...')
pass
else:
n = 0
if int(sr['issues']) == 0 and len(issued['issuechoice']) == 1:
sr_issues = 1
else:
if int(sr['issues']) != len(issued['issuechoice']):
sr_issues = len(issued['issuechoice'])
else:
sr_issues = sr['issues']
logger.fdebug('[IMPORTER-ANNUAL] - There are ' + str(sr_issues) + ' annuals in this series.')
while (n < int(sr_issues)):
try:
firstval = issued['issuechoice'][n]
except IndexError:
break
try:
cleanname = helpers.cleanName(firstval['Issue_Name'])
except:
cleanname = 'None'
issid = str(firstval['Issue_ID'])
issnum = str(firstval['Issue_Number'])
issname = cleanname
issdate = str(firstval['Issue_Date'])
stdate = str(firstval['Store_Date'])
digdate = str(firstval['Digital_Date'])
int_issnum = helpers.issuedigits(issnum)
iss_exists = myDB.selectone('SELECT * from annuals WHERE IssueID=?', [issid]).fetchone()
if iss_exists is None:
if stdate == '0000-00-00':
dk = re.sub('-', '', issdate).strip()
else:
dk = re.sub('-', '', stdate).strip() # converts date to 20140718 format
if dk == '00000000':
logger.warn('Issue Data is invalid for Issue Number %s. Marking this issue as Skipped' % firstval['Issue_Number'])
astatus = "Skipped"
else:
datechk = datetime.datetime.strptime(dk, "%Y%m%d")
issue_week = datetime.datetime.strftime(datechk, "%Y%U")
if mylar.CONFIG.AUTOWANT_ALL:
astatus = "Wanted"
elif issue_week >= now_week and mylar.CONFIG.AUTOWANT_UPCOMING is True:
astatus = "Wanted"
else:
astatus = "Skipped"
else:
astatus = iss_exists['Status']
annualslist.append({"Issue_Number": issnum,
"Int_IssueNumber": int_issnum,
"IssueDate": issdate,
"ReleaseDate": stdate,
"DigitalDate": digdate,
"IssueName": issname,
"ComicID": comicid,
"IssueID": issid,
"ComicName": ComicName,
"ReleaseComicID": re.sub('4050-', '', firstval['Comic_ID']).strip(),
"ReleaseComicName": sr['name'],
"Status": astatus})
#myDB.upsert("annuals", newVals, newCtrl)
# --- don't think this does anything since the value isn't returned in this module
#if issuechk is not None and issuetype == 'annual':
# #logger.fdebug('[IMPORTER-ANNUAL] - Comparing annual ' + str(issuechk) + ' .. to .. ' + str(int_issnum))
# if issuechk == int_issnum:
# weeklyissue_check.append({"Int_IssueNumber": int_issnum,
# "Issue_Number": issnum,
# "IssueDate": issdate,
# "ReleaseDate": stdate})
n+=1
num_res+=1
manualAnnual(annchk=annualslist)
return annualslist
elif len(sresults) == 0 or len(sresults) is None:
logger.fdebug('[IMPORTER-ANNUAL] - No results, removing the year from the agenda and re-querying.')
sresults = mb.findComic(annComicName, mode, issue=None)
if len(sresults) == 1:
sr = sresults[0]
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(comicid) + ' found. Assuming it is part of the greater collection.')
else:
resultset = 0
else:
logger.fdebug('[IMPORTER-ANNUAL] - Returning results to screen - more than one possibility')
for sr in sresults:
if annualyear < sr['comicyear']:
logger.fdebug('[IMPORTER-ANNUAL] - ' + str(annualyear) + ' is less than ' + str(sr['comicyear']))
if int(sr['issues']) > (2013 - int(sr['comicyear'])):
logger.fdebug('[IMPORTER-ANNUAL] - Issue count is wrong')
#if this is called from the importer module, return the weeklyissue_check
def image_it(comicid, latestissueid, comlocation, ComicImage):
#alternate series covers download latest image...
cimage = os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg')
imageurl = mylar.cv.getComic(comicid, 'image', issueid=latestissueid)
covercheck = helpers.getImage(comicid, imageurl['image'])
if covercheck == 'retry':
logger.fdebug('Attempting to retrieve a different comic image for this particular issue.')
if imageurl['image_alt'] is not None:
covercheck = helpers.getImage(comicid, imageurl['image_alt'])
else:
if not os.path.isfile(cimage):
logger.fdebug('Failed to retrieve issue image, possibly because not available. Reverting back to series image.')
covercheck = helpers.getImage(comicid, ComicImage)
PRComicImage = os.path.join('cache', str(comicid) + ".jpg")
ComicImage = helpers.replacetheslash(PRComicImage)
#if the comic cover local is checked, save a cover.jpg to the series folder.
if all([mylar.CONFIG.COMIC_COVER_LOCAL is True, os.path.isdir(comlocation) is True, os.path.isfile(os.path.join(comlocation, 'cover.jpg'))]):
try:
comiclocal = os.path.join(comlocation, 'cover.jpg')
shutil.copyfile(cimage, comiclocal)
if mylar.CONFIG.ENFORCE_PERMS:
filechecker.setperms(comiclocal)
except IOError as e:
logger.error('[%s] Error saving cover (%s) into series directory (%s) at this time' % (e, cimage, comiclocal))
except Exception as e:
logger.error('[%s] Unable to save cover (%s) into series directory (%s) at this time' % (e, cimage, comiclocal))
myDB = db.DBConnection()
myDB.upsert('comics', {'ComicImage': ComicImage}, {'ComicID': comicid})
|
gpl-3.0
| -6,903,761,207,587,351,000 | 48.885802 | 298 | 0.531708 | false |
joshmoore/zeroc-ice
|
py/test/Ice/info/Client.py
|
1
|
1142
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
slice_dir = Ice.getSliceDir()
if not slice_dir:
print sys.argv[0] + ': Slice directory not found.'
sys.exit(1)
Ice.loadSlice("'-I" + slice_dir + "' Test.ice")
import AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def run(args, communicator):
AllTests.allTests(communicator, False)
return True
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
communicator = Ice.initialize(sys.argv, initData)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
|
gpl-2.0
| 3,822,398,164,370,931,000 | 23.826087 | 72 | 0.596322 | false |
google/ehr-predictions
|
ehr_prediction_modeling/utils/label_utils.py
|
1
|
2694
|
# coding=utf-8
# Copyright 2020 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utility functions used in label computations."""
from typing import List
# Open Source Labels
ADVERSE_OUTCOME_IN_ADMISSION = "adverse_outcome_in_admission"
MORTALITY_IN_ADMISSION_LABEL = "mortality_in_admission"
MORTALITY_LOOKAHEAD_LABEL_BASE = "mortality_in"
READMISSION_LABEL_BASE = "readmission_within"
TIME_UNTIL_NEXT_ADMISSION = "time_until_next_admission"
SEGMENT_LABEL = "segment_mask"
CENSORED_PATIENT_LABEL = "patient_mask"
LAB_LOOKAHEAD_REGEX_BASE = r"lab_[0-9]+_value_within"
LOS_LABEL = "length_of_stay"
TIMESTAMP_KEY = "timestamp"
TSA_LABEL = "time_since_admission"
IGNORE_LABEL = "ignore_label"
TOD_LABEL = "time_of_day_label"
DISCHARGE_LABEL = "discharge_label"
# In the early stages of the project the focus was on longer time windows. Due
# to the clinical relevance we have now extended this list to be more finely
# granular on the low-end, with multiple shorter time windows 6h apart. For
# backwards compatibility, we still compute the longer windows - in order to be
# able to compare to our past performance. However, note that these should not
# really be used in models/evaluation going forward.
DEFAULT_LOOKAHEAD_WINDOWS = [6, 12, 18, 24, 36, 48, 60, 72]
def get_lab_label_lookahead_key(lab_number: str,
time_window_hours: int,
suffix=None) -> str:
if not suffix:
return f"lab_{lab_number}_in_{time_window_hours}h"
else:
return f"lab_{lab_number}_in_{time_window_hours}h_{suffix}"
def get_adverse_outcome_lookahead_label_key(time_window_hours: int) -> str:
"""Returns the lookahead label key for the provided time window in hours."""
return f"adverse_outcome_within_{time_window_hours}h"
def get_readmission_label_keys(time_windows: List[int]) -> List[str]:
"""Get label keys for readmission.
Args:
time_windows: list<int> of the considered time windows (in days) for
readmission.
Returns:
list<str> of labels for readmission within X days
"""
return [f"{READMISSION_LABEL_BASE}_{t}_days" for t in time_windows]
|
apache-2.0
| -7,459,397,773,896,663,000 | 35.405405 | 79 | 0.720861 | false |
heromod/migrid
|
state/sss_home/MiG-SSS/mig_xsss.py
|
1
|
1992
|
#!/usr/bin/env python
import os
import time
MiG_DIR = "~/MiG-SSS"
G_XSCREENSAVER_COMMAND = "/usr/X11R6/bin/xscreensaver-command -watch"
G_GPIDFILE = "/tmp/mig_xsss_job.gpid"
# Returns Group ID of running job
def getRunningGPID( param_sGPIDFile ):
iGPID = -1
if ( os.path.isfile( param_sGPIDFile ) ):
input = open( param_sGPIDFile, "r" )
iGPID = int(input.readline())
input.close()
return iGPID
# Writes Group ID of running job to param_sGPIDFile
def writeRunningGPID( param_iGPID, param_sGPIDFile ):
output = open( param_sGPIDFile, "w" )
output.write( str(param_iGPID) + "\n" )
output.close()
def startJob(param_sGPIDFile):
iPID = os.fork()
if ( iPID == 0 ):
os.setpgrp()
else:
# Write GPID to file
writeRunningGPID( os.getpgrp(), param_sGPIDFile )
cmd = "qemu -hda "+MiG_DIR+"/hda.img -cdrom "+MiG_DIR+"/MiG.iso -boot d -kernel-kqemu -nographic"
fd = os.popen(cmd)
fd.close()
# We never end here as it is right now
# Job finished, remove param_sGPIDFile.
os.unlink( param_sGPIDFile )
os._exit(0)
def killJob( param_sGPIDFile ):
iGPID = getRunningGPID( param_sGPIDFile );
if (iGPID != -1) :
try:
# Kill all processes with group id GPID
os.kill( 0-iGPID, 9 )
except OSError, e:
# Process doesnt exist, ignore
print ""
# Job killed, remove param_sGPIDFile.
os.unlink( param_sGPIDFile )
def SSS():
while(1):
str = ""
bScreenSaverActive = 0
fd = os.popen( G_XSCREENSAVER_COMMAND )
str = fd.readline()
while ( len(str) != 0 ):
if ( (str[0:5] == "BLANK" or str[0:4] == "LOCK") and bScreenSaverActive == 0 ):
bScreenSaverActive = 1
startJob(G_GPIDFILE)
fd = os.popen(G_XSCREENSAVER_COMMAND)
elif ( str[0:7] == "UNBLANK" and bScreenSaverActive == 1 ):
bScreenSaverActive = 0
killJob(G_GPIDFILE)
str = fd.readline()
fd.close()
def main():
iPID = os.fork()
if (iPID == 0):
os.setpgrp()
SSS()
if __name__ == '__main__' : main()
|
gpl-2.0
| 7,651,162,478,310,294,000 | 24.87013 | 105 | 0.63002 | false |
RPGroup-PBoC/gist_pboc_2017
|
code/inclass/phase_portrait_in_class.py
|
1
|
1286
|
# Duhhhh
import numpy as np
import matplotlib.pyplot as plt
import seaborn
plt.close('all')
# Define the parameters
r = 20 # the production rate
gamma = 1 / 30 # the degradation rate
k = 200 # in units of concentration
max_R = 1000 # maximum number of R1 and R2
R1 = np.linspace(0, max_R, 500)
R2 = np.linspace(0, max_R, 500)
# Compute the nullclines.
R1_null = (r / gamma) / (1 + (R2 / k)**2)
R2_null = (r / gamma) / (1 + (R1 / k)**2)
# Plot the nullclines.
plt.figure()
plt.plot(R1, R1_null, label='dR1/dt = 0')
plt.plot(R2_null, R2, label='dR2/dt = 0')
plt.xlabel('R1')
plt.ylabel('R2')
plt.legend()
plt.show()
# Generate the vector fields
R1_m, R2_m = np.meshgrid(R1[1::30], R2[1::30])
# Compute the derivatives
dR1_dt = -gamma * R1_m + r / (1 + (R2_m / k)**2)
dR2_dt = -gamma * R2_m + r / (1 + (R1_m / k)**2)
# Plot the vector fields!!
plt.quiver(R1_m, R2_m, dR1_dt, dR2_dt)
plt.show()
# Plot the orbit.
time = 200
R1 = 800
R2 = 400
# Loop through time and integrate.
for t in range(time):
dR1 = -gamma * R1 + r / (1 + (R2 / k)**2)
dR2 = -gamma * R2 + r / (1 + (R1 / k)**2)
# Add this change to our current position
R1 = R1 + dR1
# This is the same operation as above..
R2 += dR2
plt.plot(R1, R2, 'ro')
plt.show()
plt.pause(0.05)
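# Optional sanity check (not in the original script): with R1 == R2 == R the
# symmetric fixed point solves R == (r / gamma) / (1 + (R / k)**2). If SciPy is
# available it can be located numerically using the parameters defined above:
#
#     from scipy.optimize import brentq
#     R_fix = brentq(lambda R: r / gamma / (1 + (R / k)**2) - R, 0, max_R)
#     print(R_fix)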
|
mit
| -3,922,817,809,920,688,000 | 21.172414 | 48 | 0.600311 | false |
lincolnloop/django-jsonit
|
jsonit/encoder.py
|
1
|
2054
|
import datetime
import json
from django.conf import settings
from django.contrib.messages.storage.base import Message
from django.utils.functional import Promise
from django.utils import six
def encode_message(message):
return {'class': message.tags, 'message': message.message}
class JsonitEncoder(json.JSONEncoder):
default_encoders = (
(Promise, six.text_type),
(Message, encode_message),
(datetime.datetime, lambda d: d.isoformat()),
(datetime.date, lambda d: d.isoformat()),
)
def __init__(self, *args, **kwargs):
"""
In addition to the standard JSONEncoder constructor arguments, this
class uses the following keyword arguments::
:param extra_encoders: A list of two-element tuples containing of extra
encoders to help convert objects into JSON. Each tuple should
contain the class as the first element and the conversion function
for objects of that class as the second.
"""
self.encoders = self.default_encoders
extra_encoders = kwargs.pop('extra_encoders', None)
if extra_encoders:
classes = [encoder_tuple[0] for encoder_tuple in extra_encoders]
self.encoders = list(extra_encoders) + [
encoder_tuple for encoder_tuple in self.encoders
if not encoder_tuple[0] in classes]
super(JsonitEncoder, self).__init__(*args, **kwargs)
def default(self, o):
for cls, func in self.encoders:
if isinstance(o, cls):
return func(o)
super(JsonitEncoder, self).default(o)
def encode(object, encoders=None):
"""
Encode an object into a JSON representation.
:param object: The object to encode.
:param encoders: An optional dictionary of encoders to help convert
objects.
All other parameters are passed to the standard JSON encode.
"""
indent = settings.DEBUG and 2 or None
return JsonitEncoder(indent=indent, extra_encoders=encoders).encode(object)
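
# Illustrative usage (not part of the original module), assuming Django settings
# are already configured so settings.DEBUG can be read:
#
#     from decimal import Decimal
#     payload = {'price': Decimal('9.99'), 'created': datetime.date(2016, 1, 1)}
#     # Decimal is not covered by the default encoders, so pass an extra one.
#     json_text = encode(payload, encoders=[(Decimal, str)])
#
# The date is handled by the default encoders declared on JsonitEncoder above;
# Decimal is a hypothetical extra type used only for this sketch.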
|
bsd-3-clause
| -6,261,254,148,897,235,000 | 33.813559 | 79 | 0.658228 | false |
stplaydog/OPTKIT
|
scripts/process_data_pattern.py
|
1
|
3246
|
#
# Copyright YMSys, 2015,2016 Zhaoming Yin
#
# @brief 1) This script does some sanity checks on the data, and removes the
# corrupted data;
# 2) This script separates the different crime types.
#
# MODIFIED (MM/DD/YY)
# stplaydog 08/24/16 - Stats calculation
# stplaydog 08/06/16 - Additional data bug fix
# stplaydog 07/31/16 - Creation
#
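# Illustrative invocation (not part of the original header); the file names are
# hypothetical:
#
#     python process_data_pattern.py raw_events.csv out_dir
#
# For every crime type found in column 5 of the input CSV a file out_dir/<type>
# is written; records sharing the same timestamp and coordinates are collapsed,
# with the count column meant to record how many duplicates were merged.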
import sys
from datetime import datetime
from subprocess import call
reader = open(sys.argv[1])
lines = reader.readlines()
list = {}
stats = {'num_events' : 0,
'num_type' : 0,
'num_dups' : 0,
'max_x' : 0,
'min_x' : sys.maxint,
'max_y' : 0,
'min_y' : sys.maxint,
'max_t' : "0/0/0",
'min_t' : "9999/12/30"}
def compare_earlier(t1, t2):
    # True only if t1 is strictly earlier than t2, comparing the date fields in
    # order (year, then month, then day for YYYY/MM/DD strings).
    items1 = [int(i) for i in t1.split('/')]
    items2 = [int(i) for i in t2.split('/')]
    return items1 < items2
for line in lines:
if line .find("time") == -1:
items = line.split(",")
key = items[4].strip().replace("/","").replace(" "," ").replace(" ", "_").replace("?", "")
t = items[0]
x = items[1].split(".")[0]
y = items[2].split(".")[0]
if len(str(x)) >= 6 and len(str(x)) < 9 and len(str(y)) >= 6 and len(str(y)) < 9 and int(x) >0 and int(y)>0:
stats['num_events'] += 1
stats['max_x'] = max(int(x), stats['max_x'])
stats['min_x'] = min(int(x), stats['min_x'])
stats['max_y'] = max(int(y), stats['max_y'])
stats['min_y'] = min(int(y), stats['min_y'])
stats['max_t'] = t if compare_earlier(stats['max_t'], t) else stats['max_t']
stats['min_t'] = t if compare_earlier(t, stats['min_t']) else stats['min_t']
if key not in list:
stats['num_type'] += 1
list[key] = []
list[key].append(items[0]+","+x+","+y+","+items[3]+","+key+"\n")
else:
first_line = line
for key in list:
file = sys.argv[2]+"/"+key
writer = open(file, "w")
writer.write(first_line)
for line in list[key]:
writer.write(line)
writer.close()
call(["sort", "-t,", "-n", "-k", "2", file, "-o", file+".sort"])
reader = open(file+".sort")
writer = open(file, "w")
dup_count = 0
pred = []
pred_line = []
pred_count = 0
for line in reader.readlines():
cur = line.split(",")
if len(pred) != 0:
if pred[0] != cur[0] or pred[1] != cur[1] or pred[2] != cur[2]:
if pred_count >= 2:
writer.write(pred_line.replace(",1", ","+str(pred_count)))
else:
writer.write(pred_line)
pred_count = 1
else:
dup_count += 1
stats['num_dups'] += 1
pred_count += 1
pred = cur
pred_line = line
writer.close()
print file, "has ", str(dup_count), " duplications"
call(["rm", file+".sort"])
print stats
|
gpl-3.0
| 7,054,453,880,571,916,000 | 29.914286 | 116 | 0.475354 | false |
FrontSide/Sizun
|
sizun/controllers/inspectors/inspection.py
|
1
|
5266
|
"""
Sizun
MIT License
(C) 2015 David Rieger
"""
from abc import ABCMeta
from enum import IntEnum, unique
from sizun.controllers.aghandler import AGHandler
from sizun.controllers.pmdhandler import PMDHandler
from sizun.controllers.syntaxhandler import SyntaxHandler
from sizun.controllers.linegrabber import LineGrabber
from sizun.errorhandlers.concrete_error import UnallowedOperationError
from flask import current_app as app
class InspectionRunner:
inspection_set = ["CC", "CD", "FE", "LPL", "MC"]
def __init__(self, _settings, _rulehandler):
self.settings = _settings
self.ag = AGHandler(self.settings)
self.syntaxhandler = SyntaxHandler(self.settings)
self.rulehandler = _rulehandler
self.pmd = PMDHandler(self.settings)
self.linegrabber = LineGrabber(self.settings)
def run(self, specific_inspection=None):
"""
Run the full inspection suite
"""
result = dict()
stat = dict()
# Get the activation status of all inspection suits OR
# deactivate all inspections except the one set in specific_inspection, respectively
if specific_inspection is None:
for insp in self.inspection_set:
stat[insp] = self.settings.isset_inspection(insp)
else:
for insp in self.inspection_set:
if specific_inspection.upper() == insp:
stat[insp] = True
else:
stat[insp] = False
# Run activated inspections
# Cyclomatic Complexity
if stat["CC"]:
from .circular_complexity import CCInspector
result["CC"] = CCInspector(self.ag, self.syntaxhandler, self.rulehandler).run()
# Code Duplication
if stat["CD"]:
from .code_duplication import CDInspector
result["CD"] = CDInspector(self.pmd, self.rulehandler, self.linegrabber).run()
# Feature Envy
if stat["FE"]:
from .feature_envy import FEInspector
result["FE"] = FEInspector(self.ag, self.syntaxhandler, self.rulehandler).run()
# Long Parameter List
if stat["LPL"]:
from .long_parameter_list import LPLInspector
result["LPL"] = LPLInspector(self.ag, self.syntaxhandler, self.rulehandler).run()
# Message Chain
if stat["MC"]:
from .message_chain import MCInspector
result["MC"] = MCInspector(self.ag, self.syntaxhandler, self.rulehandler).run()
return result
class InspectionABC(metaclass=ABCMeta):
"""
Abstract Inspection Super-Class
"""
_result = dict()
def run(self):
"""
Triggers the inspection process
and returns the resulting dictionary
THIS IS THE METHOD THAT MUST BE CALLED FROM THE
INSPECTION RUNNER
"""
self.inspect()
return self._result
def inspect(self):
"""
Main inspection process method
Must be overridden by the concrete
inspector subclass and must call this super method
i.e. super().inspect()
! DO NOT ! DIRECTLY CALL THIS METHOD FROM THE INSPECTION RUNNER
"""
app.logger.debug("An inspection has been triggered...")
# Write dict entry for JSON response
self._result = dict()
self._result[str(ResultKey.ESCALATION)] = EscalationLevel.NO_ERROR
self._result[str(ResultKey.VIOLATIONS)] = list()
def note_violation(self, filename, line, code=None, info=None):
"""
Adds the info of a rule violation to the result dictionary
"""
violation = {"FILE": filename, "LINE": line, "CODE": code, "INFO": info}
self._result[str(ResultKey.VIOLATIONS)].append(violation)
def add_info(self, key, value):
"""
Adds a key value pair to the result dictionary
        Called by the concrete inspector subclass whenever
additional information needs to be provided
"""
        if key in ResultKey.__members__:
raise UnallowedOperationError("Concrete Inspectors may not change the pre-defined key \"{}\"".format(key))
self._result[str(key)] = value
def escalate(self):
"""
        Escalates the error level for the inspected metric
Must be called from the concrete inspector subclass
whenever a rule violation occurs and an escalation to
a higher error level is necessary
"""
_ecalation_level = self._result[str(ResultKey.ESCALATION)]
if _ecalation_level is EscalationLevel.NO_ERROR:
_ecalation_level = EscalationLevel.MINOR_ERROR
elif _ecalation_level is EscalationLevel.MINOR_ERROR:
_ecalation_level = EscalationLevel.MAJOR_ERROR
else:
_ecalation_level = EscalationLevel.CRITICAL_ERROR
self._result[str(ResultKey.ESCALATION)] = _ecalation_level
class ResultKey(IntEnum):
ESCALATION = 0
VIOLATIONS = 1
def __str__(self):
return self.name
@unique
class EscalationLevel(IntEnum):
NO_RESULT = 0
NO_ERROR = 1
MINOR_ERROR = 2
MAJOR_ERROR = 3
CRITICAL_ERROR = 4
def __str__(self):
return self.name
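
# Illustrative sketch (not part of the original module): the shape of a
# hypothetical concrete inspector built on InspectionABC. The names below are
# invented; the real inspectors (CCInspector, CDInspector, ...) live in their
# own modules and receive the handler objects shown in InspectionRunner.run().
#
#     class ExampleInspector(InspectionABC):
#         def __init__(self, raw_hits):
#             self._raw_hits = raw_hits
#
#         def inspect(self):
#             super().inspect()
#             for hit in self._raw_hits:
#                 self.note_violation(hit['file'], hit['line'], info='example rule')
#                 self.escalate()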
|
mit
| 4,828,805,411,688,617,000 | 30.532934 | 118 | 0.627991 | false |
jellis18/emcee3
|
emcee3/model.py
|
1
|
1839
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["Model"]
import numpy as np
from .state import State
class Model(object):
def __init__(self, lnlikefn=None, lnpriorfn=None, args=tuple()):
if lnpriorfn is None:
lnpriorfn = _default_lnprior_function
self.lnpriorfn = lnpriorfn
self.lnlikefn = lnlikefn
self.args = args
def setup(self, state, **kwargs):
pass
def get_lnprior(self, state, **kwargs):
return self.lnpriorfn(state.coords, *(self.args))
def get_lnlike(self, state, **kwargs):
return self.lnlikefn(state.coords, *(self.args))
def get_state(self, coords, **kwargs):
state = State(coords, -np.inf, -np.inf, False)
self.setup(state, **kwargs)
# Compute the prior.
state.lnprior = self.get_lnprior(state, **kwargs)
if not np.isfinite(state.lnprior):
state.lnprior = -np.inf
return state
# Compute the likelihood.
state.lnlike = self.get_lnlike(state, **kwargs)
if not np.isfinite(state.lnlike):
state.lnlike = -np.inf
return state
def __call__(self, coords, **kwargs):
return self.get_state(coords, **kwargs)
def check_grad(self, coords, eps=1.234e-7):
good = True
grad = self.get_state(coords, compute_grad=True).grad_lnprob
for i, c in enumerate(coords):
coords[i] = c + eps
plus = self.get_state(coords).lnprob
coords[i] = c - eps
minus = self.get_state(coords).lnprob
comp = 0.5*(plus-minus)/eps
print(i, comp, grad[i])
if not np.allclose(comp, grad[i]):
good = False
return good
def _default_lnprior_function(x, *args):
return 0.0
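
# Illustrative usage (not part of the original module), assuming the package's
# State class is importable: wire a simple Gaussian log-likelihood and a flat
# box prior into Model and evaluate one State. Both functions are made up for
# this sketch.
if __name__ == "__main__":
    def lnlike(x):
        return -0.5 * np.sum(x ** 2)

    def lnprior(x):
        return 0.0 if np.all(np.abs(x) < 10.0) else -np.inf

    model = Model(lnlikefn=lnlike, lnpriorfn=lnprior)
    state = model(np.zeros(3))
    print(state.lnprior, state.lnlike)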
|
mit
| -4,614,568,854,339,946,000 | 27.292308 | 68 | 0.574769 | false |
dknlght/dkodi
|
src/script.module.urlresolver/lib/urlresolver/plugins/__generic_resolver__.py
|
1
|
2035
|
"""
Plugin for URLResolver
Copyright (C) 2016 script.module.urlresolver
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import abc
from urlresolver.plugins.lib import helpers
from urlresolver.resolver import UrlResolver
class GenericResolver(UrlResolver):
__metaclass__ = abc.ABCMeta
"""
Generic Resolver
___
name |str| : resolver name
domains |list of str| : list of supported domains
pattern |str| : supported uri regex pattern, match groups: 1=host, 2=media_id
"""
name = 'generic'
domains = ['example.com']
pattern = None
def __init__(self):
if self.pattern is None:
            self.pattern = r'(?://|\.)(%s)/(?:embed[/-])?([A-Za-z0-9]+)' % '|'.join(re.escape(d) for d in self.domains)
def get_media_url(self, host, media_id):
"""
source scraping to get resolved uri goes here
return |str| : resolved/playable uri or raise ResolverError
___
helpers.get_media_url result_blacklist: |list of str| : list of strings to blacklist in source results
"""
return helpers.get_media_url(self.get_url(host, media_id)).replace(' ', '%20')
def get_url(self, host, media_id):
"""
return |str| : uri to be used by get_media_url
___
_default_get_url template: |str| : 'http://{host}/embed-{media_id}.html'
"""
return self._default_get_url(host, media_id)
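
# Illustrative sketch (not part of the original plugin): a hypothetical concrete
# resolver built on this base class. The class name and domain are invented.
#
#     class ExampleResolver(GenericResolver):
#         name = 'example'
#         domains = ['example.com']
#         # pattern is derived from `domains` in __init__ and matches URLs such
#         # as http://example.com/embed-abc123.html; get_media_url() then asks
#         # helpers.get_media_url() to scrape the playable link.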
|
gpl-2.0
| -373,994,545,689,759,040 | 34.701754 | 110 | 0.650123 | false |
geosohh/AnimeTorr
|
animetorr/shared/db.py
|
1
|
18589
|
# -*- coding: utf-8 -*-
"""
Anything related to the DB.
Includes the classes for Anime and Config.
"""
__author__ = 'Sohhla'
import re
import sqlite3
from log import LoggerManager
import constant
from strings import escape_unicode
class DBManager():
"""
Controls access to the SQLite DB.
DB is located at the path defined by constant.DB_PATH.
"""
def __init__(self):
self.re_words = re.compile("(\w+)")
self.number_of_anime_enabled = 0
self.log = LoggerManager().get_logger("DB Manager")
self.conn = sqlite3.connect(constant.DB_PATH)
self.__update_db()
def __update_db(self):
"""
Update an old DB to the latest version.
:return: none
"""
db = self.__read_table("config")
try:
db_version = db[0]["dbVersion"] # v4 or later
except KeyError:
db_version = db[0]["dbversion"] # v3 or earlier
if db_version==3:
"""
Update to 4 (Nov 06 2014):
- config: added column "preferRss" (type=numeric, default=1)
"""
c = self.conn.cursor()
c.execute("ALTER TABLE config ADD COLUMN preferRss NUMERIC")
c.execute("UPDATE config SET preferRss=1")
c.execute("UPDATE config SET dbversion=4")
self.conn.commit()
c.close()
if db_version==4:
"""
Update to 5 (Mar 30 2015):
- config: removed some columns, renamed others
"""
c = self.conn.cursor()
c.execute("START TRANSACTION")
c.execute("ALTER TABLE config RENAME TO old_config")
c.execute("CREATE TABLE config (showNotification INT,sleepTime INT,preferRss INT,useDefaultApp INT,appPath TEXT,"
"animeFolder TEXT,firstUse INT,dbVersion INT )")
c.execute("INSERT INTO config (showNotification,sleepTime,preferRss,appPath,animeFolder,firstUse) "
"SELECT notifyWhenDownloadFile,sleepTime,preferRss,uTorrentPath,uTorrentDefaultContentDownloadFolder,firstUse FROM old_config")
c.execute("DROP TABLE old_config")
c.execute("UPDATE config SET useDefaultApp=0")
c.execute("UPDATE config SET dbVersion=5")
c.execute("COMMIT")
self.conn.commit()
c.close()
# If the DB is modified again, add another IF here to update previous versions
def __read_table(self,table):
"""
Reads all data from the given table.
:type table: str
:param table: Name of the table.
:rtype: dict
:return:
Dictionary with all data from the table.
The dictionary has the following format: dic[line_number (int)][column_name (str)]
"""
try:
__c = self.conn.cursor()
__c.execute("SELECT * FROM %s" % table)
results = []
for row in __c:
results.append(row)
dic = {}
for count in range(len(results)):
temp_dic = {}
for column in range(len(__c.description)):
temp_dic[__c.description[column][0]] = results[count][column]
dic[count] = temp_dic
__c.close()
return dic
except Exception as error:
self.log.print_traceback(error,self.log.error)
def update_registry(self,table,column,new_value,pk_column=None,pk_value=None):
"""
Update a line from a table.
:type table: str
:param table: Name of the table.
:type column: str
:param column: Name of the column.
:type new_value: int or str
:param new_value: Value the registry will be updated to.
:type pk_column: str
:param pk_column: Primary Key column, used to identify a single line from the table.
:type pk_value: int or str
:param pk_value: Primary Key column value that uniquely identifies a single line from the table.
"""
__c = self.conn.cursor()
try: #if OK, then new_value is an integer
int(new_value)
value = "%d" % new_value
except ValueError: #otherwise it's text
value = "'%s'" % new_value
if pk_column is None:
__c.execute("UPDATE %s SET %s=%s" % (table,column,value))
else:
__c.execute("UPDATE %s SET %s=%s WHERE %s='%s'" % (table,column,value,pk_column,pk_value))
self.conn.commit()
def get_config(self):
"""
Reads 'config' table from DB.
:rtype: Config
:return: All config data.
"""
db = self.__read_table("config")
config = Config(show_notification = bool(db[0]["showNotification"]),
sleep_time = db[0]["sleepTime"],
prefer_rss = bool(db[0]["preferRss"]),
use_default_app = bool(db[0]["useDefaultApp"]),
app_path = db[0]["appPath"],
anime_folder = db[0]["animeFolder"],
first_use = bool(db[0]["firstUse"]),
db_version = db[0]["dbVersion"])
return config
def get_anime_list(self):
"""
Reads 'anime' table from DB, generates a list with all data.
:rtype: list[Anime]
:return: All data from all anime on the DB.
"""
anime_list = []
self.number_of_anime_enabled = 0
db = self.__read_table("anime")
for result in db:
anime_list.append(Anime(enabled= bool(db[result]["enabled"]),
name= db[result]["name"],
episode= db[result]["episodeNumber"],
version= db[result]["versionNumber"],
search_terms= db[result]["search"],
last_file_downloaded=db[result]["lastFileDownloaded"],
download_folder= db[result]["downloadFolder"],
check_anime_index= db[result]["checkAnimeIndex"],
check_anime_tosho= db[result]["checkAnimeTosho"],
check_anirena= db[result]["checkAnirena"],
check_nyaa= db[result]["checkNyaa"],
check_tokyotosho= db[result]["checkTokyotosho"]))
if db[result]["enabled"]:
self.number_of_anime_enabled+=1
return anime_list
def insert_anime(self,anime):
"""
Inserts anime in the DB.
:type anime: Anime
:param anime: ...
:rtype: bool
:return: If the anime was added successfully to the DB or not.
"""
success = False
try:
__c = self.conn.cursor()
enabled = int(anime.enabled)
check_anime_index = int(anime.check_anime_index)
check_anime_tosho = int(anime.check_anime_tosho)
check_anirena = int(anime.check_anirena)
check_nyaa = int(anime.check_nyaa)
check_tokyotosho = int(anime.check_tokyotosho)
__c.execute("INSERT INTO anime ({0:s}) VALUES ({1:d}, \"{2:s}\", {3:d}, {4:d}, \"{5:s}\", \"{6:s}\", \"{7:s}\", {8:d}, {9:d}, {10:d}, {11:d}, {12:d})".format(
"enabled,name,episodeNumber,versionNumber,search,lastFileDownloaded,downloadFolder,checkAnimeIndex,checkAnimeTosho,checkAnirena,checkNyaa,checkTokyotosho",
enabled,
anime.name,
anime.episode,
anime.version,
anime.search_terms,
anime.last_file_downloaded,
anime.download_folder,
check_anime_index,
check_anime_tosho,
check_anirena,
check_nyaa,
check_tokyotosho))
self.conn.commit()
success = True
except Exception as error:
self.log.error("ERROR while inserting [%s] into table 'anime'" % escape_unicode(anime.name))
self.log.print_traceback(error,self.log.error)
return success
def remove_anime(self,anime_name):
"""
Removes anime from the DB.
:type anime_name: str or unicode
:param anime_name: ...
:rtype: bool
:return: If the anime was successfully removed or not.
"""
success = False
try:
__c = self.conn.cursor()
__c.execute("DELETE FROM anime WHERE name='%s'" % anime_name)
self.conn.commit()
success = True
except Exception as error:
self.log.error("ERROR while removing [%s] into table 'anime'" % escape_unicode(anime_name))
self.log.print_traceback(error,self.log.error)
return success
class Anime():
"""
Anime class.
"""
def __init__(self,enabled=False,name="",episode=0,version=0,search_terms="",last_file_downloaded="",download_folder="",
check_anime_index=True,check_anime_tosho=True,check_anirena=True,check_nyaa=True,check_tokyotosho=True):
"""
:type enabled: bool
:param enabled: search for new episodes or not
:type name: str or unicode
:param name: ...
:type episode: int
:param episode: ...
:type version: int
:param version: v0,v1,v2,....
:type search_terms: str or unicode
:param search_terms: ...
:type last_file_downloaded: str or unicode
:param last_file_downloaded: used to search for v2,v3,etc.
:type download_folder: str or unicode
:param download_folder: where the torrent application should save the episode
:type check_anime_index: bool
:param check_anime_index: ...
:type check_anime_tosho: bool
:param check_anime_tosho: ...
:type check_anirena: bool
:param check_anirena: ...
:type check_nyaa: bool
:param check_nyaa: ...
:type check_tokyotosho: bool
:param check_tokyotosho: ...
"""
self.enabled = enabled
self.name = name
self.episode = episode
self.version = version
self.search_terms = search_terms
self.last_file_downloaded = last_file_downloaded
self.download_folder = download_folder
self.check_anime_index = check_anime_index
self.check_anime_tosho = check_anime_tosho
self.check_anirena = check_anirena
self.check_nyaa = check_nyaa
self.check_tokyotosho = check_tokyotosho
def update_enabled(self,new_enabled):
"""
Updates "enabled" status.
:type new_enabled: bool
:param new_enabled: ...
"""
DBManager().update_registry("anime","enabled",new_enabled,"name",self.name)
self.enabled = new_enabled
def update_name(self,new_name):
"""
Changes anime name.
:type new_name: str or unicode
:param new_name: ...
"""
DBManager().update_registry("anime","name",new_name,"name",self.name)
self.name = new_name
def update_episode(self,new_episode):
"""
Updates episode number.
:type new_episode: int
:param new_episode: ...
"""
DBManager().update_registry("anime","episodeNumber",new_episode,"name",self.name)
self.episode = new_episode
def update_version(self,new_version):
"""
Updates version number.
:type new_version: int
:param new_version: ...
"""
DBManager().update_registry("anime","versionNumber",new_version,"name",self.name)
self.version = new_version
def update_search_terms(self,new_search_terms):
"""
Updates search terms.
:type new_search_terms: str or unicode
:param new_search_terms: ...
"""
DBManager().update_registry("anime","search",new_search_terms,"name",self.name)
self.search_terms = new_search_terms
def update_last_file_downloaded(self,new_last_file_downloaded):
"""
Updates last file downloaded.
:type new_last_file_downloaded: str or unicode
:param new_last_file_downloaded: ...
"""
DBManager().update_registry("anime","lastFileDownloaded",new_last_file_downloaded,"name",self.name)
self.last_file_downloaded = new_last_file_downloaded
def update_download_folder(self,new_download_folder):
"""
Updates download folder.
:type new_download_folder: str or unicode
:param new_download_folder: ...
"""
DBManager().update_registry("anime","downloadFolder",new_download_folder,"name",self.name)
self.download_folder = new_download_folder
def update_check_anime_index(self,new_check_anime_index):
"""
Updates "check_anime_index" status.
:type new_check_anime_index: bool
:param new_check_anime_index: ...
"""
DBManager().update_registry("anime","checkAnimeIndex",new_check_anime_index,"name",self.name)
self.check_anime_index = new_check_anime_index
def update_check_anime_tosho(self,new_check_anime_tosho):
"""
Updates "check_anime_tosho" status.
:type new_check_anime_tosho: bool
:param new_check_anime_tosho: ...
"""
DBManager().update_registry("anime","checkAnimeTosho",new_check_anime_tosho,"name",self.name)
self.check_anime_tosho = new_check_anime_tosho
def update_check_anirena(self,new_check_anirena):
"""
Updates "check_anirena" status.
:type new_check_anirena: bool
:param new_check_anirena: ...
"""
DBManager().update_registry("anime","checkAnirena",new_check_anirena,"name",self.name)
self.check_anirena = new_check_anirena
def update_check_nyaa(self,new_check_nyaa):
"""
Updates "check_nyaa" status.
:type new_check_nyaa: bool
:param new_check_nyaa: ...
"""
DBManager().update_registry("anime","checkNyaa",new_check_nyaa,"name",self.name)
self.check_nyaa = new_check_nyaa
def update_check_tokyotosho(self,new_check_tokyotosho):
"""
Updates "check_tokyotosho" status.
:type new_check_tokyotosho: bool
:param new_check_tokyotosho: ...
"""
DBManager().update_registry("anime","checkTokyotosho",new_check_tokyotosho,"name",self.name)
self.check_tokyotosho = new_check_tokyotosho
class Config():
"""
Config class.
"""
def __init__(self,show_notification=True,sleep_time=3600,prefer_rss=True,use_default_app=True,app_path="",
anime_folder="",first_use=True,db_version=5):
"""
:type show_notification: bool
:param show_notification: Notify when a new torrent is downloaded or not.
:type sleep_time: int
:param sleep_time: How long to wait before searching for new episodes again.
:type prefer_rss: bool
:param prefer_rss: Search using RSS instead of HTML (Nyaa only)
:type use_default_app: bool
:param use_default_app: Use default torrent application or not.
:type app_path: str or unicode
:param app_path: Path to custom torrent application.
:type anime_folder: str or unicode
:param anime_folder: uTorrent only - Default folder where the torrent application should save episodes.
:type first_use: bool
:param first_use: First time using the app or not.
:type db_version: int
:param db_version: ...
"""
self.show_notification = show_notification
self.sleep_time = sleep_time
self.prefer_rss = prefer_rss
self.use_default_app = use_default_app
self.app_path = app_path
self.anime_folder = anime_folder
self.first_use = first_use
self.db_version = db_version
def update_show_notification(self,new_show_notification):
"""
Updates "show_notification" status.
:type new_show_notification: bool
:param new_show_notification: ...
"""
DBManager().update_registry("config","showNotification",new_show_notification)
self.show_notification = new_show_notification
def update_sleep_time(self,new_sleep_time):
"""
Updates "sleep_time" value.
:type new_sleep_time: int
:param new_sleep_time: ...
"""
DBManager().update_registry("config","sleepTime",new_sleep_time)
self.sleep_time = new_sleep_time
def update_prefer_rss(self,new_prefer_rss):
"""
Updates "prefer_rss" status.
:type new_prefer_rss: bool
:param new_prefer_rss: ...
"""
DBManager().update_registry("config","preferRss",new_prefer_rss)
self.prefer_rss = new_prefer_rss
def update_use_default_app(self,new_use_default_app):
"""
Updates "use_default_app" status.
:type new_use_default_app: bool
:param new_use_default_app: ...
"""
DBManager().update_registry("config","useDefaultApp",new_use_default_app)
self.use_default_app = new_use_default_app
def update_app_path(self,new_app_path):
"""
Updates torrent application path.
:type new_app_path: str or unicode
:param new_app_path: ...
"""
DBManager().update_registry("config","appPath",new_app_path)
self.app_path = new_app_path
def update_anime_folder(self,new_anime_folder):
"""
Updates anime folder.
:type new_anime_folder: str or unicode
:param new_anime_folder: ...
"""
DBManager().update_registry("config","animeFolder",new_anime_folder)
self.anime_folder = new_anime_folder
def update_first_use(self,new_first_use):
"""
Updates "first_use" status.
:type new_first_use: bool
:param new_first_use: ...
"""
DBManager().update_registry("config","firstUse",new_first_use)
self.first_use = new_first_use
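
# Illustrative usage (not part of the original module), assuming constant.DB_PATH
# points at an existing AnimeTorr database:
#
#     manager = DBManager()
#     config = manager.get_config()
#     for anime in manager.get_anime_list():
#         if anime.enabled:
#             anime.update_episode(anime.episode + 1)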
|
gpl-2.0
| 4,674,652,593,245,927,000 | 34.477099 | 179 | 0.55974 | false |
mgagne/nova
|
nova/tests/unit/virt/libvirt/fakelibvirt.py
|
1
|
34902
|
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import fixtures
from lxml import etree
import mock
from nova.compute import arch
# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
allow_default_uri_connection = True
# string indicating the CPU arch
node_arch = arch.X86_64 # or 'i686' (or whatever else uname -m might return)
# memory size in kilobytes
node_kB_mem = 4096
# the number of active CPUs
node_cpus = 2
# expected CPU frequency
node_mhz = 800
# the number of NUMA cell, 1 for unusual NUMA topologies or uniform
# memory access; check capabilities XML for the actual NUMA topology
node_nodes = 1 # NUMA nodes
# number of CPU sockets per node if nodes > 1, total number of CPU
# sockets otherwise
node_sockets = 1
# number of cores per socket
node_cores = 2
# number of threads per core
node_threads = 1
# CPU model
node_cpu_model = "Penryn"
# CPU vendor
node_cpu_vendor = "Intel"
# Has libvirt connection been used at least once
connection_used = False
def _reset():
global allow_default_uri_connection
allow_default_uri_connection = True
# virDomainState
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_XML_SECURE = 1
VIR_DOMAIN_XML_INACTIVE = 2
VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
VIR_DOMAIN_BLOCK_REBASE_COPY = 8
VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
VIR_DOMAIN_EVENT_DEFINED = 0
VIR_DOMAIN_EVENT_UNDEFINED = 1
VIR_DOMAIN_EVENT_STARTED = 2
VIR_DOMAIN_EVENT_SUSPENDED = 3
VIR_DOMAIN_EVENT_RESUMED = 4
VIR_DOMAIN_EVENT_STOPPED = 5
VIR_DOMAIN_EVENT_SHUTDOWN = 6
VIR_DOMAIN_EVENT_PMSUSPENDED = 7
VIR_DOMAIN_UNDEFINE_MANAGED_SAVE = 1
VIR_DOMAIN_AFFECT_CURRENT = 0
VIR_DOMAIN_AFFECT_LIVE = 1
VIR_DOMAIN_AFFECT_CONFIG = 2
VIR_CPU_COMPARE_ERROR = -1
VIR_CPU_COMPARE_INCOMPATIBLE = 0
VIR_CPU_COMPARE_IDENTICAL = 1
VIR_CPU_COMPARE_SUPERSET = 2
VIR_CRED_USERNAME = 1
VIR_CRED_AUTHNAME = 2
VIR_CRED_LANGUAGE = 3
VIR_CRED_CNONCE = 4
VIR_CRED_PASSPHRASE = 5
VIR_CRED_ECHOPROMPT = 6
VIR_CRED_NOECHOPROMPT = 7
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128
VIR_NODE_CPU_STATS_ALL_CPUS = -1
VIR_DOMAIN_START_PAUSED = 1
# libvirtError enums
# (Intentionally different from what's in libvirt. We do this to check,
# that consumers of the library are using the symbolic names rather than
# hardcoding the numerical values)
VIR_FROM_QEMU = 100
VIR_FROM_DOMAIN = 200
VIR_FROM_NWFILTER = 330
VIR_FROM_REMOTE = 340
VIR_FROM_RPC = 345
VIR_FROM_NODEDEV = 666
VIR_ERR_NO_SUPPORT = 3
VIR_ERR_XML_DETAIL = 350
VIR_ERR_NO_DOMAIN = 420
VIR_ERR_OPERATION_INVALID = 55
VIR_ERR_OPERATION_TIMEOUT = 68
VIR_ERR_NO_NWFILTER = 620
VIR_ERR_SYSTEM_ERROR = 900
VIR_ERR_INTERNAL_ERROR = 950
VIR_ERR_CONFIG_UNSUPPORTED = 951
VIR_ERR_NO_NODE_DEVICE = 667
VIR_ERR_NO_SECRET = 66
# Readonly
VIR_CONNECT_RO = 1
# virConnectBaselineCPU flags
VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES = 1
# snapshotCreateXML flags
VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA = 4
VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY = 16
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
# blockCommit flags
VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
# secret type
VIR_SECRET_USAGE_TYPE_NONE = 0
VIR_SECRET_USAGE_TYPE_VOLUME = 1
VIR_SECRET_USAGE_TYPE_CEPH = 2
VIR_SECRET_USAGE_TYPE_ISCSI = 3
def _parse_disk_info(element):
disk_info = {}
disk_info['type'] = element.get('type', 'file')
disk_info['device'] = element.get('device', 'disk')
driver = element.find('./driver')
if driver is not None:
disk_info['driver_name'] = driver.get('name')
disk_info['driver_type'] = driver.get('type')
source = element.find('./source')
if source is not None:
disk_info['source'] = source.get('file')
if not disk_info['source']:
disk_info['source'] = source.get('dev')
if not disk_info['source']:
disk_info['source'] = source.get('path')
target = element.find('./target')
if target is not None:
disk_info['target_dev'] = target.get('dev')
disk_info['target_bus'] = target.get('bus')
return disk_info
class libvirtError(Exception):
"""This class was copied and slightly modified from
`libvirt-python:libvirt-override.py`.
Since a test environment will use the real `libvirt-python` version of
`libvirtError` if it's installed and not this fake, we need to maintain
strict compatibility with the original class, including `__init__` args
and instance-attributes.
To create a libvirtError instance you should:
# Create an unsupported error exception
exc = libvirtError('my message')
exc.err = (libvirt.VIR_ERR_NO_SUPPORT,)
self.err is a tuple of form:
(error_code, error_domain, error_message, error_level, str1, str2,
str3, int1, int2)
Alternatively, you can use the `make_libvirtError` convenience function to
allow you to specify these attributes in one shot.
"""
def __init__(self, defmsg, conn=None, dom=None, net=None, pool=None,
vol=None):
Exception.__init__(self, defmsg)
self.err = None
def get_error_code(self):
if self.err is None:
return None
return self.err[0]
def get_error_domain(self):
if self.err is None:
return None
return self.err[1]
def get_error_message(self):
if self.err is None:
return None
return self.err[2]
def get_error_level(self):
if self.err is None:
return None
return self.err[3]
def get_str1(self):
if self.err is None:
return None
return self.err[4]
def get_str2(self):
if self.err is None:
return None
return self.err[5]
def get_str3(self):
if self.err is None:
return None
return self.err[6]
def get_int1(self):
if self.err is None:
return None
return self.err[7]
def get_int2(self):
if self.err is None:
return None
return self.err[8]
class NWFilter(object):
def __init__(self, connection, xml):
self._connection = connection
self._xml = xml
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def undefine(self):
self._connection._remove_filter(self)
class NodeDevice(object):
def __init__(self, connection, xml=None):
self._connection = connection
self._xml = xml
if xml is not None:
self._parse_xml(xml)
def _parse_xml(self, xml):
tree = etree.fromstring(xml)
root = tree.find('.')
self._name = root.get('name')
def attach(self):
pass
def dettach(self):
pass
def reset(self):
pass
class Domain(object):
def __init__(self, connection, xml, running=False, transient=False):
self._connection = connection
if running:
connection._mark_running(self)
self._state = running and VIR_DOMAIN_RUNNING or VIR_DOMAIN_SHUTOFF
self._transient = transient
self._def = self._parse_definition(xml)
self._has_saved_state = False
self._snapshots = {}
self._id = self._connection._id_counter
def _parse_definition(self, xml):
try:
tree = etree.fromstring(xml)
except etree.ParseError:
raise make_libvirtError(
libvirtError, "Invalid XML.",
error_code=VIR_ERR_XML_DETAIL,
error_domain=VIR_FROM_DOMAIN)
definition = {}
name = tree.find('./name')
if name is not None:
definition['name'] = name.text
uuid_elem = tree.find('./uuid')
if uuid_elem is not None:
definition['uuid'] = uuid_elem.text
else:
definition['uuid'] = str(uuid.uuid4())
vcpu = tree.find('./vcpu')
if vcpu is not None:
definition['vcpu'] = int(vcpu.text)
memory = tree.find('./memory')
if memory is not None:
definition['memory'] = int(memory.text)
os = {}
os_type = tree.find('./os/type')
if os_type is not None:
os['type'] = os_type.text
os['arch'] = os_type.get('arch', node_arch)
os_kernel = tree.find('./os/kernel')
if os_kernel is not None:
os['kernel'] = os_kernel.text
os_initrd = tree.find('./os/initrd')
if os_initrd is not None:
os['initrd'] = os_initrd.text
os_cmdline = tree.find('./os/cmdline')
if os_cmdline is not None:
os['cmdline'] = os_cmdline.text
os_boot = tree.find('./os/boot')
if os_boot is not None:
os['boot_dev'] = os_boot.get('dev')
definition['os'] = os
features = {}
acpi = tree.find('./features/acpi')
if acpi is not None:
features['acpi'] = True
definition['features'] = features
devices = {}
device_nodes = tree.find('./devices')
if device_nodes is not None:
disks_info = []
disks = device_nodes.findall('./disk')
for disk in disks:
disks_info += [_parse_disk_info(disk)]
devices['disks'] = disks_info
nics_info = []
nics = device_nodes.findall('./interface')
for nic in nics:
nic_info = {}
nic_info['type'] = nic.get('type')
mac = nic.find('./mac')
if mac is not None:
nic_info['mac'] = mac.get('address')
source = nic.find('./source')
if source is not None:
if nic_info['type'] == 'network':
nic_info['source'] = source.get('network')
elif nic_info['type'] == 'bridge':
nic_info['source'] = source.get('bridge')
nics_info += [nic_info]
devices['nics'] = nics_info
definition['devices'] = devices
return definition
def create(self):
self.createWithFlags(0)
def createWithFlags(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
self._has_saved_state = False
def isActive(self):
return int(self._state == VIR_DOMAIN_RUNNING)
def undefine(self):
self._connection._undefine(self)
def undefineFlags(self, flags):
self.undefine()
if flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE:
if self.hasManagedSaveImage(0):
self.managedSaveRemove()
def destroy(self):
self._state = VIR_DOMAIN_SHUTOFF
self._connection._mark_not_running(self)
def ID(self):
return self._id
def name(self):
return self._def['name']
def UUIDString(self):
return self._def['uuid']
def interfaceStats(self, device):
return [10000242400, 1234, 0, 2, 213412343233, 34214234, 23, 3]
def blockStats(self, device):
return [2, 10000242400, 234, 2343424234, 34]
def suspend(self):
self._state = VIR_DOMAIN_PAUSED
def shutdown(self):
self._state = VIR_DOMAIN_SHUTDOWN
self._connection._mark_not_running(self)
def reset(self, flags):
# FIXME: Not handling flags at the moment
self._state = VIR_DOMAIN_RUNNING
self._connection._mark_running(self)
def info(self):
return [self._state,
long(self._def['memory']),
long(self._def['memory']),
self._def['vcpu'],
123456789L]
def migrateToURI(self, desturi, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def migrateToURI2(self, dconnuri, miguri, dxml, flags, dname, bandwidth):
raise make_libvirtError(
libvirtError,
"Migration always fails for fake libvirt!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def attachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
self._def['devices']['disks'] += [disk_info]
return True
def attachDeviceFlags(self, xml, flags):
if (flags & VIR_DOMAIN_AFFECT_LIVE and
self._state != VIR_DOMAIN_RUNNING):
raise make_libvirtError(
libvirtError,
"AFFECT_LIVE only allowed for running domains!",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
self.attachDevice(xml)
def detachDevice(self, xml):
disk_info = _parse_disk_info(etree.fromstring(xml))
disk_info['_attached'] = True
return disk_info in self._def['devices']['disks']
def detachDeviceFlags(self, xml, _flags):
self.detachDevice(xml)
def XMLDesc(self, flags):
disks = ''
for disk in self._def['devices']['disks']:
disks += '''<disk type='%(type)s' device='%(device)s'>
<driver name='%(driver_name)s' type='%(driver_type)s'/>
<source file='%(source)s'/>
<target dev='%(target_dev)s' bus='%(target_bus)s'/>
<address type='drive' controller='0' bus='0' unit='0'/>
</disk>''' % disk
nics = ''
for nic in self._def['devices']['nics']:
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
<source %(type)s='%(source)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x0'/>
</interface>''' % nic
return '''<domain type='kvm'>
<name>%(name)s</name>
<uuid>%(uuid)s</uuid>
<memory>%(memory)s</memory>
<currentMemory>%(memory)s</currentMemory>
<vcpu>%(vcpu)s</vcpu>
<os>
<type arch='%(arch)s' machine='pc-0.12'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<pae/>
</features>
<clock offset='localtime'/>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>restart</on_crash>
<devices>
<emulator>/usr/bin/kvm</emulator>
%(disks)s
<controller type='ide' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01'
function='0x1'/>
</controller>
%(nics)s
<serial type='file'>
<source path='dummy.log'/>
<target port='0'/>
</serial>
<serial type='pty'>
<source pty='/dev/pts/27'/>
<target port='1'/>
</serial>
<serial type='tcp'>
<source host="-1" service="-1" mode="bind"/>
</serial>
<console type='file'>
<source path='dummy.log'/>
<target port='0'/>
</console>
<input type='tablet' bus='usb'/>
<input type='mouse' bus='ps2'/>
<graphics type='vnc' port='-1' autoport='yes'/>
<graphics type='spice' port='-1' autoport='yes'/>
<video>
<model type='cirrus' vram='9216' heads='1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02'
function='0x0'/>
</video>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04'
function='0x0'/>
</memballoon>
</devices>
</domain>''' % {'name': self._def['name'],
'uuid': self._def['uuid'],
'memory': self._def['memory'],
'vcpu': self._def['vcpu'],
'arch': self._def['os']['arch'],
'disks': disks,
'nics': nics}
def managedSave(self, flags):
self._connection._mark_not_running(self)
self._has_saved_state = True
def managedSaveRemove(self, flags):
self._has_saved_state = False
def hasManagedSaveImage(self, flags):
return int(self._has_saved_state)
def resume(self):
self._state = VIR_DOMAIN_RUNNING
def snapshotCreateXML(self, xml, flags):
tree = etree.fromstring(xml)
name = tree.find('./name').text
snapshot = DomainSnapshot(name, self)
self._snapshots[name] = snapshot
return snapshot
def vcpus(self):
vcpus = ([], [])
for i in range(0, self._def['vcpu']):
vcpus[0].append((i, 1, 120405L, i))
vcpus[1].append((True, True, True, True))
return vcpus
def memoryStats(self):
return {}
def maxMemory(self):
return self._def['memory']
def blockJobInfo(self, disk, flags):
return {}
class DomainSnapshot(object):
def __init__(self, name, domain):
self._name = name
self._domain = domain
def delete(self, flags):
del self._domain._snapshots[self._name]
class Connection(object):
def __init__(self, uri=None, readonly=False, version=9011):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
else:
raise ValueError("URI was None, but fake libvirt is "
"configured to not accept this.")
uri_whitelist = ['qemu:///system',
'qemu:///session',
'lxc:///', # from LibvirtDriver.uri()
'xen:///', # from LibvirtDriver.uri()
'uml:///system',
'test:///default',
'parallels:///system']
if uri not in uri_whitelist:
raise make_libvirtError(
libvirtError,
"libvirt error: no connection driver "
"available for No connection for URI %s" % uri,
error_code=5, error_domain=0)
self.readonly = readonly
self._uri = uri
self._vms = {}
self._running_vms = {}
self._id_counter = 1 # libvirt reserves 0 for the hypervisor.
self._nwfilters = {}
self._nodedevs = {}
self._event_callbacks = {}
self.fakeLibVersion = version
self.fakeVersion = version
def _add_filter(self, nwfilter):
self._nwfilters[nwfilter._name] = nwfilter
def _remove_filter(self, nwfilter):
del self._nwfilters[nwfilter._name]
def _add_nodedev(self, nodedev):
self._nodedevs[nodedev._name] = nodedev
def _remove_nodedev(self, nodedev):
del self._nodedevs[nodedev._name]
def _mark_running(self, dom):
self._running_vms[self._id_counter] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
self._id_counter += 1
def _mark_not_running(self, dom):
if dom._transient:
self._undefine(dom)
dom._id = -1
for (k, v) in self._running_vms.iteritems():
if v == dom:
del self._running_vms[k]
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STOPPED, 0)
return
def _undefine(self, dom):
del self._vms[dom.name()]
if not dom._transient:
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_UNDEFINED, 0)
def getInfo(self):
return [node_arch,
node_kB_mem,
node_cpus,
node_mhz,
node_nodes,
node_sockets,
node_cores,
node_threads]
def numOfDomains(self):
return len(self._running_vms)
def listDomainsID(self):
return self._running_vms.keys()
def lookupByID(self, id):
if id in self._running_vms:
return self._running_vms[id]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching id %d' % id,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def lookupByName(self, name):
if name in self._vms:
return self._vms[name]
raise make_libvirtError(
libvirtError,
'Domain not found: no domain with matching name "%s"' % name,
error_code=VIR_ERR_NO_DOMAIN,
error_domain=VIR_FROM_QEMU)
def listAllDomains(self, flags):
vms = []
        for vm in self._vms.values():
            if flags & VIR_CONNECT_LIST_DOMAINS_ACTIVE:
                if vm._state != VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
            if flags & VIR_CONNECT_LIST_DOMAINS_INACTIVE:
                if vm._state == VIR_DOMAIN_SHUTOFF:
                    vms.append(vm)
return vms
def _emit_lifecycle(self, dom, event, detail):
if VIR_DOMAIN_EVENT_ID_LIFECYCLE not in self._event_callbacks:
return
cbinfo = self._event_callbacks[VIR_DOMAIN_EVENT_ID_LIFECYCLE]
callback = cbinfo[0]
opaque = cbinfo[1]
callback(self, dom, event, detail, opaque)
def defineXML(self, xml):
dom = Domain(connection=self, running=False, transient=False, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_DEFINED, 0)
return dom
def createXML(self, xml, flags):
dom = Domain(connection=self, running=True, transient=True, xml=xml)
self._vms[dom.name()] = dom
self._emit_lifecycle(dom, VIR_DOMAIN_EVENT_STARTED, 0)
return dom
def getType(self):
if self._uri == 'qemu:///system':
return 'QEMU'
def getLibVersion(self):
return self.fakeLibVersion
def getVersion(self):
return self.fakeVersion
def getHostname(self):
return 'compute1'
def domainEventRegisterAny(self, dom, eventid, callback, opaque):
self._event_callbacks[eventid] = [callback, opaque]
def registerCloseCallback(self, cb, opaque):
pass
def getCapabilities(self):
"""Return spoofed capabilities."""
return '''<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
</cpu>
<migration_features>
<live/>
<uri_transports>
<uri_transport>tcp</uri_transport>
</uri_transports>
</migration_features>
<secmodel>
<model>apparmor</model>
<doi>0</doi>
</secmodel>
</host>
<guest>
<os_type>hvm</os_type>
<arch name='i686'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<pae/>
<nonpae/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='x86_64'>
<wordsize>64</wordsize>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
<domain type='qemu'>
</domain>
<domain type='kvm'>
<emulator>/usr/bin/kvm</emulator>
<machine>pc-0.14</machine>
<machine canonical='pc-0.14'>pc</machine>
<machine>pc-0.13</machine>
<machine>pc-0.12</machine>
<machine>pc-0.11</machine>
<machine>pc-0.10</machine>
<machine>isapc</machine>
</domain>
</arch>
<features>
<cpuselection/>
<deviceboot/>
<acpi default='on' toggle='yes'/>
<apic default='on' toggle='no'/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='armv7l'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-arm</emulator>
<machine>integratorcp</machine>
<machine>vexpress-a9</machine>
<machine>syborg</machine>
<machine>musicpal</machine>
<machine>mainstone</machine>
<machine>n800</machine>
<machine>n810</machine>
<machine>n900</machine>
<machine>cheetah</machine>
<machine>sx1</machine>
<machine>sx1-v1</machine>
<machine>beagle</machine>
<machine>beaglexm</machine>
<machine>tosa</machine>
<machine>akita</machine>
<machine>spitz</machine>
<machine>borzoi</machine>
<machine>terrier</machine>
<machine>connex</machine>
<machine>verdex</machine>
<machine>lm3s811evb</machine>
<machine>lm3s6965evb</machine>
<machine>realview-eb</machine>
<machine>realview-eb-mpcore</machine>
<machine>realview-pb-a8</machine>
<machine>realview-pbx-a9</machine>
<machine>versatilepb</machine>
<machine>versatileab</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mips'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mips</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='mipsel'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-mipsel</emulator>
<machine>malta</machine>
<machine>mipssim</machine>
<machine>magnum</machine>
<machine>pica61</machine>
<machine>mips</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='sparc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-sparc</emulator>
<machine>SS-5</machine>
<machine>leon3_generic</machine>
<machine>SS-10</machine>
<machine>SS-600MP</machine>
<machine>SS-20</machine>
<machine>Voyager</machine>
<machine>LX</machine>
<machine>SS-4</machine>
<machine>SPARCClassic</machine>
<machine>SPARCbook</machine>
<machine>SS-1000</machine>
<machine>SS-2000</machine>
<machine>SS-2</machine>
<domain type='qemu'>
</domain>
</arch>
</guest>
<guest>
<os_type>hvm</os_type>
<arch name='ppc'>
<wordsize>32</wordsize>
<emulator>/usr/bin/qemu-system-ppc</emulator>
<machine>g3beige</machine>
<machine>virtex-ml507</machine>
<machine>mpc8544ds</machine>
<machine canonical='bamboo-0.13'>bamboo</machine>
<machine>bamboo-0.13</machine>
<machine>bamboo-0.12</machine>
<machine>ref405ep</machine>
<machine>taihu</machine>
<machine>mac99</machine>
<machine>prep</machine>
<domain type='qemu'>
</domain>
</arch>
<features>
<deviceboot/>
</features>
</guest>
</capabilities>'''
def compareCPU(self, xml, flags):
tree = etree.fromstring(xml)
arch_node = tree.find('./arch')
if arch_node is not None:
if arch_node.text not in [arch.X86_64,
arch.I686]:
return VIR_CPU_COMPARE_INCOMPATIBLE
model_node = tree.find('./model')
if model_node is not None:
if model_node.text != node_cpu_model:
return VIR_CPU_COMPARE_INCOMPATIBLE
vendor_node = tree.find('./vendor')
if vendor_node is not None:
if vendor_node.text != node_cpu_vendor:
return VIR_CPU_COMPARE_INCOMPATIBLE
# The rest of the stuff libvirt implements is rather complicated
# and I don't think it adds much value to replicate it here.
return VIR_CPU_COMPARE_IDENTICAL
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000L,
'idle': 1592705190000000L,
'user': 26728850000000L,
'iowait': 6121490000000L}
else:
raise make_libvirtError(
libvirtError,
"invalid argument: Invalid cpu number",
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
def nwfilterLookupByName(self, name):
try:
return self._nwfilters[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nwfilter with matching name %s" % name,
error_code=VIR_ERR_NO_NWFILTER,
error_domain=VIR_FROM_NWFILTER)
def nwfilterDefineXML(self, xml):
nwfilter = NWFilter(self, xml)
self._add_filter(nwfilter)
def nodeDeviceLookupByName(self, name):
try:
return self._nodedevs[name]
except KeyError:
raise make_libvirtError(
libvirtError,
"no nodedev with matching name %s" % name,
error_code=VIR_ERR_NO_NODE_DEVICE,
error_domain=VIR_FROM_NODEDEV)
def listDefinedDomains(self):
return []
def listDevices(self, cap, flags):
return []
def baselineCPU(self, cpu, flag):
"""Add new libvirt API."""
return """<cpu mode='custom' match='exact'>
<model>Penryn</model>
<vendor>Intel</vendor>
<feature name='xtpr'/>
<feature name='tm2'/>
<feature name='est'/>
<feature name='vmx'/>
<feature name='ds_cpl'/>
<feature name='monitor'/>
<feature name='pbe'/>
<feature name='tm'/>
<feature name='ht'/>
<feature name='ss'/>
<feature name='acpi'/>
<feature name='ds'/>
<feature name='vme'/>
<feature policy='require' name='aes'/>
</cpu>"""
def secretLookupByUsage(self, usage_type_obj, usage_id):
pass
def secretDefineXML(self, xml):
pass
def openAuth(uri, auth, flags):
if type(auth) != list:
raise Exception("Expected a list for 'auth' parameter")
if type(auth[0]) != list:
raise Exception("Expected a function in 'auth[0]' parameter")
if not callable(auth[1]):
raise Exception("Expected a function in 'auth[1]' parameter")
return Connection(uri, (flags == VIR_CONNECT_RO))
def virEventRunDefaultImpl():
time.sleep(1)
def virEventRegisterDefaultImpl():
if connection_used:
raise Exception("virEventRegisterDefaultImpl() must be "
"called before connection is used.")
def registerErrorHandler(handler, ctxt):
pass
def make_libvirtError(error_class, msg, error_code=None,
error_domain=None, error_message=None,
error_level=None, str1=None, str2=None, str3=None,
int1=None, int2=None):
"""Convenience function for creating `libvirtError` exceptions which
allow you to specify arguments in constructor without having to manipulate
the `err` tuple directly.
We need to pass in `error_class` to this function because it may be
`libvirt.libvirtError` or `fakelibvirt.libvirtError` depending on whether
`libvirt-python` is installed.
"""
exc = error_class(msg)
exc.err = (error_code, error_domain, error_message, error_level,
str1, str2, str3, int1, int2)
return exc
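# Illustrative usage sketch (not part of the original module): building an
# error the same way the fake Connection methods above do, then reading it
# back through the accessor methods. All names used below exist in this file.
#
#     exc = make_libvirtError(
#         libvirtError, 'Domain not found: no domain with matching name "x"',
#         error_code=VIR_ERR_NO_DOMAIN, error_domain=VIR_FROM_QEMU)
#     assert exc.get_error_code() == VIR_ERR_NO_DOMAIN
#     assert exc.get_error_domain() == VIR_FROM_QEMU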
virDomain = Domain
virNodeDevice = NodeDevice
virConnect = Connection
class FakeLibvirtFixture(fixtures.Fixture):
"""This fixture patches the libvirt.openAuth method so that it
always returns an instance of fakelibvirt.virConnect. This
ensures the tests don't mistakenly connect to a real libvirt
daemon instance which would lead to non-deterministic behaviour.
"""
def setUp(self):
super(FakeLibvirtFixture, self).setUp()
try:
import libvirt
patcher = mock.patch.object(
libvirt, "openAuth",
return_value=virConnect("qemu:///system"))
patcher.start()
self.addCleanup(patcher.stop)
except ImportError:
# If we can't import libvirt, the tests will use
            # fakelibvirt regardless, so nothing to do here.
pass
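# Usage sketch (illustrative only; assumes a testtools/fixtures based test
# case, which is not part of this module):
#
#     class MyLibvirtTestCase(testtools.TestCase):
#         def setUp(self):
#             super(MyLibvirtTestCase, self).setUp()
#             self.useFixture(FakeLibvirtFixture())
#             # libvirt.openAuth() now returns a fakelibvirt.virConnect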
|
apache-2.0
| 8,469,707,087,257,902,000 | 28.036606 | 78 | 0.577388 | false |
impredicative/gcam
|
src/__main__.py
|
1
|
54173
|
#!/usr/bin/env python3
# GPFS Current Activity Monitor
# Run with -h to print help and allowable arguments.
# See params.py for more customizations.
import argparse, collections, curses, datetime, functools, inspect
import itertools, locale, logging, signal, subprocess, sys, threading, time
# Local imports
import common, errors, params
from prettytable import PrettyTable
from numsort import numsorted # Uses "@functools.lru_cache(maxsize=None)"
class ArgParser(argparse.ArgumentParser):
"""Parse and store input arguments. Arguments on the command line override
those in the parameters file."""
def __init__(self):
epilog = "Pressing the '{}' key pauses or resumes the display.".format(
params.DISPLAY_PAUSE_KEY)
super().__init__(description=params._PROGRAM_NAME, epilog=epilog,
prog=params._PROGRAM_NAME_SHORT)
self._add_misc_args()
self._add_logging_args()
self._args = self.parse_args()
self._store_misc_args()
self._store_logging_args()
def _add_misc_args(self):
"""Add miscellaneous arguments to parser."""
self.add_argument('-hn', default=params.MMPMON_HOST,
help='GPFS node name on which to run mmpmon '
'(requires automated SSH login if not '
'localhost) (currently: %(default)s)')
self.add_argument('-n', type=float,
default=params.MONITORING_INTERVAL_SECS,
help='Refresh interval in seconds (%(type)s >=1) '
'(currently: %(default)s)')
nodeset = params.GPFS_NODESET or '(first available)'
self.add_argument('-ns', default=params.GPFS_NODESET,
help='GPFS nodeset (currently: {})'.format(nodeset))
self.add_argument('-t', default=params.TABLE_TYPE, choices=('s', 'i'),
help="Table type ('s'eparated or 'i'nterlaced) "
'(currently: %(default)s)')
def _add_logging_args(self):
"""Add logging arguments to parser."""
arg_group = self.add_argument_group(title='Diagnostic logging arguments')
logging_status = 'enabled' if params.LOG_FILE_WRITE else 'disabled'
arg_group.add_argument('-l', action='store_true',
default=params.LOG_FILE_WRITE,
help='Enable logging to file '
'(currently: {})'.format(logging_status))
arg_group.add_argument('-lf', default=params.LOG_FILE_PATH,
help='Log file path (if logging is enabled) '
"(currently: '%(default)s')")
        # type=argparse.FileType('w') is not specified because argparse would
        # immediately open (and thereby create) the file while parsing, which
        # is undesirable if -l is not specified.
arg_group.add_argument('-ll', default=params.LOG_LEVEL,
choices=('i', 'd'),
help='Log level (if logging is enabled) '
"('i'nfo or 'd'ebug) "
'(currently: %(default)s)')
def _store_misc_args(self):
"""Store parsed miscellaneous arguments."""
params.MMPMON_HOST = self._args.hn
params.MONITORING_INTERVAL_SECS = max(self._args.n, 1)
params.GPFS_NODESET = self._args.ns
if self._args.t == 's': params.TABLE_TYPE = 'separated'
elif self._args.t == 'i': params.TABLE_TYPE = 'interlaced'
def _store_logging_args(self):
"""Store parsed logging arguments."""
params.LOG_FILE_WRITE = self._args.l
params.LOG_FILE_PATH = self._args.lf
if self._args.ll == 'i': params.LOG_LEVEL = 'info'
elif self._args.ll == 'd': params.LOG_LEVEL = 'debug'
class DiagnosticLoggerSetup:
"""Set up a logger to which diagnostic messages can be logged."""
def __init__(self):
self.logger = logging.getLogger(params._PROGRAM_NAME_SHORT)
self._configure()
if params.LOG_FILE_WRITE:
self._log_basics()
self._log_params()
def _configure(self):
"""Configure the logger with a level and a formatted handler."""
if params.LOG_FILE_WRITE:
# Set level
level = getattr(logging, params.LOG_LEVEL.upper())
self.logger.setLevel(level)
# Create formatter
attributes = ('asctime', 'levelname', 'module', 'lineno',
'threadName', 'message')
attributes = ['%({})s'.format(a) for a in attributes]
attributes.insert(0, '')
format_ = '::'.join(attributes)
formatter = logging.Formatter(format_)
# Add handler
handler = logging.FileHandler(params.LOG_FILE_PATH, mode='w')
handler.setFormatter(formatter)
else:
            handler = logging.NullHandler()
self.logger.addHandler(handler)
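        # With the format string built above, a log record is written roughly
        # as (timestamp, line number and message are illustrative values only):
        #     ::2014-01-01 12:00:00,000::INFO::__main__::1014::MainThread::msg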
def _log_basics(self):
"""Retrieve and log basics about the operating system, environment,
platform, program input arguments, and the Python installation in
use."""
import os, platform, sys #@UnusedImport @Reimport
items = ('os.name',
'os.getcwd()',
'os.ctermid()',
'os.getlogin()',
"os.getenv('USER')",
"os.getenv('DISPLAY')",
"os.getenv('LANG')",
"os.getenv('TERM')",
"os.getenv('SHELL')",
"os.getenv('HOSTNAME')",
"os.getenv('PWD')",
'os.uname()',
'platform.architecture()',
'platform.machine()',
'platform.node()',
'platform.platform()',
'platform.processor()',
'platform.python_build()',
'platform.python_compiler()',
'platform.python_implementation()',
'platform.python_revision()',
'platform.python_version_tuple()',
'platform.release()',
'platform.system()',
'platform.version()',
'platform.uname()',
'platform.dist()',
'sys.argv',
'sys.executable',
'sys.flags',
'sys.path',
'sys.platform',
'sys.version',
'sys.version_info',
)
# Run above-mentioned code and log the respective outputs
for source in items:
value = str(eval(source)).replace('\n', ' ')
message = '{}::{}'.format(source, value)
self.logger.info(message)
def _log_params(self):
"""Log the names and values of all parameters."""
for item in dir(params):
if (not item.startswith('__')) and (item == item.upper()):
value = str(getattr(params, item)).replace('\n', ' ')
message = 'params.{}::{}'.format(item, value)
self.logger.info(message)
class Logger:
"""
Provide a base class to provision logging functionality.
    Instances of derived classes can log messages via the shared self.logger
    attribute and the self.logvar helper method.
"""
logger = logging.getLogger(params._PROGRAM_NAME_SHORT)
def logvar(self, var_str, level='info'):
"""Log the provided variable's access string and value, and also its
class and method names at the optionally indicated log level.
The variable can be a local variable. Alternatively, if accessed using
the 'self.' prefix, it can be a class instance variable or otherwise a
class variable.
"""
# Inspect the stack
stack = inspect.stack()
try:
# Obtain class and method names
class_name = stack[1][0].f_locals['self'].__class__.__name__
method_name = stack[1][3]
# Obtain variable value
if not var_str.startswith('self.'):
# Assuming local variable
var_val = stack[1][0].f_locals[var_str]
else:
var_name = var_str[5:] # len('self.') = 5
try:
# Assuming class instance variable
var_val = stack[1][0].f_locals['self'].__dict__[var_name]
except KeyError:
# Assuming class variable
var_val = (stack[1][0].f_locals['self'].__class__.__dict__
[var_name])
finally:
del stack # Recommended.
# See http://docs.python.org/py3k/library/inspect.html#the-interpreter-stack
# Format and log the message
message = '{}.{}::{}::{}'.format(class_name, method_name, var_str,
var_val)
level = getattr(logging, level.upper())
self.logger.log(level, message)
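        # Illustrative call from inside a method of a subclass (the names
        # below are made up):
        #     node_count = 4
        #     self.logvar('node_count')
        #     # logs e.g. "SomeClass.some_method::node_count::4"
        #     self.logvar('self._num_node', 'debug')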
class Receiver(Logger):
"""Return an iterable containing mmpmon fs_io_s recordset containing
records for all responding nodes and file systems."""
def __iter__(self):
return self._fsios_record_group_objectifier()
def close(self):
"""
Close the subprocess providing data to the iterator.
This must be used if an unhandled exception occurs.
"""
try: self._mmpmon_subprocess.terminate()
except AttributeError: pass
@staticmethod
def _process_cmd_args(cmd_args):
"""Return command line arguments conditionally modified to run on
remote host."""
if params.MMPMON_HOST not in ('localhost', 'localhost.localdomain',
'127.0.0.1'):
cmd_args = params.SSH_ARGS + [params.MMPMON_HOST] + cmd_args
return cmd_args
@property
def node_seq(self):
"""Return a sequence of strings with names of all GPFS nodes in the
specified nodeset."""
try:
return self._node_seq
except AttributeError:
cmd_args = [r'/usr/lpp/mmfs/bin/mmlsnode']
cmd_args = self._process_cmd_args(cmd_args)
self.logvar('cmd_args')
try:
output = subprocess.check_output(cmd_args)
except (OSError, subprocess.CalledProcessError) as exception:
# OSError example:
# [Errno 2] No such file or directory:
# '/usr/lpp/mmfs/bin/mmlsnode'
# subprocess.CalledProcessError example:
# Command '['ssh', '-o', 'BatchMode=yes', '-o',
# 'ConnectTimeout=4', 'invalidhost',
# '/usr/lpp/mmfs/bin/mmlsnode']' returned non-zero exit
# status 255
raise errors.SubprocessError(str(exception))
output = output.split(b'\n')[2:-1]
# Extract node names for relevant nodeset only
for line in output:
line = line.decode()
node_set, node_seq = line.split(None, 1)
if ((not params.GPFS_NODESET) or
(node_set == params.GPFS_NODESET)):
node_seq = node_seq.split()
node_seq.sort() # possibly useful if viewing logs
self._node_seq = node_seq
return self._node_seq
else:
if params.GPFS_NODESET:
err = '{} is not a valid nodeset per mmlsnode'.format(
params.GPFS_NODESET)
else:
err = 'no nodeset could be found using mmlsnode'
raise errors.ArgumentError(err)
@property
def num_node(self):
"""Return the number of GPFS nodes in the specified nodeset."""
try:
return self._num_node
except AttributeError:
if not params.DEBUG_MODE:
self._num_node = len(self.node_seq)
else:
self._num_node = len(params.DEBUG_NODES)
return self._num_node
def _mmpmon_caller(self):
"""Run and prepare the mmpmon subprocess to output data."""
# Determine input arguments
delay = str(int(params.MONITORING_INTERVAL_SECS * 1000))
# int above removes decimals
runs = '0' if not params.DEBUG_MODE else str(params.DEBUG_MMPMON_RUNS)
# Determine mmpmon command
cmd_args = [r'/usr/lpp/mmfs/bin/mmpmon', '-p', '-s', '-r', runs,
'-d', delay]
cmd_args = self._process_cmd_args(cmd_args)
self.logvar('cmd_args')
# Determine input commands to mmpmon process
node_seq = params.DEBUG_NODES if params.DEBUG_MODE else self.node_seq
for node in node_seq: self.logvar('node') #@UnusedVariable
mmpmon_inputs = ('nlist add {}'.format(node) for node in node_seq)
# While multiple nodes can be added using the same nlist command,
# this apparently restricts the number of nodes added to 98 per
# nlist command. Due to this restriction, only one node is added
# per command instead.
mmpmon_inputs = itertools.chain(mmpmon_inputs, ('fs_io_s',))
# Call subprocess, and provide it with relevant commands
self._mmpmon_subprocess = subprocess.Popen(cmd_args,
bufsize=-1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for mmpmon_input in mmpmon_inputs:
mmpmon_input = mmpmon_input.encode() + b'\n'
self.logvar('mmpmon_input', 'debug')
self._mmpmon_subprocess.stdin.write(mmpmon_input)
self._mmpmon_subprocess.stdin.close() # this also does flush()
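        # For example (hypothetical node names, a 5s interval, non-debug mode,
        # mmpmon running locally), the subprocess is invoked roughly as
        #     /usr/lpp/mmfs/bin/mmpmon -p -s -r 0 -d 5000
        # and is fed stdin equivalent to:
        #     nlist add node1
        #     nlist add node2
        #     fs_io_s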
def _mmpmon_stdout_processor(self):
"""Yield lines of text returned by mmpmon."""
self._mmpmon_caller()
# Handle possible known error message
        line = next(self._mmpmon_subprocess.stdout)
line = self._mmpmon_line_processor(line)
if line == 'Could not establish connection to file system daemon.':
err_msg = ('Only a limited number of mmpmon processes can be run '
'simultaneously on a host. Kill running instances of '
'this application that are no longer needed. Also kill '
'unnecessary existing mmpmon processes on MMPMON_HOST, '
'i.e. {}.').format(params.MMPMON_HOST)
# A max of 5 mmpmon processes were observed to run simultaneously
# on a host.
raise errors.SubprocessError(err_msg)
# Yield mmpmon output
yield line # (obtained earlier)
for line in self._mmpmon_subprocess.stdout:
yield self._mmpmon_line_processor(line)
def _mmpmon_line_processor(self, line):
"""Return a formatted version of a line returned by mmpmon, so it can
be used for further processing."""
line = line.decode().rstrip()
if params.LOG_FILE_WRITE and \
(self.logger.getEffectiveLevel() <= logging.DEBUG) and \
params.LOG_NUM_MMPMON_LINES:
# Note: It is uncertain whether grouping the above conditions into
# a single tuple will result in short-circuit evaluation.
params.LOG_NUM_MMPMON_LINES -= 1
self.logvar('line', 'debug') # CPU and disk intensive
# else:
# # Simplify method definition to avoid the now unnecessary check
# self._mmpmon_line_processor = lambda line: line.decode().rstrip()
return line
def _record_processor(self):
"""Yield dicts corresponding to lines returned by mmpmon."""
for record in self._mmpmon_stdout_processor():
record = record.split()
type_ = record[0][1:-1]
properties = {k[1:-1]: v for k, v in common.grouper(2, record[1:])}
record = {'type':type_, 'properties':properties}
yield record
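        # For illustration only (field names and values are made up and may
        # not match real mmpmon -p output exactly): a line such as
        #     _fs_io_s_ _n_ 10.0.0.1 _nn_ node1 _rc_ 0 _t_ 1 _tu_ 5 _fs_ fs1
        # would yield
        #     {'type': 'fs_io_s',
        #      'properties': {'n': '10.0.0.1', 'nn': 'node1', 'rc': '0',
        #                     't': '1', 'tu': '5', 'fs': 'fs1'}}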
def _fsios_record_filter(self):
"""Yield only fs_io_s records along with their group number."""
# Yield records with their group number
counter = itertools.count(start=1)
for r in self._record_processor():
if (r['type'] == 'fs_io_s' and r['properties']['rc'] == '0'):
r['properties']['gn'] = count #@UndefinedVariable
yield r['properties']
elif (r['type'] == 'nlist' and 'c' in r['properties']):
count = next(counter) #@UnusedVariable
def _fsios_record_objectifier(self):
"""Yield fs_io_s record dicts as Record objects."""
return (Record(record) for record in self._fsios_record_filter())
def _fsios_record_grouper(self):
"""Yield fs_io_s records grouped into a sequence based on their
creation time.
"""
# Group records that were created approximately simultaneously, i.e.
# with the same group number
record_group_iterator = itertools.groupby(
self._fsios_record_objectifier(),
lambda r: r.gn)
# Sort records in each group, and yield groups
for _, record_group in record_group_iterator: # _ = record_group_num
record_group = list(record_group)
# converting from iterator to list, to allow list to be sorted
# later.
for i in range(len(record_group)): del record_group[i].gn
record_group.sort(key = lambda r: (r.nn, r.fs))
# sorting to allow further grouping by nn
# "lambda r: operator.itemgetter('nn', 'fs')(r)"
# may work alternatively
yield record_group
def _fsios_record_group_objectifier(self):
"""Yield fs_io_s record group sequences as RecordGroup objects."""
return (RecordGroup(record_group) for record_group in
self._fsios_record_grouper())
class Record:
"""Return a record object with attributes
    gn, nn, ts, fs, br, bw, brw.
"""
_filter_in_keys = {'gn', 'nn', 't', 'tu', 'fs', 'br', 'bw'}
_non_int_keys = {'gn', 'nn', 'fs'}
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
setattr(self, key, value)
def __str__(self):
return str(self.__dict__)
def __init__(self, fsios_dict):
fsios_dict = self._process(fsios_dict)
self.__dict__.update(fsios_dict)
def _process(self, dict_):
"""Return the processed record dict."""
# Filter out keys that are not needed
dict_ = {key : dict_[key] for key in self._filter_in_keys}
# Convert integer values from str to int
for key in dict_:
if key not in self._non_int_keys:
dict_[key] = int(dict_[key])
# Combine seconds and microseconds
dict_['ts'] = dict_['t'] + dict_['tu']/1000000 # ts = timestamp
for key in ['t', 'tu']: del dict_[key]
# Calculate sum of bytes read and bytes written
dict_['brw'] = dict_['br'] + dict_['bw']
return dict_
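        # Illustrative transformation (values made up): an input dict such as
        #     {'gn': 1, 'nn': 'node1', 'fs': 'fs1', 't': '10', 'tu': '500000',
        #      'br': '1024', 'bw': '2048', ...other mmpmon keys...}
        # becomes
        #     {'gn': 1, 'nn': 'node1', 'fs': 'fs1', 'ts': 10.5,
        #      'br': 1024, 'bw': 2048, 'brw': 3072}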
def __sub__(self, older): # self is newer
return RecordDelta(self, older)
class RecordDelta(Record):
"""Return a record delta object computed from two successive records.
Included attributes are fs, nn, ts, td, br, bw, brw, brps, bwps, brwps.
"""
# Inheriting from Record allows its functions __getitem__, __setitem__ and
# __str__ to be used.
def __init__(self, new, old):
assert new.fs == old.fs and new.nn == old.nn
# Set identifying attribute values
for attr in ('fs', 'nn', 'ts'):
self[attr] = new[attr]
self._compute_deltas_and_speeds(new, old)
def _compute_deltas_and_speeds(self, new, old):
"""Compute transfer deltas and speeds."""
self.td = new.ts - old.ts # td = time delta
for attr in ('br', 'bw', 'brw'):
            self[attr] = (new[attr] - old[attr]) % 18446744073709551616
                # mmpmon counters are 64-bit, so deltas wrap modulo
                # 18446744073709551616 == 2**64
self[attr + 'ps'] = self[attr] / self.td
# (speed is in bytes per second)
#delattr(self, attr)
# If the above delattr line is uncommented, then
# RecordGroupDelta._lev2_summary_stat_types should not contain
# br, bw, brw.
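        # Worked example (illustrative numbers): if old.br == 1000,
        # new.br == 4000 and the two records are 1.5s apart, then
        # self.br == 3000 and self.brps == 2000.0 bytes/s.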
class RecordGroup:
"""Return a record group object from a sequence of Record objects. Stats
are available as attributes recs, lev1_summary_stats, and
lev2_summary_stats. Timestamp is available as attribute timestamp.
"""
_count = 0
_lev1_summary_stat_types = ('nn', 'fs') # (totals)
_lev2_summary_stat_types = ('br', 'bw', 'brw') # (grand totals)
def count(self):
"""Increment class instance count."""
self.__class__._count += 1
self._count = self.__class__._count # (makes copy as is desired)
def __init__(self, recs):
self.count()
self.recs = recs
self.timestamp = max((datetime.datetime.fromtimestamp(rec.ts) for
rec in self.recs))
#self.compute_summary_stats()
# not necessary, except for debugging these values
def rec_select(self, nn, fs):
"""Return the record for the given node name and file system. None is
returned if the record is not found. Note that iterating over records
using this approach approximately has a complexity of O(n**2).
"""
for rec in self.recs:
if nn == rec.nn and fs == rec.fs:
return rec
def compute_summary_stats(self):
"""Compute summary stats for records, and store them in
self.lev1_summary_stats and self.lev2_summary_stats."""
self.lev1_summary_stats = {}
self.lev2_summary_stats = {}
# Compute level 1 summary stats
for lev1_stattype in self._lev1_summary_stat_types:
seq = [rec[lev1_stattype] for rec in self.recs]
self._compute_lev1_summary_stats(lev1_stattype, seq)
# Compute level 2 summary stats
for lev2_stattype in self._lev2_summary_stat_types:
self.lev2_summary_stats[lev2_stattype] = sum(rec[lev2_stattype] for
rec in self.recs)
def _compute_lev1_summary_stats(self, lev1_stattype, seq):
"""Compute level 1 summary stats, grouped by items in
self._lev1_summary_stat_types.
"""
self.lev1_summary_stats[lev1_stattype] = {}
for i in seq:
curr_summary_stats = {j:0 for j in self._lev2_summary_stat_types}
for rec in self.recs: # can possibly use itertools for efficiency
if i == rec[lev1_stattype]:
for stat in curr_summary_stats.keys():
curr_summary_stats[stat] += rec[stat]
self.lev1_summary_stats[lev1_stattype][i] = curr_summary_stats
@staticmethod
def _sso(seq, tabs=0):
"""Return an informal String representation for the given Sequence of
Objects.
"""
tabs = '\t' * tabs
if isinstance(seq, dict): seq = sorted(seq.items())
strs = ('\n{}{}'.format(tabs, obj) for obj in seq)
str_ = ''.join(strs)
return str_
def __str__(self):
str_ = '{} (#{}) (as of {}):\n'.format(self.__class__.__name__,
self._count, self.timestamp)
# Try storing string for summary stats
sss = lambda k: '\t\tSummarized by {}:{}'.format(k,
self._sso(self.lev1_summary_stats[k], 3))
try:
lev1_summary_stats = (sss(k) for k in self.lev1_summary_stats)
lev1_summary_stats_str = '\n'.join(lev1_summary_stats)
str_ += '\tSummarized record stats:\n{}{}\n'.format(
lev1_summary_stats_str,
self._sso((self.lev2_summary_stats,), 2))
except AttributeError:
pass
# Store string for individual stats
str_ += '\tIndividual record stats:{}'.format(self._sso(self.recs, 2))
return str_
def __sub__(self, older): # self is newer
return RecordGroupDelta(self, older)
class RecordGroupDelta(RecordGroup):
"""Return a record delta object computed from two successive record groups.
Stats are available as attributes recs, lev1_summary_stats, and
lev2_summary_stats. Timestamp is available as attribute timestamp. Time
duration in seconds of the delta is available as attribute
time_duration_secs.
"""
_count = 0
_lev2_summary_stat_types = ('br', 'bw', 'brw', 'brps', 'bwps', 'brwps')
# (grand totals)
_table_types = collections.OrderedDict((
#('brwps', {'label': 'Read+Write', 'label_short': 'R+W'}),
# brwps is disabled as it takes up valuable screen space
('brps', {'label': 'Read', 'label_short': 'R'}),
('bwps', {'label': 'Write', 'label_short': 'W'}),
))
# Inheriting from RecordGroup allows its functions compute_summary_stats
# __str__, etc. to be used.
def __init__(self, new, old):
self.count()
self.timestamp = new.timestamp
self.time_duration_secs = new.timestamp - old.timestamp
self.time_duration_secs = self.time_duration_secs.total_seconds()
self._compute_recs_deltas(new, old)
self.compute_summary_stats()
def _compute_recs_deltas(self, new, old):
"""Compute deltas (differences) of new and old records, and store them
in self.recs.
"""
self.recs = [] # seq of RecordDelta objects, once populated
# Compute deltas
# (very inefficient, but unoptimizable as long as recs is a seq, and
# not say an efficiently accessible multi-level dict instead)
for rec_new in new.recs:
for rec_old in old.recs:
if rec_new.fs == rec_old.fs and rec_new.nn == rec_old.nn:
rec = rec_new - rec_old
self.recs.append(rec)
break
@staticmethod
def _bytes_str(num_bytes):
"""Return a human readable string representation of the provided number
of bytes. Bytes can be an int or a float or None. Powers of 2 are used.
As such, the units used are binary, and not SI. To save a character,
numbers from 1000 to 1023 are transformed to the next largest unit.
Examples: 256 --> ' 256.0 ', 1012 --> '1.0K ', 1450 --> ' 1.4K',
99**99 --> '3.7e+197', None --> ' N/A '
"""
# To disable thousands-character-saving, increase width by 1, and
# comment out str len test section.
# Note that table field headers are hard-coded to have at least the
# same output length as the general output of this function.
width = 5 # output length is this + 1 for unit
if num_bytes != None:
units = (' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
num_bytes_original = num_bytes
for unit_index, unit in enumerate(units):
if num_bytes < 1024:
if len('{:.1f}'.format(num_bytes)) > width:
# The above condition holds True when num_bytes is
# approximately > 999.94. If num_bytes is always an int, it
# could more simply be ">= 1000".
try:
num_bytes /= 1024
# is always actually less than 1.0, but
# formats as 1.0 with {:.1f}
except OverflowError:
break # num_bytes must be too large
try: unit = units[unit_index + 1]
except IndexError: # units are exhausted
break
str_ = '{:{}.1f}{}'.format(num_bytes, width, unit)
# this is always 6 characters
return str_
try: num_bytes /= 1024
except OverflowError: break
try:
# Fall back to scientific notation.
str_ = '{:{}.1e}'.format(num_bytes_original, width)
return str_
except OverflowError:
# Fall back to basic string representation.
str_ = str(num_bytes_original)
return str_
# String length can be greater than normal for very large numbers.
else:
# num_bytes == None
return '{:^{}}'.format('N/A', width + 1)
@staticmethod
@functools.lru_cache(maxsize=None)
def _mmfa_ipf(num_avail_shares, demands):
"""Return the sequence of shares corresponding to the provided number
of available shares and the sequence of demands. Max-min fair
allocation, implemented by an incremental progressive filling algorithm
is used. Note that the implemented algorithm is not entirely efficient
due to its incremental filling nature.
num_avail_shares should be a non-negative int.
demands should be a hashable sequence (such as a tuple, and not a list)
of non-negative ints.
Results are cached in memory.
"""
demands, indexes = list(zip(*sorted(zip(demands, range(len(demands))),
reverse=True)))
            # This sorts 'demands' and gets the corresponding original indexes.
# indexes, demands = list(zip(*sorted(enumerate(demands),
# key=operator.itemgetter(1),
# reverse=True)))
# # alternative technique for above
# Note that 'reverse' above can be set equal to False for any specific
# applications that require it.
demands = list(demands)
indexes = sorted(range(len(indexes)), key=lambda k: indexes[k])
            # This transforms the indexes so they can later be used to
            # restore the original order.
len_ = len(demands)
shares = [0] * len_
i = 0
while num_avail_shares and any(demands):
if demands[i]:
num_avail_shares -= 1
demands[i] -= 1
shares[i] += 1
i = (i + 1) % len_
shares = tuple(shares[k] for k in indexes)
return shares
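        # Worked example (illustrative): with 5 available shares and demands
        # (4, 1, 3), progressive filling gives one unit per round to each
        # unmet demand until the shares run out:
        #     _mmfa_ipf(5, (4, 1, 3)) == (2, 1, 2)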
def tables_str(self, format_, num_avail_lines=80):
"""Return a string representation of the table types previously
specified in self.__class__._table_types. The representation is of the
specified format, which can be either separated or interlaced. Inactive
nodes are not included.
"""
method_name = '_tables_{}_str'.format(format_)
return getattr(self, method_name)(num_avail_lines)
def _tables_separated_str(self, num_avail_lines):
"""Return a separated string representation of the table types
previously specified in self.__class__._table_types. Inactive nodes are
not included.
"""
# Determine file systems used
fs_seq = numsorted(tuple(self.lev1_summary_stats['fs']))
# tuple results in a hashable object which is required
table_fields = (['Node', 'Total'] +
['{:>6}'.format(fs) for fs in fs_seq])
# 6 is the general len of a str returned by
# self._bytes_str
def nn_active_names_and_lens():
"""Return active node name sequences and their lengths for all
table types. The returned items are dicts.
"""
nn_seq = {}
# Keys and values will be table types and respective node
# names.
nn_seq_len = {}
# A key is a tuple containing
# (table_type, 'displayed' or 'active'). The value is the
# respective nn_seq len.
for table_type in self._table_types:
nn_seq_cur = [nn for nn, nn_summary_stats in
self.lev1_summary_stats['nn'].items() if
nn_summary_stats[table_type] > 0]
#nn_seq_cur.sort()
sort_key = (lambda nn:
self.lev1_summary_stats['nn'][nn][table_type])
nn_seq_cur.sort(key = sort_key, reverse = True)
nn_seq_len[table_type, 'active'] = len(nn_seq_cur)
nn_seq[table_type] = nn_seq_cur
return nn_seq, nn_seq_len
nn_seq, nn_seq_len = nn_active_names_and_lens()
def num_avail_lines_per_table(num_avail_lines, nn_seq):
"""Return a sequence containing the number of available lines for
node name sequences of tables.
"""
num_avail_lines -= (len(self._table_types) * 7)
# 7 is the num of lines cumulatively used by headers, totals
# row, and footers of each table
num_avail_lines = max(num_avail_lines, 0)
lines_reqd_seq = (len(nn_seq[table_type]) for table_type in
self._table_types)
lines_reqd_seq = tuple(lines_reqd_seq)
lines_avail_seq = self._mmfa_ipf(num_avail_lines, lines_reqd_seq)
return lines_avail_seq
lines_avail_seq = num_avail_lines_per_table(num_avail_lines, nn_seq)
def nn_displayed_names_and_lens(nn_seq, nn_seq_len, lines_avail_seq):
"""Return displayed node name sequences and their lengths for all
table types. The returned items are updated dicts.
"""
for table_type, lines_avail in zip(self._table_types,
lines_avail_seq):
nn_seq[table_type] = nn_seq[table_type][:lines_avail]
nn_seq_len[table_type, 'displayed'] = len(nn_seq[table_type])
return nn_seq, nn_seq_len
nn_seq, nn_seq_len = nn_displayed_names_and_lens(nn_seq, nn_seq_len,
lines_avail_seq)
def nn_max_len():
"""Return the max length of a node name across all tables."""
try:
# nn_max_len = max(len(nn_cur) for nn_seq_cur in nn_seq.values()
# for nn_cur in nn_seq_cur)
# # only for active nodes, but varies
nn_max_len = max(len(nn) for nn in
self.lev1_summary_stats['nn'])
# for all responding nodes, and less varying
except ValueError: # max() arg is an empty sequence
nn_max_len = 1
# not set to 0 because str.format causes "ValueError: '='
# alignment not allowed in string format specifier"
# otherwise
return nn_max_len
nn_max_len = nn_max_len()
def tables_str_local(table_fields, fs_seq, nn_max_len, nn_seq,
nn_seq_len):
"""Return a string representations for the specified table
types.
"""
tables = []
for table_type in self._table_types:
# Initialize table
table = PrettyTable(table_fields, padding_width=0)
table.vertical_char = ' '
table.junction_char = '-'
table.set_field_align('Node', 'l')
for field in table_fields[1:]:
table.set_field_align(field, 'r')
# Add totals row
total_speeds = [self.lev1_summary_stats['fs'][fs][table_type]
for fs in fs_seq]
total_speeds = [self._bytes_str(i) for i in total_speeds]
total_speeds_total = self.lev2_summary_stats[table_type]
total_speeds_total = self._bytes_str(total_speeds_total)
nn = '{:*^{}}'.format('Total', nn_max_len)
row = [nn, total_speeds_total] + total_speeds
table.add_row(row)
# Add rows for previously determined file systems and node
# names
for nn in nn_seq[table_type]:
nn_recs = [self.rec_select(nn, fs) for fs in fs_seq]
# self.rec_select(nn, fs) can potentially be == None
nn_speeds = [(nn_rec[table_type] if nn_rec else None) for
nn_rec in nn_recs]
nn_speeds = [self._bytes_str(i) for i in nn_speeds]
nn_speeds_total = (
self.lev1_summary_stats['nn'][nn][table_type])
nn_speeds_total = self._bytes_str(nn_speeds_total)
nn = '{:.<{}}'.format(nn, nn_max_len)
# e.g. '{:.<{}}'.format('xy',4) = 'xy..'
row = [nn, nn_speeds_total] + nn_speeds
table.add_row(row)
# Construct printable tables string
label_template = ('{} bytes/s for top {} of {} active nodes '
'out of {} responding')
label = label_template.format(
self._table_types[table_type]['label'],
nn_seq_len[table_type, 'displayed'],
nn_seq_len[table_type, 'active'],
len(self.lev1_summary_stats['nn']))
table = '\n{}:\n{}'.format(label, table)
tables.append(table)
tables_str = '\n'.join(tables)
return tables_str
tables_str = tables_str_local(table_fields, fs_seq, nn_max_len, nn_seq,
nn_seq_len)
return tables_str
def _tables_interlaced_str(self, num_avail_lines):
"""Return an interlaced string representation of the table types
previously specified in self.__class__._table_types. Inactive nodes are
not included.
"""
# Determine file systems used
fs_seq = numsorted(tuple(self.lev1_summary_stats['fs']))
# tuple results in a hashable object which is required
table_fields = (['Node', 'Type', 'Total'] +
['{:>6}'.format(fs) for fs in fs_seq])
# 6 is the general len of a str returned by
# self._bytes_str
def nn_max(num_avail_lines):
"""Return the maximum number of nodes for which data can be
displayed."""
num_tables = len(self._table_types)
nn_max = num_avail_lines - 6 - num_tables
# 6 is the number of lines used by header and footer rows
# num_tables is the number of lines used by totals rows
nn_max = int(nn_max/num_tables)
            nn_max = max(nn_max, 0)
return nn_max
nn_max = nn_max(num_avail_lines)
def nn_names_and_lens(nn_max):
"""Return a sequence of the displayed node names, and a dict of the
node name sequence lengths.
"""
nn_seq_len = {}
nn_seq = [nn for nn, nn_summary_stats in
self.lev1_summary_stats['nn'].items() if
nn_summary_stats['brw'] > 0]
#nn_seq.sort()
sort_key = lambda nn: self.lev1_summary_stats['nn'][nn]['brw']
nn_seq.sort(key = sort_key, reverse=True)
nn_seq_len['active'] = len(nn_seq)
nn_seq = nn_seq[:nn_max]
nn_seq_len['displayed'] = len(nn_seq)
return nn_seq, nn_seq_len
nn_seq, nn_seq_len = nn_names_and_lens(nn_max)
def nn_max_len():
"""Return the max length of a node name."""
try:
# nn_max_len = max(len(nn_cur) for nn_seq_cur in nn_seq.values()
# for nn_cur in nn_seq_cur)
# # this is only for active nodes, but varies
nn_max_len = max(len(nn) for nn in
self.lev1_summary_stats['nn'])
# this is for all responding nodes, and less varying
except ValueError: # max() arg is an empty sequence
nn_max_len = 1
# not set to 0 because str.format causes "ValueError: '='
# alignment not allowed in string format specifier"
# otherwise
return nn_max_len
nn_max_len = nn_max_len()
def tables_str_local(table_fields, fs_seq, nn_max_len, nn_seq):
"""Return a string representation for the specified table types."""
# Initialize table
table = PrettyTable(table_fields, padding_width=0)
table.vertical_char = ' '
table.junction_char = '-'
table.set_field_align('Node', 'l')
for field in table_fields[2:]: table.set_field_align(field, 'r')
# Add totals row
nn = '{:*^{}}'.format('Total', nn_max_len)
for table_type in self._table_types:
total_speeds = [self.lev1_summary_stats['fs'][fs][table_type]
for fs in fs_seq]
total_speeds = [self._bytes_str(i) for i in total_speeds]
total_speeds_total = self.lev2_summary_stats[table_type]
total_speeds_total = self._bytes_str(total_speeds_total)
table_type = self._table_types[table_type]['label_short']
row = [nn, table_type, total_speeds_total] + total_speeds
table.add_row(row)
nn = ''
# Add rows for previously determined file systems and node names
for nn in nn_seq:
nn_recs = [self.rec_select(nn, fs) for fs in fs_seq]
# self.rec_select(nn, fs) can potentially be == None
nn_formatted = '{:.<{}}'.format(nn, nn_max_len)
# e.g. '{:.<{}}'.format('xy',4) = 'xy..'
for table_type in self._table_types:
nn_speeds = [(nn_rec[table_type] if nn_rec else None) for
nn_rec in nn_recs]
nn_speeds = [self._bytes_str(i) for i in nn_speeds]
nn_speeds_total = (
self.lev1_summary_stats['nn'][nn][table_type])
nn_speeds_total = self._bytes_str(nn_speeds_total)
table_type = self._table_types[table_type]['label_short']
row = ([nn_formatted, table_type, nn_speeds_total] +
nn_speeds)
table.add_row(row)
nn_formatted = ''
# Construct printable tables string
label_template = ('Bytes/s for top {} of {} active nodes out of '
'{} responding')
label = label_template.format(nn_seq_len['displayed'],
nn_seq_len['active'],
len(self.lev1_summary_stats['nn']))
tables_str = '\n{}:\n{}'.format(label, table)
return tables_str
tables_str = tables_str_local(table_fields, fs_seq, nn_max_len, nn_seq)
return tables_str
class RecordGroupDeltaIterator:
"""Yield RecordGroupDelta objects."""
def __iter__(self):
self._receiver = Receiver()
for rec_grp_prev, rec_grp_curr in common.pairwise(self._receiver):
rec_grp_delta = rec_grp_curr - rec_grp_prev
# for obj in (rec_grp_prev, rec_grp_curr, rec_grp_delta, ''):
# print(obj)
yield rec_grp_delta
def close(self):
"""Close the subprocess providing data to the iterator.
This must be used if an unhandled exception occurs.
"""
self._receiver.close()
class Display(Logger):
"""Write RecordGroupDelta objects to the console in a user friendly
format.
"""
# # Establish encoding for curses
# locale.setlocale(locale.LC_ALL, '')
# _encoding = locale.getpreferredencoding() # typically 'UTF-8'
# Set format string for datetime
try:
_d_t_fmt = locale.nl_langinfo(locale.D_T_FMT)
except AttributeError:
_d_t_fmt = '%a %b %e %H:%M:%S %Y'
# obtained with locale.getlocale() == ('en_US', 'UTF8')
def __init__(self):
if not sys.stdout.isatty():
err_msg = ('stdout is not open and connected to a tty-like device.'
' If running the application on a host using ssh, use '
'the -t ssh option.')
raise errors.TTYError(err_msg)
try:
self._init_curses()
self._write_initial_status()
self._write_recs()
finally:
try: curses.endwin() #@UndefinedVariable
except curses.error: pass #@UndefinedVariable
try: self._recgrps.close()
except AttributeError: # Attribute in question is self._recgrps
pass
# Any unhandled exception that may have happened earlier is now
# raised automatically.
if params.PRINT_LAST_RECORD:
try: print(self._recgrp_output_str)
except AttributeError: pass
def _init_curses(self):
"""Set up the curses display."""
self.logger.info('Initializing curses...')
self._alert_msg = ''
self._win = curses.initscr()
signal.siginterrupt(signal.SIGWINCH, False)
# siginterrupt above prevents the SIGWINCH signal handler of curses
# from raising IOError in Popen. Whether the signal is nevertheless
# raised or not is unconfirmed, although getmaxyx results are still
# updated.
# Make cursor invisible
try: curses.curs_set(0) #@UndefinedVariable
except curses.error: pass #@UndefinedVariable
# The error message "_curses.error: curs_set() returned ERR" can
# possibly be returned when curses.curs_set is called. This can happen
# if TERM does not support curs_set.
#
# Alternative way:
# if curses.tigetstr('civis') is not None: #@UndefinedVariable
# curses.curs_set(0) #@UndefinedVariable
def _init_key_listener():
"""Set up the curses key listener thread."""
def _key_listener():
"""Run the curses pause and resume key listener."""
curses.noecho() #@UndefinedVariable
self._win.nodelay(False)
pause_key = params.DISPLAY_PAUSE_KEY
alert = 'paused'
while True:
time.sleep(0.1)
                    # Technically, sleep should not be necessary here
                    # because blocking mode is previously set by means
                    # of nodelay(False).
# Nevertheless, sleep was found to avoid getkey/getch
# from crashing (without any Exception) in a host with
# version 5.5-24.20060715 of ncurses. Another host
# (with version 5.7-3.20090208 of ncurses) was not
# observed to have this bug even without sleep.
# Key presses were observed to always be registered
# despite the sleep.
if self._win.getkey() == pause_key:
self._active = not self._active
with self._disp_lock:
if not self._active:
self._ins_alert(alert)
else:
self._del_alert()
# This is useful in the event the next
# normal update to the window is several
# seconds later.
self._active = True
self._disp_lock = threading.Lock()
_key_listener_thread = threading.Thread(group=None,
target=_key_listener,
name='KeyListener')
_key_listener_thread.daemon = True
_key_listener_thread.start()
_init_key_listener()
def _ins_alert(self, alert):
"""Insert the supplied alert str into the second row. Any prior alert
is first deleted.
"""
self._del_alert()
# Delete previous alert so that multiple alerts are not displayed
# at once.
self._alert_msg = alert
# Store current alert to make it available for later deletion.
# Insert alert
if alert:
w = self._win
try:
w.insstr(1, 0, '() ')
w.insstr(1, 1, alert, curses.A_UNDERLINE) #@UndefinedVariable
except: pass
def _del_alert(self):
"""Delete the most recent alert str from the second row."""
if self._alert_msg:
try:
for _ in range(len(self._alert_msg) + 3):
self._win.delch(1, 0)
except: pass
self._alert_msg = ''
def _write_initial_status(self):
"""Write the initial collection status."""
self.logger.info('Writing initial status...')
with self._disp_lock:
nodeset = params.GPFS_NODESET or 'first available'
status_template = ('{}\n\nCollecting initial data for {} nodeset '
'from {}.\n\nWait {:.0f}s.')
status = status_template.format(params._PROGRAM_NAME,
nodeset,
params.MMPMON_HOST,
params.MONITORING_INTERVAL_SECS*2)
# params.MONITORING_INTERVAL_SECS is multiplied by 2 because
# data is fully received for an iteration only after the next
# iteration has internally begun to be received.
self._write(status)
def _write_recs(self):
"""Write individual records."""
self.logger.info('Writing records...')
self._recgrps = RecordGroupDeltaIterator()
for recgrp in self._recgrps:
if self._active and self._disp_lock.acquire(False):
self._recgrp_output_str = self._format_output(recgrp)
self._write(self._recgrp_output_str)
self._disp_lock.release()
# Allow time for the final update to be seen
time.sleep(params.MONITORING_INTERVAL_SECS)
def _format_output(self, recgrp):
"""Return the formatted string to display to the screen."""
        strftime = lambda dt: dt.strftime(self._d_t_fmt).replace('  ', ' ')
        # replace is used above to e.g. turn 'Feb  3' into 'Feb 3'
datetime_now = datetime.datetime.now()
recgrp_timestamp_age = datetime_now - recgrp.timestamp
recgrp_timestamp_age = recgrp_timestamp_age.total_seconds()
# Determine header
title = '{} [updated {}]'.format(params._PROGRAM_NAME,
strftime(datetime_now))
status_template = ('Displaying activity for {:.1f}s before the past '
'{:.1f}s.\n')
status = status_template.format(recgrp.time_duration_secs,
recgrp_timestamp_age)
header = '\n'.join((title, status))
# Determine table string
num_avail_lines = self._win.getmaxyx()[0] - header.count('\n')
num_avail_lines = max(num_avail_lines, 0)
tables_str = recgrp.tables_str(format_=params.TABLE_TYPE,
num_avail_lines=num_avail_lines)
return header + tables_str
def _write(self, str_):
"""Update the display with the provided string."""
w = self._win
w.erase()
try:
w.addstr(str(str_))
w.addstr(0, 0, params._PROGRAM_NAME,
curses.A_BOLD) #@UndefinedVariable
except: pass
# The try except block was found to prevent occasional errors by
# addstr, but not if the block enclosed all w actions, which is
# unexpected.
w.refresh()
if __name__ == '__main__':
logger = logging.getLogger(params._PROGRAM_NAME_SHORT)
try:
ArgParser()
DiagnosticLoggerSetup()
Display()
except (KeyboardInterrupt, errors.Error, Exception) as exception:
try:
logger.exception(exception)
except AttributeError:
# Likely cause: AttributeError: type object 'NullHandler' has no
# attribute 'level'
pass
if isinstance(exception, KeyboardInterrupt):
exit()
elif isinstance(exception, errors.Error) and exception.args:
exit('\n'.join(exception.args))
else:
raise
|
agpl-3.0
| 6,257,922,069,138,471,000 | 38.312772 | 88 | 0.530799 | false |
Azure/azure-sdk-for-python
|
sdk/signalr/azure-mgmt-signalr/tests/test_cli_mgmt_signalr.py
|
1
|
4604
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 12
# Methods Covered : 12
# Examples Total : 12
# Examples Tested : 12
# Coverage % : 100
# ----------------------
import unittest
import azure.mgmt.signalr
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'eastus'
class MgmtSignalRTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtSignalRTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.signalr.SignalRManagementClient
)
@unittest.skip("skip test")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_signalr(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
TENANT_ID = self.settings.TENANT_ID
RESOURCE_GROUP = resource_group.name
LOCATION = "myLocation"
RESOURCE_NAME = "myResource"
# /SignalR/put/SignalR_CreateOrUpdate[put]
BODY = {
'tags': {
"key1": "value1"
},
'sku': {
"name": "Standard_S1",
"tier": "Standard",
"capacity": "1"
},
'properties': {
"features": [
{
"flag": "ServiceMode",
"value": "Serverless"
}
],
"cors": {
"allowed_origins": [
"https://foo.com",
"https://bar.com"
]
}
},
'location': 'eastus'
}
result = self.mgmt_client.signal_r.create_or_update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
# /SignalR/get/SignalR_Get[get]
result = self.mgmt_client.signal_r.get(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
# /SignalR/get/SignalR_ListByResourceGroup[get]
result = self.mgmt_client.signal_r.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
# /Usages/get/Usages_List[get]
result = self.mgmt_client.usages.list(location=LOCATION)
# /SignalR/get/SignalR_ListBySubscription[get]
result = self.mgmt_client.signal_r.list_by_subscription()
# /Operations/get/Operations_List[get]
result = self.mgmt_client.operations.list()
# /SignalR/post/SignalR_RegenerateKey[post]
# result = self.mgmt_client.signal_r.regenerate_key(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, key_type="Primary")
# result = result.result()
# /SignalR/post/SignalR_ListKeys[post]
result = self.mgmt_client.signal_r.list_keys(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
# /SignalR/post/SignalR_Restart[post]
result = self.mgmt_client.signal_r.restart(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
result = result.result()
# /SignalR/patch/SignalR_Update[patch]
BODY = {
"tags": {
"key1": "value1"
},
"sku": {
"name": "Standard_S1",
"tier": "Standard",
"capacity": "1"
},
"properties": {
"features": [
{
"flag": "ServiceMode",
"value": "Serverless"
}
],
"cors": {
"allowed_origins": [
"https://foo.com",
"https://bar.com"
]
}
}
}
result = self.mgmt_client.signal_r.update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
# /SignalR/post/SignalR_CheckNameAvailability[post]
result = self.mgmt_client.signal_r.check_name_availability(location="eastus", type="Microsoft.SignalRService/SignalR", name="my-signalr-service")
# /SignalR/delete/SignalR_Delete[delete]
result = self.mgmt_client.signal_r.delete(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
mit
| 5,051,266,051,244,888,000 | 32.605839 | 153 | 0.544961 | false |
baroquebobcat/pants
|
contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_bundle_integration.py
|
1
|
5851
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
from pants.fs.archive import archiver_for_path, create_archiver
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class NodeBundleIntegrationTest(PantsRunIntegrationTest):
DIST_DIR = 'dist'
TGZ_SUFFIX = '.tar.gz'
JAR_SUFFIX = '.jar'
PROJECT_DIR = 'contrib/node/examples/src/node/web-component-button'
WEB_COMPONENT_BUTTON_PROJECT = 'web-component-button'
WEB_COMPONENT_BUTTON_PROCESSED_PROJECT = 'web-component-button-processed'
WITH_DEPENDENCY_ARTIFACTS_PROJECT = 'web-component-button-processed-with-dependency-artifacts'
WEB_COMPONENT_BUTTON_BUNDLE = 'web-component-button-bundle'
WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE = 'web-component-button-processed-bundle'
PREINSTALLED_PROJECT_DIR = 'contrib/node/examples/src/node/preinstalled-project'
PREINSTALLED_PROJECT = 'preinstalled-project'
PREINSTALLED_BUNDLE = 'preinstalled-project-bundle'
JVM_PROJECT = 'jsresources'
JVM_WITH_ARTIFACTS_PROJECT = 'jsresources-with-dependency-artifacts'
JVM_PROJECT_DIR = 'contrib/node/examples/src/java/org/pantsbuild/testproject/jsresources'
WEB_COMPONENT_BUTTON_ARTIFACT = os.path.join(
DIST_DIR, WEB_COMPONENT_BUTTON_BUNDLE + TGZ_SUFFIX)
WEB_COMPONENT_BUTTON_PROCESSED_ARTIFACT = os.path.join(
DIST_DIR, WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE + TGZ_SUFFIX)
PREINSTALLED_ARTIFACT = os.path.join(
DIST_DIR, PREINSTALLED_BUNDLE + TGZ_SUFFIX)
JVM_PROJECT_ARTIFACT = os.path.join(DIST_DIR, JVM_PROJECT + JAR_SUFFIX)
JVM_WITH_ARTIFACTS_ARTIFACT = os.path.join(DIST_DIR, JVM_WITH_ARTIFACTS_PROJECT + JAR_SUFFIX)
def test_bundle_node_module(self):
command = [
'bundle',
':'.join([self.PROJECT_DIR, self.WEB_COMPONENT_BUTTON_BUNDLE])]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.WEB_COMPONENT_BUTTON_ARTIFACT) as temp_dir:
actual_set = set(os.listdir(temp_dir))
expected_set = {'src', 'test', 'node_modules', 'package.json', 'webpack.config.js'}
self.assertTrue(expected_set <= actual_set)
if expected_set < actual_set:
# npm 5 introduced package-lock.json
self.assertEqual(actual_set - expected_set, {'package-lock.json'})
# Make sure .bin symlinks remains as symlinks.
self.assertTrue(os.path.islink(os.path.join(temp_dir, 'node_modules', '.bin', 'mocha')))
def test_bundle_node_module_processed(self):
command = [
'bundle',
':'.join([self.PROJECT_DIR, self.WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE])]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.WEB_COMPONENT_BUTTON_PROCESSED_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(temp_dir)),
{'Button.js'}
)
def test_bundle_jvm_binary_with_node_module(self):
command = [
'binary',
':'.join([self.JVM_PROJECT_DIR, self.JVM_PROJECT])
]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.JVM_PROJECT_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(os.path.join(temp_dir, self.WEB_COMPONENT_BUTTON_PROCESSED_PROJECT))),
{'Button.js'}
)
# Only include node build results, not original node_modules directory
self.assertTrue('node_modules' not in os.listdir(temp_dir))
      # Transitive dependency that is marked as not generating artifacts should not be included.
self.assertTrue('web-build-tool' not in os.listdir(temp_dir))
def test_bundle_jvm_binary_with_node_module_and_dependencies(self):
command = [
'binary',
':'.join([self.JVM_PROJECT_DIR, self.JVM_WITH_ARTIFACTS_PROJECT])
]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.JVM_WITH_ARTIFACTS_ARTIFACT) as temp_dir:
print (os.listdir(temp_dir))
self.assertEquals(
set(os.listdir(os.path.join(temp_dir, self.WITH_DEPENDENCY_ARTIFACTS_PROJECT))),
{'Button.js'}
)
# Only include node build results, not original node_modules directory
self.assertTrue('node_modules' not in os.listdir(temp_dir))
      # Transitive dependency should be included.
self.assertTrue('web-dependency-test' in os.listdir(temp_dir))
def test_bundle_node_preinstalled_module(self):
command = [
'bundle',
':'.join([self.PREINSTALLED_PROJECT_DIR, self.PREINSTALLED_BUNDLE])]
self.assert_success(self.run_pants(command=command))
with self._extract_archive(self.PREINSTALLED_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(temp_dir)),
{'src', 'test', 'node_modules', 'package.json'}
)
def test_no_bundle_for_node_module(self):
command = ['bundle', ':'.join([self.PREINSTALLED_PROJECT_DIR, self.PREINSTALLED_PROJECT])]
self.assert_success(self.run_pants(command=command))
self.assertFalse(os.path.exists(self.PREINSTALLED_BUNDLE))
@contextmanager
def _extract_archive(self, archive_path):
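    """Extract the archive at archive_path into a temporary directory and
    yield that directory; .jar files are extracted with the zip archiver."""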
with temporary_dir() as temp_dir:
_, extension = os.path.splitext(archive_path)
print (extension)
if extension == '.jar':
extraction_archiver = create_archiver('zip')
else:
extraction_archiver = archiver_for_path(os.path.basename(archive_path))
extraction_archiver.extract(archive_path, temp_dir)
yield temp_dir
|
apache-2.0
| 8,449,368,400,863,214,000 | 39.075342 | 96 | 0.70176 | false |
rp-/honstats
|
provider.py
|
1
|
10448
|
"""
This file is part of honstats.
honstats is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
honstats is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with honstats. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import json
import sqlite3
import urllib.request
from urllib.error import HTTPError
import gzip
import time
DBCREATE = """
CREATE TABLE IF NOT EXISTS player (
id INTEGER PRIMARY KEY,
nick TEXT
);
CREATE TABLE IF NOT EXISTS playerdata (
id INTEGER,
date DATETIME,
statstype TEXT,
data TEXT,
PRIMARY KEY(id, date, statstype)
);
CREATE TABLE IF NOT EXISTS hero (
id INTEGER PRIMARY KEY,
name TEXT
);
"""
class NoResultsError(Exception):
pass
class DataProvider(object):
MatchCacheDir = 'match'
PlayerCacheDir = 'player'
CacheTime = 60 * 15
HeroNicks = {
6: "Devo",
9: "Elec",
14: "NH",
15: "Swift",
16: "BH",
25: "KotF",
26: "TDL",
27: "VJ",
29: "WB",
42: "MadM",
43: "DS",
104: "Hag",
108: "PR",
109: "SR",
114: "CD",
120: "WS",
121: "FA",
124: "Chip",
161: "Gladi",
162: "DR",
185: "Sil",
192: "RA",
195: "EW",
201: "DM",
209: "Salf",
234: "Benz"
}
@staticmethod
def nickoraccountid(aid):
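        """Return the API path fragment for an account: '/accountid/<id>' if
        aid is numeric, otherwise '/nickname/<aid>'."""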
try:
int(aid)
return '/accountid/' + str(aid)
except ValueError:
return '/nickname/' + aid
class HttpDataProvider(DataProvider):
StatsMapping = {'ranked': 'rnk', 'public': 'acc', 'casual': 'cs'}
def __init__(self, url='api.heroesofnewerth.com', token=None, cachedir="~/.honstats"):
self.url = url
self.token = token
self.cachedir = os.path.abspath(os.path.expanduser(cachedir))
if self.cachedir:
os.makedirs(self.cachedir, exist_ok=True)
dbfile = os.path.join(self.cachedir, 'stats.db')
self.db = sqlite3.connect(dbfile)
self.db.executescript(DBCREATE)
os.makedirs(os.path.join(self.cachedir, DataProvider.MatchCacheDir), exist_ok=True)
os.makedirs(os.path.join(self.cachedir, DataProvider.PlayerCacheDir), exist_ok=True)
def __del__(self):
self.db.close()
def nick2id(self, nick):
try:
int(nick)
except ValueError:
cursor = self.db.cursor()
cursor.execute("SELECT id from player WHERE lower(nick) = lower(:nick)", {'nick': nick})
row = cursor.fetchone()
cursor.close()
if row:
return int(row[0])
data = self.fetch('/player_statistics/ranked/nickname/' + nick)
            # insert the real nick into database, case sensitive
self.id2nick(int(data['account_id']))
return int(data['account_id'])
return int(nick)
def id2nick(self, aid):
if isinstance(aid, int):
# resp = urllib.request.urlopen('http://forums.heroesofnewerth.com/member.php?' + str(int(id)))
# begin = resp.read(4048).decode('utf-8')
# print(begin)
# m = re.search(r'<title>View Profile:\s*(\S+)-', begin)
# if m:
# return m.group(1)
cursor = self.db.cursor()
cursor.execute("SELECT nick FROM player WHERE id = :id", {'id': aid})
row = cursor.fetchone()
cursor.close()
if row:
return row[0]
data = self.fetch('/player_statistics/ranked/accountid/' + str(aid))
self.db.execute('INSERT INTO player VALUES( :id, :nick );', {'id': aid, 'nick': data['nickname']})
self.db.commit()
return data['nickname']
return str(aid)
def heroid2name(self, aid, full=False):
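        """Return a display name for a hero id: a short nickname when
        full=False and one is known, otherwise the full name fetched from the
        API and cached in the local database."""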
if not full and aid in DataProvider.HeroNicks:
return DataProvider.HeroNicks[aid]
cursor = self.db.cursor()
cursor.execute("SELECT name FROM hero WHERE id = :id", {'id': aid})
row = cursor.fetchone()
cursor.close()
if row:
return row[0]
data = self.fetch('/heroes/id/{id}'.format(id=aid))
name = data['disp_name'].strip()
self.db.execute('INSERT INTO hero VALUES( :id, :name);', {'id': aid, 'name': name})
self.db.commit()
return name
def fetch(self, path):
url = self.url + path + "/?token=" + self.token
#print(url)
try:
resp = urllib.request.urlopen(url)
except HTTPError as e:
if e.code == 404:
raise NoResultsError()
if e.code == 429: # too much requests
time.sleep(0.1) # this might be a bit harsh, but fetch until we get what we want
return self.fetch(path)
raise e
raw = resp.read().decode('utf-8').strip()
# work around a serialization bug from hon
if raw.startswith('Notice:'):
raw = raw[raw.find('\n'):]
data = json.loads(raw)
resp.close()
return data
def fetchplayer(self, aid, statstype):
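        """Return player statistics for the given statstype, reusing a row
        cached in the database within the last CacheTime seconds; otherwise
        fetch from the API and store the result."""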
cursor = self.db.cursor()
cursor.execute("SELECT data FROM playerdata WHERE id=:id AND "
"strftime('%s',date)-:date>0 AND statstype=:statstype ORDER BY date;",
{'id': self.nick2id(aid), 'date': int(time.time() - DataProvider.CacheTime),
'statstype': statstype})
row = cursor.fetchone()
cursor.close()
if row:
return json.loads(row[0])
data = self.fetch('/player_statistics/' + statstype + DataProvider.nickoraccountid(aid))
# # check if the data really changed
# cursor = self.db.cursor()
# cursor.execute("SELECT data FROM playerdata WHERE id=:id AND statstype=:statstype " \
# "AND strftime('%s', date)=(select MAX(strftime('%s',date)) " \
# "from playerdata WHERE id=:id AND statstype=:statstype);",
# {'id': self.nick2id(id), 'date': int(time.time() - DataProvider.CacheTime),
# 'statstype': statstype})
# dbdata = json.loads(cursor.fetchone()[0])
# # insert if we have more games
# if int(dbdata[self.StatsMapping[statstype] + '_games_played']) !=
# int(data[self.StatsMapping[statstype] + '_games_played']):
if True:
self.db.execute("INSERT INTO playerdata VALUES(:id, CURRENT_TIMESTAMP, :statstype, :data);",
{'id': self.nick2id(aid), 'statstype': statstype, 'data': json.dumps(data)})
self.db.commit()
return data
def fetchmatches(self, aid, statstype):
playerdir = os.path.join(self.cachedir, DataProvider.PlayerCacheDir)
playermatches = os.path.join(playerdir, "{id}_matches_{statstype}.gz".format(
id=self.nick2id(aid),
statstype=statstype))
if os.path.exists(playermatches) and os.stat(playermatches).st_ctime > time.time() - DataProvider.CacheTime:
with gzip.open(playermatches, 'rt') as f:
data = json.load(f)
else:
path = '/match_history/' + statstype + DataProvider.nickoraccountid(aid)
data = self.fetch(path)
with gzip.open(playermatches, 'wt+') as f:
f.write(json.dumps(data))
return data
def matches(self, aid, statstype):
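        """Return the player's match ids parsed from the match history,
        sorted in descending order."""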
data = self.fetchmatches(aid, statstype)
history = ""
if len(data) > 0:
history = data[0]['history']
hist = history.split(',')
matchids = [int(x.split('|')[0]) for x in hist]
matchids = sorted(matchids, reverse=True)
return matchids
def fetchmatchdata(self, matchids, *, limit=None, id_hero=None):
"""Fetches match data by id and caches it onto disk
First checks if the match stats are already cached
Args:
matchids: list of match ids
Returns:
dict with matches, the key is the matchid
"""
data = {}
limit = limit if limit else len(matchids)
heroname = None
aid = None
if id_hero:
aid, heroname = id_hero
aid = self.nick2id(aid)
i = 0
while len(data) < limit and i < len(matchids):
matchid = matchids[i]
matchdir = os.path.join(self.cachedir, DataProvider.MatchCacheDir)
matchpath = os.path.join(matchdir, str(matchid)[0:4])
os.makedirs(matchpath, exist_ok=True)
matchpath = os.path.join(matchpath, str(matchid) + ".gz")
if os.path.exists(matchpath):
with gzip.open(matchpath, 'rt') as f:
matchdata = json.load(f)
else:
try:
matchdata = self.fetch('/match/summ/matchid/{id}'.format(id=matchid))
matchstats = self.fetch('/match/all/matchid/{id}'.format(id=matchid))
matchdata.append(matchstats[0][0]) # settings
matchdata.append(matchstats[1]) # items
matchdata.append(matchstats[2]) # player stats
with gzip.open(matchpath, 'wt+') as f:
f.write(json.dumps(matchdata))
except NoResultsError:
matchdata = None
if id_hero and matchdata:
playerstats = matchdata[3]
for stats in playerstats:
if aid == int(stats['account_id']):
playedhero = self.heroid2name(stats['hero_id'], full=True).lower()
if heroname in playedhero:
data[matchid] = matchdata
break
else:
data[matchid] = matchdata
i += 1
return data
def heroes(self):
return self.fetch('/heroes/all')
|
gpl-3.0
| 140,861,126,853,115,950 | 34.178451 | 116 | 0.553025 | false |
ValyrianTech/BitcoinSpellbook-v0.3
|
darwin/genome.py
|
1
|
2090
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from darwin.chromosome import Chromosome
import hashlib
import binascii
import simplejson
class Genome(object):
def __init__(self):
self.chromosomes = {}
self.fitness = None
# def add_chromosome(self, chromosome):
# if not isinstance(chromosome, Chromosome):
# raise Exception('Can not add genome to population: unexpected type: %s' % type(chromosome))
#
# self.chromosomes.append(chromosome)
def add_chromosome(self, chromosome_id, encoding_type, min_value=None, max_value=None, charset=None, n_genes=None):
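        """Create a Chromosome with the given id and encoding type, apply any
        optional min/max/charset settings, initialize its genes and store it
        on this genome under chromosome_id."""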
chromosome = Chromosome(chromosome_id=chromosome_id, encoding_type=encoding_type, n_genes=n_genes)
if min_value is not None:
chromosome.min = min_value
if max_value is not None:
chromosome.max = max_value
if charset is not None:
chromosome.charset = charset
chromosome.init_genes()
self.chromosomes[chromosome_id] = chromosome
def init_with_random_data(self):
for chromosome_id, chromosome in self.chromosomes.items():
for gene in chromosome.genes:
gene.set_random_data()
def info(self):
info = 'Genome id: %s' % self.id()
for chromosome_id, chromosome in self.chromosomes.items():
info += '\nCHROMOSOME %s:\n%s' % (chromosome_id, chromosome.info())
return info
def id(self):
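        """Return a SHA-256 hex digest computed from all gene data, used as a
        content-based identifier for this genome."""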
data_string = ''
for i, (chromosome_id, chromosome) in enumerate(self.chromosomes.items()):
data_string += '|%s|' % i
for j, gene in enumerate(chromosome.genes):
data_string += '%s:%s ' % (j, gene.data)
return binascii.hexlify(hashlib.sha256(simplejson.dumps(data_string, sort_keys=True).encode('utf-8')).digest()).decode()
def to_dict(self):
return {'chromosomes': {chromosome_id: chromosome.to_dict() for chromosome_id, chromosome in self.chromosomes.items()},
'fitness': self.fitness,
'id': self.id()}
|
gpl-3.0
| -4,451,533,938,911,407,600 | 32.174603 | 128 | 0.615311 | false |
sam-m888/gprime
|
gprime/plugins/lib/librecords.py
|
1
|
20750
|
# encoding:utf-8
#
# gPrime - A web-based genealogy program - Records plugin
#
# Copyright (C) 2008-2011 Reinhard Müller
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2013-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
# Standard Python modules
#
#------------------------------------------------------------------------
import datetime
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.sgettext
from gprime.lib import (ChildRefType, Date, Span, Name, StyledText,
StyledTextTag, StyledTextTagType)
from gprime.display.name import displayer as name_displayer
from gprime.utils.alive import probably_alive
from gprime.proxy import LivingProxyDb
#------------------------------------------------------------------------
#
# List of records
#
#------------------------------------------------------------------------
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
return value
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
RECORDS = [
(_T_("Youngest living person"), 'person_youngestliving', True),
(_T_("Oldest living person"), 'person_oldestliving', True),
(_T_("Person died at youngest age"), 'person_youngestdied', False),
(_T_("Person died at oldest age"), 'person_oldestdied', True),
(_T_("Person married at youngest age"), 'person_youngestmarried', True),
(_T_("Person married at oldest age"), 'person_oldestmarried', True),
(_T_("Person divorced at youngest age"), 'person_youngestdivorced', False),
(_T_("Person divorced at oldest age"), 'person_oldestdivorced', False),
(_T_("Youngest father"), 'person_youngestfather', True),
(_T_("Youngest mother"), 'person_youngestmother', True),
(_T_("Oldest father"), 'person_oldestfather', True),
(_T_("Oldest mother"), 'person_oldestmother', True),
(_T_("Couple with most children"), 'family_mostchildren', True),
(_T_("Living couple married most recently"), 'family_youngestmarried',True),
(_T_("Living couple married most long ago"), 'family_oldestmarried', True),
(_T_("Shortest past marriage"), 'family_shortest', False),
(_T_("Longest past marriage"), 'family_longest', True),
(_T_("Couple with smallest age difference"), 'family_smallestagediff', True),
(_T_("Couple with biggest age difference"), 'family_biggestagediff', True)]
#------------------------------------------------------------------------
#
# Global functions
#
#------------------------------------------------------------------------
def _good_date(date):
return (date is not None and date.is_valid())
def _find_death_date(db, person):
death_ref = person.get_death_ref()
if death_ref:
death = db.get_event_from_handle(death_ref.ref)
return death.get_date_object()
else:
event_list = person.get_primary_event_ref_list()
for event_ref in event_list:
event = db.get_event_from_handle(event_ref.ref)
if event.get_type().is_death_fallback():
return event.get_date_object()
return None
def find_records(db, filter, top_size, callname,
trans_text=glocale.translation.sgettext, name_format=None,
living_mode=LivingProxyDb.MODE_INCLUDE_ALL):
"""
@param trans_text: allow deferred translation of strings
@type trans_text: a Locale sgettext instance
trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
:param name_format: optional format to control display of person's name
:type name_format: None or int
:param living_mode: enable optional control of living people's records
:type living_mode: int
"""
def get_unfiltered_person_from_handle(person_handle):
if living_mode == LivingProxyDb.MODE_INCLUDE_ALL:
return db.get_person_from_handle(person_handle)
else: # we are in the proxy so get the person before proxy changes
return db.get_unfiltered_person(person_handle)
today = datetime.date.today()
today_date = Date(today.year, today.month, today.day)
# Person records
person_youngestliving = []
person_oldestliving = []
person_youngestdied = []
person_oldestdied = []
person_youngestmarried = []
person_oldestmarried = []
person_youngestdivorced = []
person_oldestdivorced = []
person_youngestfather = []
person_youngestmother = []
person_oldestfather = []
person_oldestmother = []
person_handle_list = db.iter_person_handles()
if filter:
person_handle_list = filter.apply(db, person_handle_list)
for person_handle in person_handle_list:
person = db.get_person_from_handle(person_handle)
unfil_person = get_unfiltered_person_from_handle(person_handle)
if person is None:
continue
# FIXME this should check for a "fallback" birth also/instead
birth_ref = person.get_birth_ref()
if not birth_ref:
# No birth event, so we can't calculate any age.
continue
birth = db.get_event_from_handle(birth_ref.ref)
birth_date = birth.get_date_object()
death_date = _find_death_date(db, person)
if not _good_date(birth_date):
# Birth date unknown or incomplete, so we can't calculate any age.
continue
name = _get_styled_primary_name(person, callname,
trans_text=trans_text,
name_format=name_format)
if death_date is None:
if probably_alive(unfil_person, db):
# Still living, look for age records
_record(person_youngestliving, person_oldestliving,
today_date - birth_date, name, 'Person', person_handle,
top_size)
elif _good_date(death_date):
# Already died, look for age records
_record(person_youngestdied, person_oldestdied,
death_date - birth_date, name, 'Person', person_handle,
top_size)
for family_handle in person.get_family_handle_list():
family = db.get_family_from_handle(family_handle)
marriage_date = None
divorce_date = None
for event_ref in family.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
if (event.get_type().is_marriage() and
(event_ref.get_role().is_family() or
event_ref.get_role().is_primary())):
marriage_date = event.get_date_object()
elif (event.get_type().is_divorce() and
(event_ref.get_role().is_family() or
event_ref.get_role().is_primary())):
divorce_date = event.get_date_object()
if _good_date(marriage_date):
_record(person_youngestmarried, person_oldestmarried,
marriage_date - birth_date,
name, 'Person', person_handle, top_size)
if _good_date(divorce_date):
_record(person_youngestdivorced, person_oldestdivorced,
divorce_date - birth_date,
name, 'Person', person_handle, top_size)
for child_ref in family.get_child_ref_list():
if person.get_gender() == person.MALE:
relation = child_ref.get_father_relation()
elif person.get_gender() == person.FEMALE:
relation = child_ref.get_mother_relation()
else:
continue
if relation != ChildRefType.BIRTH:
continue
child = db.get_person_from_handle(child_ref.ref)
# FIXME this should check for a "fallback" birth also/instead
child_birth_ref = child.get_birth_ref()
if not child_birth_ref:
continue
child_birth = db.get_event_from_handle(child_birth_ref.ref)
child_birth_date = child_birth.get_date_object()
if not _good_date(child_birth_date):
continue
if person.get_gender() == person.MALE:
_record(person_youngestfather, person_oldestfather,
child_birth_date - birth_date,
name, 'Person', person_handle, top_size)
elif person.get_gender() == person.FEMALE:
_record(person_youngestmother, person_oldestmother,
child_birth_date - birth_date,
name, 'Person', person_handle, top_size)
# Family records
family_mostchildren = []
family_youngestmarried = []
family_oldestmarried = []
family_shortest = []
family_longest = []
family_smallestagediff = []
family_biggestagediff = []
for family in db.iter_families():
#family = db.get_family_from_handle(family_handle)
if living_mode != LivingProxyDb.MODE_INCLUDE_ALL:
# FIXME no iter_families method in LivingProxyDb so do it this way
family = db.get_family_from_handle(family.get_handle())
father_handle = family.get_father_handle()
if not father_handle:
continue
mother_handle = family.get_mother_handle()
if not mother_handle:
continue
# Test if either father or mother are in filter
if filter:
if not filter.apply(db, [father_handle, mother_handle]):
continue
father = db.get_person_from_handle(father_handle)
unfil_father = get_unfiltered_person_from_handle(father_handle)
if father is None:
continue
mother = db.get_person_from_handle(mother_handle)
unfil_mother = get_unfiltered_person_from_handle(mother_handle)
if mother is None:
continue
name = StyledText(trans_text("%(father)s and %(mother)s")) % {
'father': _get_styled_primary_name(father, callname,
trans_text=trans_text,
name_format=name_format),
'mother': _get_styled_primary_name(mother, callname,
trans_text=trans_text,
name_format=name_format)}
if (living_mode == LivingProxyDb.MODE_INCLUDE_ALL
or (not probably_alive(unfil_father, db) and
not probably_alive(unfil_mother, db))):
_record(None, family_mostchildren,
len(family.get_child_ref_list()),
name, 'Family', family.handle, top_size)
father_birth_ref = father.get_birth_ref()
if father_birth_ref:
father_birth_date = db.get_event_from_handle(father_birth_ref.ref).get_date_object()
else:
father_birth_date = None
mother_birth_ref = mother.get_birth_ref()
if mother_birth_ref:
mother_birth_date = db.get_event_from_handle(mother_birth_ref.ref).get_date_object()
else:
mother_birth_date = None
if _good_date(father_birth_date) and _good_date(mother_birth_date):
if father_birth_date >> mother_birth_date:
_record(family_smallestagediff, family_biggestagediff,
father_birth_date - mother_birth_date,
name, 'Family', family.handle, top_size)
elif mother_birth_date >> father_birth_date:
_record(family_smallestagediff, family_biggestagediff,
mother_birth_date - father_birth_date,
name, 'Family', family.handle, top_size)
marriage_date = None
divorce = None
divorce_date = None
for event_ref in family.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
if (event.get_type().is_marriage() and
(event_ref.get_role().is_family() or
event_ref.get_role().is_primary())):
marriage_date = event.get_date_object()
if (event and event.get_type().is_divorce() and
(event_ref.get_role().is_family() or
event_ref.get_role().is_primary())):
divorce = event
divorce_date = event.get_date_object()
father_death_date = _find_death_date(db, father)
mother_death_date = _find_death_date(db, mother)
if not _good_date(marriage_date):
# Not married or marriage date unknown
continue
if divorce is not None and not _good_date(divorce_date):
# Divorced but date unknown or inexact
continue
if (not probably_alive(unfil_father, db)
and not _good_date(father_death_date)):
# Father died but death date unknown or inexact
continue
if (not probably_alive(unfil_mother, db)
and not _good_date(mother_death_date)):
# Mother died but death date unknown or inexact
continue
if (divorce_date is None
and father_death_date is None
and mother_death_date is None):
# Still married and alive
if (probably_alive(unfil_father, db)
and probably_alive(unfil_mother, db)):
_record(family_youngestmarried, family_oldestmarried,
today_date - marriage_date,
name, 'Family', family.handle, top_size)
elif (_good_date(divorce_date) or
_good_date(father_death_date) or
_good_date(mother_death_date)):
end = None
if _good_date(father_death_date) and _good_date(mother_death_date):
end = min(father_death_date, mother_death_date)
elif _good_date(father_death_date):
end = father_death_date
elif _good_date(mother_death_date):
end = mother_death_date
if _good_date(divorce_date):
if end:
end = min(end, divorce_date)
else:
end = divorce_date
duration = end - marriage_date
_record(family_shortest, family_longest,
duration, name, 'Family', family.handle, top_size)
#python 3 workaround: assign locals to tmp so we work with runtime version
tmp = locals()
return [(trans_text(text), varname, tmp[varname])
for (text, varname, default) in RECORDS]
def _record(lowest, highest, value, text, handle_type, handle, top_size):
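    """Insert a candidate record into the sorted 'lowest' and/or 'highest'
    lists (either may be None) and trim each list back to top_size entries,
    keeping ties at the cut-off value."""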
if value < 0: # ignore erroneous data
return # (since the data-verification tool already finds it)
if isinstance(value, Span):
low_value = value.minmax[0]
high_value = value.minmax[1]
else:
low_value = value
high_value = value
if lowest is not None:
lowest.append((high_value, value, text, handle_type, handle))
        lowest.sort(key=lambda a: a[0]) # FIXME: Is the lambda necessary?
for i in range(top_size, len(lowest)):
if lowest[i-1][0] < lowest[i][0]:
del lowest[i:]
break
if highest is not None:
highest.append((low_value, value, text, handle_type, handle))
highest.sort(reverse=True)
for i in range(top_size, len(highest)):
if highest[i-1][0] > highest[i][0]:
del highest[i:]
break
#------------------------------------------------------------------------
#
# Reusable functions (could be methods of gen.lib.*)
#
#------------------------------------------------------------------------
CALLNAME_DONTUSE = 0
CALLNAME_REPLACE = 1
CALLNAME_UNDERLINE_ADD = 2
def _get_styled(name, callname, placeholder=False,
trans_text=glocale.translation.sgettext, name_format=None):
"""
Return a StyledText object with the name formatted according to the
parameters:
@param callname: whether the callname should be used instead of the first
name (CALLNAME_REPLACE), underlined within the first name
(CALLNAME_UNDERLINE_ADD) or not used at all (CALLNAME_DONTUSE).
@param placeholder: whether a series of underscores should be inserted as a
placeholder if first name or surname are missing.
@param trans_text: allow deferred translation of strings
@type trans_text: a Locale sgettext instance
trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
:param name_format: optional format to control display of person's name
:type name_format: None or int
"""
# Make a copy of the name object so we don't mess around with the real
# data.
n = Name(source=name)
# Insert placeholders.
if placeholder:
if not n.first_name:
n.first_name = "____________"
if not n.surname:
n.surname = "____________"
if n.call:
if callname == CALLNAME_REPLACE:
# Replace first name with call name.
n.first_name = n.call
elif callname == CALLNAME_UNDERLINE_ADD:
if n.call not in n.first_name:
# Add call name to first name.
# translators: used in French+Russian, ignore otherwise
n.first_name = trans_text('"%(callname)s" (%(firstname)s)') % {
'callname': n.call,
'firstname': n.first_name }
real_format = name_displayer.get_default_format()
if name_format is not None:
name_displayer.set_default_format(name_format)
text = name_displayer.display_name(n)
name_displayer.set_default_format(real_format)
tags = []
if n.call:
if callname == CALLNAME_UNDERLINE_ADD:
# "name" in next line is on purpose: only underline the call name
# if it was a part of the *original* first name
if n.call in name.first_name:
# Underline call name
callpos = text.find(n.call)
tags = [StyledTextTag(StyledTextTagType.UNDERLINE, True,
[(callpos, callpos + len(n.call))])]
return StyledText(text, tags)
def _get_styled_primary_name(person, callname, placeholder=False,
trans_text=glocale.translation.sgettext, name_format=None):
"""
Return a StyledText object with the person's name formatted according to
the parameters:
@param callname: whether the callname should be used instead of the first
name (CALLNAME_REPLACE), underlined within the first name
(CALLNAME_UNDERLINE_ADD) or not used at all (CALLNAME_DONTUSE).
@param placeholder: whether a series of underscores should be inserted as a
placeholder if first name or surname are missing.
@param trans_text: allow deferred translation of strings
@type trans_text: a Locale sgettext instance
trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
:param name_format: optional format to control display of person's name
:type name_format: None or int
"""
return _get_styled(person.get_primary_name(), callname,
trans_text=trans_text,
placeholder=placeholder, name_format=name_format)
|
gpl-2.0
| -3,168,658,341,514,602,000 | 41.002024 | 96 | 0.565521 | false |
diego-d5000/MisValesMd
|
env/lib/python2.7/site-packages/django/conf/locale/ca/formats.py
|
1
|
1049
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y \a \l\e\s G:i'
YEAR_MONTH_FORMAT = r'F \d\e\l Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
# '31/12/2009', '31/12/09'
'%d/%m/%Y', '%d/%m/%y'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S',
'%d/%m/%Y %H:%M:%S.%f',
'%d/%m/%Y %H:%M',
'%d/%m/%y %H:%M:%S',
'%d/%m/%y %H:%M:%S.%f',
'%d/%m/%y %H:%M',
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
mit
| -3,880,304,409,747,797,500 | 29.787879 | 77 | 0.590086 | false |
MaxTyutyunnikov/lino
|
lino/management/commands/show.py
|
1
|
1890
|
# -*- coding: UTF-8 -*-
## Copyright 2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
"""
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import models
class Command(BaseCommand):
help = __doc__
args = "action_spec [args ...]"
option_list = BaseCommand.option_list + (
make_option('--username', action='store',
dest='username', default='root',
help='The username to act as. Default is "root".'),
make_option('--language', action='store',
dest='language',
help="The language to use. Default is the site's default language."),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("I need at least one argument.")
#~ settings.SITE.startup()
spec = args[0]
username = options['username']
ses = settings.SITE.login(username)
#~ language = options['language']
#~ if language:
#~ ses.set_language(language)
ses.show(spec,language=options['language'])
|
gpl-3.0
| -5,115,309,132,442,897,000 | 33.660377 | 81 | 0.621164 | false |
botswana-harvard/getresults-label
|
getresults_label/classes/client_printer.py
|
1
|
1707
|
import socket
from ..exceptions import LabelPrinterError
from ..models import Client, LabelPrinter
from .printer import Printer
class ClientPrinter(Printer):
"""Sets up the printer by selecting an instance of the
Client model for a given IP address.
Args:
* client_addr: ip_address or hostname. client_addr will usually be
passed from the REQUEST object. (Default: localhost).
For example::
client_addr = client_addr or request.META.get('REMOTE_ADDR')"""
def __init__(self, client_addr, printer_name=None, cups_server_host=None):
self.client = None
self.is_default_printer = False
ip_address = socket.gethostbyname(client_addr)
try:
self.client = Client.objects.get(ip=ip_address)
printer_name = self.client.label_printer.cups_printer_name
cups_server_host = self.client.label_printer.cups_server_hostname
self.is_default_printer = self.client.label_printer.default
except Client.DoesNotExist:
try:
label_printer = LabelPrinter.objects.get(default=True)
printer_name = label_printer.cups_printer_name
cups_server_host = label_printer.cups_server_hostname
self.is_default_printer = True
except LabelPrinter.DoesNotExist:
raise LabelPrinterError('Failed to select a printer. Client {} is not associated '
'with a Label Printer and no Label Printer has been set '
'as the default.'.format(ip_address))
super(ClientPrinter, self).__init__(printer_name, cups_server_host)
|
gpl-2.0
| 5,627,137,073,316,461,000 | 43.921053 | 98 | 0.629174 | false |
kernsuite-debian/obit
|
bin/obitinclude.py
|
1
|
1077
|
#!python
# Script to run gcc preprocessor on files whose name ends in "Def.h"
# or echo the file contents otherwise
# File name is only argument
import os, sys
# Routine to recursively read include
def parseInclude(lline):
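    """Echo the contents of the $OBIT/include file named in an
    '#include "..."' line, recursively expanding nested Obit*Def.h includes."""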
fname = lline.split()[1].replace('"','')
infil = os.getenv("OBIT")+"include/"+fname
jnput = open(infil)
line = " "
while (line):
line = jnput.readline() # read next line
if not line: # EOF?
break
# Def include to be expanded?
if line.startswith('#include "Obit') and (line.find('Def.h"')>=0):
parseInclude(line)
continue
x=sys.stdout.write(line)
# end parseInclude
# Get filename argument
infile=sys.argv[1]
# read file recursively including
input = open(infile)
line = " "
while (line):
line = input.readline() # read next line
if not line: # EOF?
break
# Def include to be expanded?
if line.startswith('#include "Obit') and (line.find('Def.h"')>=0):
parseInclude(line)
continue
x=sys.stdout.write(line)
|
gpl-2.0
| -7,073,445,046,572,088,000 | 27.342105 | 74 | 0.615599 | false |
nvictus/23andCSB
|
apps/api/views.py
|
1
|
3339
|
from django.http import HttpResponse
from django.shortcuts import redirect
import json
import logging
log = logging.getLogger("apidemo")
# import our OAuth client
from . import client
# view decorator
def requires_login(view_fcn):
def wrapper(request, *args, **kwargs):
if client.OAUTH_KEY in request.session.keys():
c = client.OAuthClient(request.session[client.OAUTH_KEY])
return view_fcn(request, c, *args, **kwargs)
else:
return redirect(client.LOGIN_URL)
return wrapper
# views
def get_resource(request, resource):
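    """Return the requested 23andMe resource for each of the user's profiles
    as JSON keyed by profile id; ?demo=true returns the client's demo data
    instead."""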
c = client.OAuthClient(request.session[client.OAUTH_KEY])
if 'demo' in request.GET and request.GET['demo']=='true':
return HttpResponse(c._get_resource_demo(resource))
user = c.get_user()
d = json.JSONDecoder()
e = json.JSONEncoder()
data = {}
for profile in user['profiles']:
profile_id = profile['id']
if resource == 'names':
data[profile_id] = d.decode(c.get_names())
elif resource == 'risks':
data[profile_id] = d.decode(c.get_risks(profile_id))
elif resource == 'carriers':
data[profile_id] = d.decode(c.get_carriers(profile_id))
elif resource == 'drug_responses':
data[profile_id] = d.decode(c.get_drug_responses(profile_id))
elif resource == 'traits':
data[profile_id] = d.decode(c.get_traits(profile_id))
elif resource == 'neanderthal':
data[profile_id] = d.decode(c.get_neanderthal(profile_id))
else:
raise Exception("invalid API resource requested")
data = e.encode(data)
return HttpResponse(data, mimetype="application/json")
def callback(request):
"""
The 23andMe api calls this view with a ?code=xxxxxx paramter. This
parameter is a short lived authorization code that you must use to get a an
OAuth authorization token which you can use to retrieve user data. This view
uses database backed session to store the auth token instead of cookies in
order to protect the token from leaving the server as it allows access to
significant sensitive user information.
"""
# create a fresh client
c = client.OAuthClient()
# get code returned from 23andMe API after user login
code = request.GET["code"]
log.debug("code: %s" % code)
# request token from 23andMe
log.debug("fetching token...")
(access_token, refresh_token) = c.get_token(code)
log.debug("access_token: %s refresh_token: %s" % (access_token, refresh_token))
log.debug("refreshing token...")
(access_token, refresh_token) = c.refresh_token(refresh_token)
log.debug("access_token: %s refresh_token: %s" % (access_token, refresh_token))
# persist in the browser session database (not cookie)
request.session[client.OAUTH_KEY] = access_token
# authorize the API client
c = client.OAuthClient(request.session[client.OAUTH_KEY])
# store first and last name
names_json = c.get_names()
names = json.loads(names_json)
request.session["name"] = "%s %s" % (names['first_name'], names['last_name'])
return redirect("/csb/")
def logout(request):
# clear browser session on logout
log.debug("logging out...")
request.session.clear()
return redirect("/")
|
bsd-2-clause
| 5,066,159,913,403,948,000 | 33.42268 | 83 | 0.65289 | false |
pyload/pyload
|
src/pyload/plugins/base/xfs_account.py
|
1
|
7673
|
# -*- coding: utf-8 -*-
import locale
import re
import time
import urllib.parse
from pyload.core.utils import parse
from ..helpers import parse_html_form, set_cookie, search_pattern
from .account import BaseAccount
class XFSAccount(BaseAccount):
__name__ = "XFSAccount"
__type__ = "account"
__version__ = "0.60"
__status__ = "stable"
__config__ = [
("enabled", "bool", "Activated", True),
("multi", "bool", "Multi-hoster", True),
("multi_mode", "all;listed;unlisted", "Hosters to use", "all"),
("multi_list", "str", "Hoster list (comma separated)", ""),
("multi_interval", "int", "Reload interval in hours", 12),
]
__description__ = """XFileSharing account plugin"""
__license__ = "GPLv3"
__authors__ = [
("zoidberg", "zoidberg@mujmail.cz"),
("Walter Purcaro", "vuolter@gmail.com"),
]
PLUGIN_DOMAIN = None
PLUGIN_URL = None
LOGIN_URL = None
COOKIES = True
PREMIUM_PATTERN = r"\(Premium only\)"
VALID_UNTIL_PATTERN = r"Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})"
TRAFFIC_LEFT_PATTERN = r"Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>"
TRAFFIC_LEFT_UNIT = "MiB" #: Used only if no group <U> was found
LEECH_TRAFFIC_PATTERN = r"Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>"
LEECH_TRAFFIC_UNIT = "MiB" #: Used only if no group <U> was found
LOGIN_FAIL_PATTERN = r"Incorrect Login or Password|account was banned|Error<"
LOGIN_BAN_PATTERN = r">(Your IP.+?)<a"
LOGIN_SKIP_PATTERN = r"op=logout"
def _set_xfs_cookie(self):
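        """Ensure the XFS 'lang=english' cookie is used, either by adding it
        to the plugin's COOKIES list or by setting it directly on the cookie
        jar."""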
cookie = (self.PLUGIN_DOMAIN, "lang", "english")
if isinstance(self.COOKIES, list) and cookie not in self.COOKIES:
            self.COOKIES.append(cookie)
else:
set_cookie(self.req.cj, *cookie)
def setup(self):
if not self.PLUGIN_DOMAIN:
self.fail_login(self._("Missing PLUGIN DOMAIN"))
if not self.PLUGIN_URL:
self.PLUGIN_URL = "http://www.{}/".format(self.PLUGIN_DOMAIN)
if not self.LOGIN_URL:
self.LOGIN_URL = urllib.parse.urljoin(self.PLUGIN_URL, "login.html")
if self.COOKIES:
self._set_xfs_cookie()
# TODO: Implement default grab_hosters routine
# def grab_hosters(self, user, password, data):
# pass
def grab_info(self, user, password, data):
validuntil = None
trafficleft = None
leechtraffic = None
premium = None
if not self.PLUGIN_URL: # TODO: Remove in 0.6.x
return
self.data = self.load(
self.PLUGIN_URL, get={"op": "my_account"}, cookies=self.COOKIES
)
premium = True if search_pattern(self.PREMIUM_PATTERN, self.data) is not None else False
m = search_pattern(self.VALID_UNTIL_PATTERN, self.data)
if m is not None:
expiredate = m.group(1).strip()
self.log_debug("Expire date: " + expiredate)
previous_locale = locale.getlocale(locale.LC_TIME)
try:
locale.setlocale(locale.LC_TIME, "en_US.UTF-8")
validuntil = time.mktime(time.strptime(expiredate, "%d %B %Y"))
except Exception as exc:
self.log_error(exc)
else:
self.log_debug(f"Valid until: {validuntil}")
if validuntil > time.mktime(time.gmtime()):
premium = True
trafficleft = -1
else:
premium = False
validuntil = None #: Registered account type (not premium)
finally:
locale.setlocale(locale.LC_TIME, previous_locale)
else:
self.log_debug("VALID UNTIL PATTERN not found")
m = search_pattern(self.TRAFFIC_LEFT_PATTERN, self.data)
if m is not None:
try:
traffic = m.groupdict()
size = traffic["S"]
if "nlimited" in size:
trafficleft = -1
if validuntil is None:
validuntil = -1
else:
if "U" in traffic:
unit = traffic["U"]
elif isinstance(self.TRAFFIC_LEFT_UNIT, str):
unit = self.TRAFFIC_LEFT_UNIT
else:
unit = ""
trafficleft = self.parse_traffic(size, unit)
except Exception as exc:
self.log_error(exc)
else:
self.log_debug("TRAFFIC LEFT PATTERN not found")
leech = [
m.groupdict() for m in re.finditer(self.LEECH_TRAFFIC_PATTERN, self.data)
]
if leech:
leechtraffic = 0
try:
for traffic in leech:
size = traffic["S"]
if "nlimited" in size:
leechtraffic = -1
if validuntil is None:
validuntil = -1
break
else:
if "U" in traffic:
unit = traffic["U"]
elif isinstance(self.LEECH_TRAFFIC_UNIT, str):
unit = self.LEECH_TRAFFIC_UNIT
else:
unit = ""
leechtraffic += self.parse_traffic(size + unit)
except Exception as exc:
self.log_error(exc)
else:
self.log_debug("LEECH TRAFFIC PATTERN not found")
return {
"validuntil": validuntil,
"trafficleft": trafficleft,
"leechtraffic": leechtraffic,
"premium": premium,
}
def signin(self, user, password, data):
self.data = self.load(self.LOGIN_URL, cookies=self.COOKIES)
if search_pattern(self.LOGIN_SKIP_PATTERN, self.data):
self.skip_login()
action, inputs = parse_html_form('name="FL"', self.data)
if not inputs:
inputs = {"op": "login", "redirect": self.PLUGIN_URL}
inputs.update({"login": user, "password": password})
if action:
url = urllib.parse.urljoin("http://", action)
else:
url = self.LOGIN_URL
self.data = self.load(url, post=inputs, cookies=self.COOKIES)
self.check_errors()
def check_errors(self):
self.log_info(self._("Checking for link errors..."))
if not self.data:
self.log_warning(self._("No data to check"))
return
m = search_pattern(self.LOGIN_BAN_PATTERN, self.data)
if m is not None:
try:
errmsg = m.group(1)
except (AttributeError, IndexError):
errmsg = m.group(0)
finally:
errmsg = re.sub(r"<.*?>", " ", errmsg.strip())
new_timeout = parse.seconds(errmsg)
if new_timeout > self.timeout:
self.timeout = new_timeout
self.fail_login(errmsg)
m = search_pattern(self.LOGIN_FAIL_PATTERN, self.data)
if m is not None:
try:
errmsg = m.group(1)
except (AttributeError, IndexError):
errmsg = m.group(0)
finally:
errmsg = re.sub(r"<.*?>", " ", errmsg.strip())
self.timeout = self.LOGIN_TIMEOUT
self.fail_login(errmsg)
self.log_info(self._("No errors found"))
|
agpl-3.0
| 3,073,557,959,690,170,000 | 30.191057 | 119 | 0.512707 | false |
brianrogers/TCEmailConnector
|
main.py
|
1
|
1540
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
output = ''
output += 'Send an email to tc@tcemailconnector.appspotmail.com with a statement in the body to add it to the Pantry'
output += '<br>'
output += '<br>'
output += 'example statement content:'
output += '<br>'
output += '<br>'
output += 'email:user@example.com'
output += '<br>'
output += 'fname:Joe'
output += '<br>'
output += 'lname:User'
output += '<br>'
output += 'verb:completed'
output += '<br>'
output += 'objectid:http://dev.cloud.scorm.com/testing/102'
output += '<br>'
output += 'objectname:A Test Course'
output += '<br>'
output += 'objectdesc:This is the course description'
self.response.write(output)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
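# Illustrative sketch (hypothetical, not part of this file): the statement
# shown above is a plain "key:value" per-line format, so a minimal parser for
# such a mail body could look like the function below (name and behaviour are
# assumptions; the actual inbound-mail handler lives elsewhere in the project).
#
#   def parse_statement(body):
#       fields = {}
#       for line in body.splitlines():
#           if ':' in line:
#               key, value = line.split(':', 1)  # split once so URLs keep ':'
#               fields[key.strip()] = value.strip()
#       return fields
#
#   parse_statement('email:user@example.com\nverb:completed')
#   # -> {'email': 'user@example.com', 'verb': 'completed'}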
|
mit
| -7,863,133,173,778,177,000 | 33.222222 | 125 | 0.632468 | false |
jakobkolb/MayaSim
|
mayasim/model/ModelCore.py
|
1
|
66303
|
from __future__ import print_function
import datetime
import operator
import os
import sys
import traceback
import warnings
from itertools import compress
import networkx as nx
import numpy as np
import pandas
import pkg_resources
import scipy.ndimage as ndimage
import scipy.sparse as sparse
try:
import cPickle as pkl
except ImportError:
import pickle as pkl
if __name__ == "__main__":
from ModelParameters import ModelParameters as Parameters
from f90routines import f90routines
else:
from .f90routines import f90routines
from .ModelParameters import ModelParameters as Parameters
class ModelCore(Parameters):
def __init__(self,
n=30,
output_data_location=None,
debug=False,
output_trajectory=True,
**kwargs):
"""
Instance of the MayaSim model.
Parameters
----------
n: int
number of settlements to initialize,
output_data_location: path_like
string stating the folder path to which the output
            files will be written,
debug: bool
switch for debugging output from model,
output_trajectory: bool
switch for output of trajectory data,
output_settlement_data: bool
switch for output of settlement data,
output_geographic_data: bool
switch for output of geographic data.
"""
# Input/Output settings:
# Set path to static input files
input_data_location = pkg_resources. \
resource_filename('mayasim', 'input_data/')
# Debugging settings
self.debug = debug
        # In debug mode, always print stack for warnings and errors.
def warn_with_traceback(message,
category,
filename,
lineno,
file=None,
line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(
warnings.formatwarning(message, category, filename, lineno,
line))
if self.debug:
warnings.showwarning = warn_with_traceback
# *******************************************************************
# MODEL PARAMETERS (to be varied)
# *******************************************************************
self.output_trajectory = output_trajectory
# Settlement and geographic data will be written to files in each time step,
# Trajectory data will be kept in one data structure to be read out, when
# the model run finished.
if output_data_location != 0:
# remove file ending
self.output_data_location = output_data_location.rsplit('.', 1)[0]
# create callable output paths
self.settlement_output_path = \
lambda i: self.output_data_location + \
f'settlement_data_{i:03d}.pkl'
self.geographic_output_path = \
lambda i: self.output_data_location + \
f'geographic_data_{i:03d}.pkl'
# set switches for output generation
self.output_geographic_data = True
self.output_settlement_data = True
else:
self.output_geographic_data = False
self.output_settlement_data = False
self.trajectory = []
self.traders_trajectory = []
# *******************************************************************
# MODEL DATA SOURCES
# *******************************************************************
# documentation for TEMPERATURE and PRECIPITATION data can be found
# here: http://www.worldclim.org/formats
# apparently temperature data is given in x*10 format to allow for
# smaller file sizes.
# original version of mayasim divides temperature by 12 though
self.temp = np.load(input_data_location +
'0_RES_432x400_temp.npy') / 12.
# precipitation in mm or liters per square meter
# (comparing the numbers to numbers from Wikipedia suggests
# that it is given per year)
self.precip = np.load(input_data_location + '0_RES_432x400_precip.npy')
# in meters above sea level
self.elev = np.load(input_data_location + '0_RES_432x400_elev.npy')
self.slope = np.load(input_data_location + '0_RES_432x400_slope.npy')
# documentation for SOIL PRODUCTIVITY is given at:
# http://www.fao.org/geonetwork/srv/en/
# main.home?uuid=f7a2b3c0-bdbf-11db-a0f6-000d939bc5d8
# The soil production index considers the suitability
        # of the best adapted crop to each soil's
# condition in an area and makes a weighted average for
# all soils present in a pixel based
# on the formula: 0.9 * VS + 0.6 * S + 0.3 * MS + 0 * NS.
# Values range from 0 (bad) to 6 (good)
self.soilprod = np.load(input_data_location + '0_RES_432x400_soil.npy')
# it also sets soil productivity to 1.5 where the elevation is <= 1
# self.soilprod[self.elev <= 1] = 1.5
        # complains because there are NaNs in elev
for ind, x in np.ndenumerate(self.elev):
if not np.isnan(x):
if x <= 1.:
self.soilprod[ind] = 1.5
# smoothen soil productivity dataset
self.soilprod = ndimage.gaussian_filter(self.soilprod,
sigma=(2, 2),
order=0)
# and set to zero for non land cells
self.soilprod[np.isnan(self.elev)] = 0
# *******************************************************************
# MODEL MAP INITIALIZATION
# *******************************************************************
# dimensions of the map
self.rows, self.columns = self.precip.shape
self.height, self.width = 914., 840. # height and width in km
self.pixel_dim = self.width / self.columns
self.cell_width = self.width / self.columns
self.cell_height = self.height / self.rows
self.land_patches = np.asarray(np.where(np.isfinite(self.elev)))
self.number_of_land_patches = self.land_patches.shape[1]
        # length unit - total map is about 500 km wide
self.area = 516484. / len(self.land_patches[0])
self.elev[:, 0] = np.inf
self.elev[:, -1] = np.inf
self.elev[0, :] = np.inf
self.elev[-1, :] = np.inf
# create a list of the index values i = (x, y) of the land
# patches with finite elevation h
self.list_of_land_patches = [
i for i, h in np.ndenumerate(self.elev)
if np.isfinite(self.elev[i])
]
# initialize soil degradation and population
# gradient (influencing the forest)
# *******************************************************************
# INITIALIZE ECOSYSTEM
# *******************************************************************
# Soil (influencing primary production and agricultural productivity)
self.soil_deg = np.zeros((self.rows, self.columns))
# Forest
self.forest_state = np.ones((self.rows, self.columns), dtype=int)
self.forest_state[np.isnan(self.elev)] = 0
self.forest_memory = np.zeros((self.rows, self.columns), dtype=int)
self.cleared_land_neighbours = np.zeros((self.rows, self.columns),
dtype=int)
# The forest has three states: 3=climax forest,
# 2=secondary regrowth, 1=cleared land.
for i in self.list_of_land_patches:
self.forest_state[i] = 3
# Variables describing total amount of water and water flow
self.water = np.zeros((self.rows, self.columns))
self.flow = np.zeros((self.rows, self.columns))
self.spaciotemporal_precipitation = np.zeros((self.rows, self.columns))
# initialize the trajectories of the water drops
self.x = np.zeros((self.rows, self.columns), dtype="int")
self.y = np.zeros((self.rows, self.columns), dtype="int")
# define relative coordinates of the neighbourhood of a cell
self.neighbourhood = [(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]]
self.f90neighbourhood = np.asarray(self.neighbourhood).T
# *******************************************************************
# INITIALIZE SOCIETY
# *******************************************************************
# Population gradient (influencing the forest)
self.pop_gradient = np.zeros((self.rows, self.columns))
self.number_settlements = n
# distribute specified number of settlements on the map
self.settlement_positions = self.land_patches[:,
np.random.choice(
len(self.
land_patches[1]),
n).astype('int')]
self.age = [0] * n
# demographic variables
self.birth_rate = [self.birth_rate_parameter] * n
self.death_rate = [0.1 + 0.05 * r for r in list(np.random.random(n))]
self.population = list(
np.random.randint(self.min_init_inhabitants,
self.max_init_inhabitants, n).astype(float))
self.mig_rate = [0.] * n
self.out_mig = [0] * n
self.migrants = [0] * n
self.pioneer_set = []
self.failed = 0
# index list for populated and abandoned cities
# used until removal of dead cities is implemented.
self.populated_cities = range(n)
self.dead_cities = []
# agricultural influence
self.number_cells_in_influence = [0] * n
self.area_of_influence = [0.] * n
self.coordinates = np.indices((self.rows, self.columns))
self.cells_in_influence = [None] * n # will be a list of arrays
self.cropped_cells = [None] * n
# for now, cropped cells are only the city positions.
# first cropped cells are added at the first call of
# get_cropped_cells()
for city in self.populated_cities:
self.cropped_cells[city] = [[self.settlement_positions[0, city]],
[self.settlement_positions[1, city]]]
# print(self.cropped_cells[1])
self.occupied_cells = np.zeros((self.rows, self.columns))
self.number_cropped_cells = [0] * n
self.crop_yield = [0.] * n
self.eco_benefit = [0.] * n
self.available = 0
# details of income from ecosystems services
self.s_es_ag = [0.] * n
self.s_es_wf = [0.] * n
self.s_es_fs = [0.] * n
self.s_es_sp = [0.] * n
self.s_es_pg = [0.] * n
self.es_ag = np.zeros((self.rows, self.columns), dtype=float)
self.es_wf = np.zeros((self.rows, self.columns), dtype=float)
self.es_fs = np.zeros((self.rows, self.columns), dtype=float)
self.es_sp = np.zeros((self.rows, self.columns), dtype=float)
self.es_pg = np.zeros((self.rows, self.columns), dtype=float)
# Trade Variables
self.adjacency = np.zeros((n, n))
self.rank = [0] * n
self.degree = [0] * n
self.comp_size = [0] * n
self.centrality = [0] * n
self.trade_income = [0] * n
self.max_cluster_size = 0
# total real income per capita
self.real_income_pc = [0] * n
def _get_run_variables(self):
"""
Saves all variables and values of the class instance 'self'
in a dictionary file at the location given by 'path'
Parameters:
-----------
self: class instance
class instance whose variables are saved
"""
dictionary = {
attr: getattr(self, attr)
for attr in dir(self)
if not attr.startswith('__') and not callable(getattr(self, attr))
}
return dictionary
def update_precipitation(self, t):
"""
Modulates the initial precip dataset with a 24 timestep period.
Returns a field of rainfall values for each cell.
If veg_rainfall > 0, cleared_land_neighbours decreases rain.
TO DO: The original Model increases specialization every time
rainfall decreases, assuming that trade gets more important to
compensate for agriculture decline
"""
if self.precipitation_modulation:
self.spaciotemporal_precipitation = \
self.precip * (
1 + self.precipitation_amplitude *
self.precipitation_variation[
(np.ceil(t / self.climate_var) % 8).astype(int)]) \
- self.veg_rainfall * self.cleared_land_neighbours
else:
self.spaciotemporal_precipitation = \
self.precip * (1 -
self.veg_rainfall * self.cleared_land_neighbours)
# check if system time is in drought period
drought = False
for drought_time in self.drought_times:
if drought_time[0] < t <= drought_time[1]:
drought = True
# if so, decrease precipitation by factor percentage given by
# drought severity
if drought:
self.spaciotemporal_precipitation *= \
(1. - self.drought_severity / 100.)
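    # Illustrative sketch: the modulation index used above steps through the
    # 8 entries of precipitation_variation, holding each one for climate_var
    # time steps, so the full cycle is 8 * climate_var steps (24 for
    # climate_var = 3, matching the docstring).  climate_var = 3 below is an
    # assumed value for the example only.
    #
    #   import numpy as np
    #   climate_var = 3
    #   [int(np.ceil(t / climate_var) % 8) for t in range(1, 10)]
    #   # -> [1, 1, 1, 2, 2, 2, 3, 3, 3]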
def get_waterflow(self):
"""
        waterflow: takes rain as an argument, uses elev, returns
        water flow distribution.
        The precip percent parameter reduces the amount of raindrops that
        have to be moved, thereby increasing performance.
f90waterflow takes as arguments:
list of coordinates of land cells (2xN_land)
elevation map in (height x width)
rain_volume per cell map in (height x width)
rain_volume and elevation must have same units: height per cell
neighbourhood offsets
height and width of map as integers,
Number of land cells, N_land
"""
# convert precipitation from mm to meters
# NOTE: I think, this should be 1e-3
# to convert from mm to meters though...
# but 1e-5 is what they do in the original version.
rain_volume = np.nan_to_num(self.spaciotemporal_precipitation * 1e-5)
max_x, max_y = self.rows, self.columns
err, self.flow, self.water = \
f90routines.f90waterflow(self.land_patches,
self.elev,
rain_volume,
self.f90neighbourhood,
max_x,
max_y,
self.number_of_land_patches)
return self.water, self.flow
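    # Note / sketch on the conversion above: physically, 1 mm of rain is
    # 1e-3 m of water column, so the 1e-5 factor (kept from the original
    # implementation) is a factor 100 smaller than the strict conversion.
    #
    #   rain_mm = 1200.                  # illustrative annual precipitation
    #   rain_m_strict = rain_mm * 1e-3   # 1.2 m water column
    #   rain_m_model = rain_mm * 1e-5    # 0.012 m, as passed to f90waterflow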
def forest_evolve(self, npp):
npp_mean = np.nanmean(npp)
# Iterate over all cells repeatedly and regenerate or degenerate
for repeat in range(4):
for i in self.list_of_land_patches:
if not np.isnan(self.elev[i]):
# Forest regenerates faster [slower] (linearly),
# if net primary productivity on the patch
# is above [below] average.
threshold = npp_mean / npp[i]
# Degradation:
# Decrement with probability 0.003
# if there is a settlement around,
# degrade with higher probability
probdec = self.natprobdec * (2 * self.pop_gradient[i] + 1)
if np.random.random() <= probdec:
if self.forest_state[i] == 3:
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
elif self.forest_state[i] == 2:
self.forest_state[i] = 1
self.forest_memory[i] = 0
# Regeneration:"
# recover if tree = 1 and memory > threshold 1
if (self.forest_state[i] == 1 and self.forest_memory[i] >
self.state_change_s2 * threshold):
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
# recover if tree = 2 and memory > threshold 2
# and certain number of neighbours are
# climax forest as well
if (self.forest_state[i] == 2 and self.forest_memory[i] >
self.state_change_s3 * threshold):
state_3_neighbours = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 3)
if state_3_neighbours > \
self.min_number_of_s3_neighbours:
self.forest_state[i] = 3
# finally, increase memory by one
self.forest_memory[i] += 1
# calculate cleared land neighbours for output:
if self.veg_rainfall > 0:
for i in self.list_of_land_patches:
self.cleared_land_neighbours[i] = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 1)
assert not np.any(self.forest_state[~np.isnan(self.elev)] < 1), \
'forest state is smaller than 1 somewhere'
return
def net_primary_prod(self):
"""
        net_primary_prod is the minimum of a quantity
        derived from local temperature and rain.
Why is it rain and not 'surface water'
according to the waterflow model?
"""
# EQUATION ############################################################
npp = 3000 \
* np.minimum(1 - np.exp(-6.64e-4
* self.spaciotemporal_precipitation),
1. / (1 + np.exp(1.315 - (0.119 * self.temp))))
# EQUATION ############################################################
return npp
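    # Illustrative sketch: the NPP formula above on sample numbers
    # (1500 mm precipitation and 25 deg C are assumed values for the example).
    #
    #   import numpy as np
    #   precip, temp = 1500., 25.
    #   rain_term = 1 - np.exp(-6.64e-4 * precip)            # ~0.63
    #   temp_term = 1. / (1 + np.exp(1.315 - 0.119 * temp))  # ~0.84
    #   npp = 3000 * min(rain_term, temp_term)               # ~1890, rain-limited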
def get_ag(self, npp, wf):
"""
        agricultural productivity is calculated via a
linear additive model from
net primary productivity, soil productivity,
slope, waterflow and soil degradation
of each patch.
"""
# EQUATION ############################################################
return self.a_npp * npp + self.a_sp * self.soilprod \
- self.a_s * self.slope - self.a_wf * wf - self.soil_deg
# EQUATION ############################################################
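    # Illustrative sketch: the linear model above on sample numbers.  The
    # weights a_npp, a_sp, a_s, a_wf come from ModelParameters; the values
    # below are assumptions for the example, not the calibrated ones.
    #
    #   a_npp, a_sp, a_s, a_wf = 0.1, 1.0, 1.0, 0.01
    #   npp, soilprod, slope, wf, soil_deg = 1800., 4.5, 2.0, 50., 0.
    #   ag = a_npp * npp + a_sp * soilprod - a_s * slope - a_wf * wf - soil_deg
    #   # -> 180 + 4.5 - 2 - 0.5 - 0 = 182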
def get_ecoserv(self, ag, wf):
"""
Ecosystem Services are calculated via a linear
additive model from agricultural productivity (ag),
waterflow through the cell (wf) and forest
state on the cell (forest) \in [1,3],
The recent version of mayasim limits value of
ecosystem services to 1 < ecoserv < 250, it also proposes
to include population density (pop_gradient) and precipitation (rain)
"""
# EQUATION ###########################################################
if not self.better_ess:
self.es_ag = self.e_ag * ag
self.es_wf = self.e_wf * wf
self.es_fs = self.e_f * (self.forest_state - 1.)
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
else:
# change to use forest as proxy for income from agricultural
# productivity. Multiply by 2 to get same per cell levels as
# before
self.es_ag = np.zeros(np.shape(ag))
self.es_wf = self.e_wf * wf
self.es_fs = 2. * self.e_ag * (self.forest_state - 1.) * ag
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
return (self.es_ag + self.es_wf + self.es_fs + self.es_sp - self.es_pg)
# EQUATION ###########################################################
######################################################################
# The Society
######################################################################
def benefit_cost(self, ag_in):
# Benefit cost assessment
return (self.max_yield *
(1 - self.origin_shift * np.exp(-self.slope_yield * ag_in)))
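    # Note / sketch: the curve above saturates at max_yield for large ag_in
    # and equals max_yield * (1 - origin_shift) at ag_in = 0.  Parameter
    # values below are assumptions for the example only.
    #
    #   import numpy as np
    #   max_yield, origin_shift, slope_yield = 1000., 1.1, 0.01
    #   bca = lambda ag: max_yield * (1 - origin_shift * np.exp(-slope_yield * ag))
    #   bca(0.), bca(200.), bca(1000.)   # -> -100.0, ~851, ~999.95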
def get_cells_in_influence(self):
"""
creates a list of cells for each city that are under its influence.
        these are the cells that are closer than population^0.8/60 (which is
        not explained any further... change denominator to 80 and max value to
        30 from eyeballing the results).
"""
# EQUATION ####################################################################
self.area_of_influence = [(x**0.8) / 60. for x in self.population]
self.area_of_influence = [
value if value < 40. else 40. for value in self.area_of_influence
]
# EQUATION ####################################################################
for city in self.populated_cities:
distance = np.sqrt((self.cell_width *
(self.settlement_positions[0][city] -
self.coordinates[0]))**2 +
(self.cell_height *
(self.settlement_positions[1][city] -
self.coordinates[1]))**2)
stencil = distance <= self.area_of_influence[city]
self.cells_in_influence[city] = self.coordinates[:, stencil]
self.number_cells_in_influence = [
len(x[0]) for x in self.cells_in_influence
]
return
def get_cropped_cells(self, bca):
"""
Updates the cropped cells for each city with positive population.
Calculates the utility for each cell (depending on distance from
        the respective city). If population per cropped cell is lower than
min_people_per_cropped_cell, cells are abandoned.
Cells with negative utility are also abandoned.
If population per cropped cell is higher than
max_people_per_cropped_cell, new cells are cropped.
Newly cropped cells are chosen such that they have highest utility
"""
abandoned = 0
sown = 0
# for each settlement: how many cells are currently cropped ?
self.number_cropped_cells = np.array(
[len(x[0]) for x in self.cropped_cells])
# agricultural population density (people per cropped land)
# determines the number of cells that can be cropped.
ag_pop_density = [
p / (self.number_cropped_cells[c] * self.area)
if self.number_cropped_cells[c] > 0 else 0.
for c, p in enumerate(self.population)
]
# occupied_cells is a mask of all occupied cells calculated as the
# unification of the cropped cells of all settlements.
if len(self.cropped_cells) > 0:
occup = np.concatenate(self.cropped_cells, axis=1).astype('int')
if False:
print('population of cities without agriculture:')
print(
np.array(self.population)[self.number_cropped_cells == 0])
print('pt. migration from cities without agriculture:')
print(np.array(self.out_mig)[self.number_cropped_cells == 0])
print('out migration from cities without agriculture:')
print(np.array(self.migrants)[self.number_cropped_cells == 0])
for index in range(len(occup[0])):
self.occupied_cells[occup[0, index], occup[1, index]] = 1
# the age of settlements is increased here.
self.age = [x + 1 for x in self.age]
# for each settlement: which cells to crop ?
# calculate utility first! This can be accelerated, if calculations
# are only done in 40 km radius.
for city in self.populated_cities:
cells = list(
zip(self.cells_in_influence[city][0],
self.cells_in_influence[city][1]))
# EQUATION ########################################################
utility = [
bca[x, y] - self.estab_cost - (self.ag_travel_cost * np.sqrt(
(self.cell_width * (self.settlement_positions[0][city] -
self.coordinates[0][x, y]))**2 +
(self.cell_height * (self.settlement_positions[1][city] -
self.coordinates[1][x, y]))**2)) /
np.sqrt(self.population[city]) for (x, y) in cells
]
# EQUATION ########################################################
available = [
True if self.occupied_cells[x, y] == 0 else False
for (x, y) in cells
]
# jointly sort utilities, availability and cells such that cells
# with highest utility are first.
sorted_utility, sorted_available, sorted_cells = \
list(zip(*sorted(list(zip(utility, available, cells)),
reverse=True)))
# of these sorted lists, sort filter only available cells
available_util = list(
compress(list(sorted_utility), list(sorted_available)))
available_cells = list(
compress(list(sorted_cells), list(sorted_available)))
# save local copy of all cropped cells
cropped_cells = list(zip(*self.cropped_cells[city]))
# select utilities for these cropped cells
cropped_utils = [
utility[cells.index(cell)] if cell in cells else -1
for cell in cropped_cells
]
# sort utilitites and cropped cells to lowest utilities first
city_has_crops = True if len(cropped_cells) > 0 else False
if city_has_crops:
occupied_util, occupied_cells = \
zip(*sorted(list(zip(cropped_utils, cropped_cells))))
# 1.) include new cells if population exceeds a threshold
# calculate number of new cells to crop
number_of_new_cells = np.floor(ag_pop_density[city]
/ self.max_people_per_cropped_cell) \
.astype('int')
# and crop them by selecting cells with positive utility from the
# beginning of the list
for n in range(min([number_of_new_cells, len(available_util)])):
if available_util[n] > 0:
self.occupied_cells[available_cells[n]] = 1
for dim in range(2):
self.cropped_cells[city][dim] \
.append(available_cells[n][dim])
if city_has_crops:
# 2.) abandon cells if population too low
# after cities age > 5 years
if (ag_pop_density[city] < self.min_people_per_cropped_cell
and self.age[city] > 5):
# There are some inconsistencies here. Cells are abandoned,
                    # if the 'people per cropped land' is lower than a
# threshold for 'people per cropped cells. Then the
# number of cells to abandon is calculated as 30/people
# per cropped land. Why?! (check the original version!)
number_of_lost_cells = np.ceil(
30 / ag_pop_density[city]).astype('int')
# TO DO: recycle utility and cell list to do this faster.
# therefore, filter cropped cells from utility list
# and delete last n cells.
for n in range(
min([number_of_lost_cells,
len(occupied_cells)])):
dropped_cell = occupied_cells[n]
self.occupied_cells[dropped_cell] = 0
for dim in range(2):
self.cropped_cells[city][dim] \
.remove(dropped_cell[dim])
abandoned += 1
# 3.) abandon cells with utility <= 0
# find cells that have negative utility and belong
# to city under consideration,
useless_cropped_cells = [
occupied_cells[i] for i in range(len(occupied_cells))
if occupied_util[i] < 0
and occupied_cells[i] in zip(*self.cropped_cells[city])
]
# and release them.
for useless_cropped_cell in useless_cropped_cells:
self.occupied_cells[useless_cropped_cell] = 0
for dim in range(2):
try:
self.cropped_cells[city][dim] \
.remove(useless_cropped_cell[dim])
except ValueError:
print('ERROR: Useless cell gone already')
abandoned += 1
# Finally, update list of lists containing cropped cells for each city
# with positive population.
self.number_cropped_cells = [
len(self.cropped_cells[city][0])
for city in range(len(self.population))
]
return abandoned, sown
def get_pop_mig(self):
# gives population and out-migration
# print("number of settlements", len(self.population))
# death rate correlates inversely with real income per capita
death_rate_diff = self.max_death_rate - self.min_death_rate
self.death_rate = [
-death_rate_diff * self.real_income_pc[i] + self.max_death_rate
for i in range(len(self.real_income_pc))
]
self.death_rate = list(
np.clip(self.death_rate, self.min_death_rate, self.max_death_rate))
# if population control,
# birth rate negatively correlates with population size
if self.population_control:
birth_rate_diff = self.max_birth_rate - self.min_birth_rate
self.birth_rate = [
-birth_rate_diff / 10000. * value +
self.shift if value > 5000 else self.birth_rate_parameter
for value in self.population
]
# population grows according to effective growth rate
self.population = [
int((1. + self.birth_rate[i] - self.death_rate[i]) * value)
for i, value in enumerate(self.population)
]
self.population = [
value if value > 0 else 0 for value in self.population
]
        mig_rate_diff = self.max_mig_rate - self.min_mig_rate
# outmigration rate also correlates
# inversely with real income per capita
self.mig_rate = [
            -mig_rate_diff * self.real_income_pc[i] + self.max_mig_rate
for i in range(len(self.real_income_pc))
]
self.mig_rate = list(
np.clip(self.mig_rate, self.min_mig_rate, self.max_mig_rate))
self.out_mig = [
int(self.mig_rate[i] * self.population[i])
for i in range(len(self.population))
]
self.out_mig = [value if value > 0 else 0 for value in self.out_mig]
return
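    # Illustrative sketch: both rates above interpolate linearly between
    # their min and max as real income per capita goes from 1 to 0 and are
    # then clipped.  Bounds and income below are assumed example values.
    #
    #   import numpy as np
    #   min_rate, max_rate, income = 0.005, 0.25, 0.8
    #   rate = -(max_rate - min_rate) * income + max_rate   # 0.054
    #   rate = float(np.clip(rate, min_rate, max_rate))     # unchanged here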
# impact of sociosphere on ecosphere
def update_pop_gradient(self):
# pop gradient quantifies the disturbance of the forest by population
self.pop_gradient = np.zeros((self.rows, self.columns))
for city in self.populated_cities:
distance = np.sqrt(self.area * (
(self.settlement_positions[0][city] - self.coordinates[0])**2 +
(self.settlement_positions[1][city] - self.coordinates[1])**2))
# EQUATION ###################################################################
self.pop_gradient[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]] += \
self.population[city] \
/ (300 * (1 + distance[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]]))
# EQUATION ###################################################################
self.pop_gradient[self.pop_gradient > 15] = 15
def evolve_soil_deg(self):
# soil degrades for cropped cells
cropped = np.concatenate(self.cropped_cells, axis=1).astype('int')
self.soil_deg[cropped[0], cropped[1]] += self.deg_rate
self.soil_deg[self.forest_state == 3] -= self.reg_rate
self.soil_deg[self.soil_deg < 0] = 0
def get_rank(self):
# depending on population ranks are assigned
        # attention: ranks are reversed with respect to Netlogo MayaSim!
# 1 => 3 ; 2 => 2 ; 3 => 1
self.rank = [
3
if value > self.thresh_rank_3 else 2 if value > self.thresh_rank_2
else 1 if value > self.thresh_rank_1 else 0
for index, value in enumerate(self.population)
]
return
@property
def build_routes(self):
adj = self.adjacency.copy()
adj[adj == -1] = 0
built_links = 0
lost_links = 0
g = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())
self.degree = g.out_degree()
# cities with rank>0 are traders and establish links to neighbours
for city in self.populated_cities:
if self.degree[city] < self.rank[city]:
distances = \
(np.sqrt(self.area * (+ (self.settlement_positions[0][city]
- self.settlement_positions[0]) ** 2
+ (self.settlement_positions[1][city]
- self.settlement_positions[1]) ** 2
)))
                if self.rank[city] == 3:
                    threshold = 31. * (
                        self.thresh_rank_3 / self.thresh_rank_3 * 0.5 + 1.)
                elif self.rank[city] == 2:
                    threshold = 31. * (
                        self.thresh_rank_2 / self.thresh_rank_3 * 0.5 + 1.)
                elif self.rank[city] == 1:
                    threshold = 31. * (
                        self.thresh_rank_1 / self.thresh_rank_3 * 0.5 + 1.)
                else:
                    threshold = 0
                # don't choose yourself as nearest neighbor
                distances[city] = 2 * threshold
                # collect close enough neighbors and omit those that are
                # already connected.
                a = distances <= threshold
b = self.adjacency[city] == 0
nearby = np.array(list(map(operator.and_, a, b)))
# if there are traders nearby,
# connect to the one with highest population
if sum(nearby) != 0:
try:
new_partner = np.nanargmax(self.population * nearby)
self.adjacency[city, new_partner] = 1
self.adjacency[new_partner, city] = -1
built_links += 1
except ValueError:
print('ERROR in new partner')
print(np.shape(self.population),
np.shape(self.settlement_positions[0]))
sys.exit(-1)
            # cities that can't maintain their trade links lose them:
elif self.degree[city] > self.rank[city]:
# get neighbors of node
neighbors = g.successors(city)
# find smallest of neighbors
smallest_neighbor = self.population.index(
min([self.population[nb] for nb in neighbors]))
# cut link with him
self.adjacency[city, smallest_neighbor] = 0
self.adjacency[smallest_neighbor, city] = 0
lost_links += 1
return (built_links, lost_links)
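    # Note on the adjacency convention used above: a new route is stored
    # asymmetrically, +1 at [city, partner] for the settlement that built the
    # link and -1 at [partner, city].  build_routes zeroes the -1 entries
    # before building the DiGraph, so out_degree counts only links a
    # settlement established itself, while get_comps / get_centrality use
    # np.absolute(adjacency), i.e. the undirected network.  For example:
    #
    #   adjacency = np.zeros((3, 3))
    #   adjacency[0, 1], adjacency[1, 0] = 1, -1   # city 0 built a link to 1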
def get_comps(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_comp_size, tmp_degree = \
f90routines.f90sparsecomponents(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.comp_size, self.degree = list(tmp_comp_size), list(tmp_degree)
elif l_a == 0:
self.comp_size, self.degree = [0] * (l_ic - 1), [0] * (l_ic - 1)
return
def get_centrality(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# print('number of trade links:', sum(a) / 2)
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_centrality = f90routines \
.f90sparsecentrality(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.centrality = list(tmp_centrality)
elif l_a == 0:
self.centrality = [1] * (l_ic - 1)
return
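    # Illustrative sketch: what the CSR vectors passed to Fortran look like
    # for a small (assumed) 3-settlement network where only 0 and 1 trade.
    #
    #   import numpy as np
    #   import scipy.sparse as sparse
    #   adj = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
    #   csr = sparse.csr_matrix(adj)
    #   csr.data, csr.indices, csr.indptr   # -> [1 1], [1 0], [0 1 2 2]
    #   # +1 is then added to indices and indptr because Fortran counts from 1.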
def get_crop_income(self, bca):
# agricultural benefit of cropping
for city in self.populated_cities:
crops = bca[self.cropped_cells[city][0], self.
cropped_cells[city][1]]
# EQUATION #
if self.crop_income_mode == "mean":
self.crop_yield[city] = self.r_bca_mean \
* np.nanmean(crops[crops > 0])
elif self.crop_income_mode == "sum":
self.crop_yield[city] = self.r_bca_sum \
* np.nansum(crops[crops > 0])
self.crop_yield = [
0 if np.isnan(self.crop_yield[index]) else self.crop_yield[index]
for index in range(len(self.crop_yield))
]
return
def get_eco_income(self, es):
# benefit from ecosystem services of cells in influence
# ##EQUATION###################################################################
for city in self.populated_cities:
if self.eco_income_mode == "mean":
self.eco_benefit[city] = self.r_es_mean \
* np.nanmean(es[self.cells_in_influence[city]])
elif self.eco_income_mode == "sum":
self.eco_benefit[city] = self.r_es_sum \
* np.nansum(es[self.cells_in_influence[city]])
self.s_es_ag[city] = self.r_es_sum \
* np.nansum(self.es_ag[self.cells_in_influence[city]])
self.s_es_wf[city] = self.r_es_sum \
* np.nansum(self.es_wf[self.cells_in_influence[city]])
self.s_es_fs[city] = self.r_es_sum \
* np.nansum(self.es_fs[self.cells_in_influence[city]])
self.s_es_sp[city] = self.r_es_sum \
* np.nansum(self.es_sp[self.cells_in_influence[city]])
self.s_es_pg[city] = self.r_es_sum \
* np.nansum(self.es_pg[self.cells_in_influence[city]])
try:
self.eco_benefit[self.population == 0] = 0
except IndexError:
self.print_variable_lengths()
# ##EQUATION###################################################################
return
def get_trade_income(self):
# ##EQUATION###################################################################
self.trade_income = [
1. / 30. * (1 + self.comp_size[i] / self.centrality[i])**0.9
for i in range(len(self.centrality))
]
self.trade_income = [
self.r_trade if value > 1 else 0 if
(value < 0 or self.degree[index] == 0) else self.r_trade * value
for index, value in enumerate(self.trade_income)
]
# ##EQUATION###################################################################
return
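    # Illustrative sketch: the raw value 1/30 * (1 + comp_size/centrality)**0.9
    # is capped at r_trade when it exceeds 1 and set to 0 for settlements
    # without trade links.  Numbers below are assumed example values.
    #
    #   comp_size, centrality, r_trade = 5., 3., 6000.
    #   raw = 1. / 30. * (1 + comp_size / centrality)**0.9   # ~0.081
    #   trade_income = r_trade * raw                         # ~484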
def get_real_income_pc(self):
# combine agricultural, ecosystem service and trade benefit
# EQUATION #
self.real_income_pc = [
(self.crop_yield[index] + self.eco_benefit[index] +
self.trade_income[index]) /
self.population[index] if value > 0 else 0
for index, value in enumerate(self.population)
]
return
def migration(self, es):
# if outmigration rate exceeds threshold, found new settlement
self.migrants = [0] * self.number_settlements
new_settlements = 0
vacant_lands = np.isfinite(es)
influenced_cells = np.concatenate(self.cells_in_influence, axis=1)
vacant_lands[influenced_cells[0], influenced_cells[1]] = 0
vacant_lands = np.asarray(np.where(vacant_lands == 1))
for city in self.populated_cities:
rd = np.random.rand()
if (self.out_mig[city] > 400 and len(vacant_lands[0]) > 0
and np.random.rand() <= 0.5):
mig_pop = self.out_mig[city]
self.migrants[city] = mig_pop
self.population[city] -= mig_pop
self.pioneer_set = \
vacant_lands[:, np.random.choice(len(vacant_lands[0]), 75)]
travel_cost = np.sqrt(
self.area *
((self.settlement_positions[0][city] - self.coordinates[0])
**2 + (self.settlement_positions[1][city] -
self.coordinates[1])**2))
utility = self.mig_ES_pref * es \
+ self.mig_TC_pref * travel_cost
utofpio = utility[self.pioneer_set[0], self.pioneer_set[1]]
new_loc = self.pioneer_set[:, np.nanargmax(utofpio)]
neighbours = \
(np.sqrt(self.area * ((new_loc[0]
- self.settlement_positions[0]) ** 2 +
(new_loc[1]
- self.settlement_positions[1]) ** 2
))) <= 7.5
summe = np.sum(neighbours)
if summe == 0:
self.spawn_city(new_loc[0], new_loc[1], mig_pop)
index = (vacant_lands[0, :] == new_loc[0]) \
& (vacant_lands[1, :] == new_loc[1])
                    vacant_lands = np.delete(vacant_lands,
                                             int(np.where(index)[0]), 1)
new_settlements += 1
return new_settlements
def kill_cities(self):
# BUG: cities can be added twice,
# if they have neither population nor cropped cells.
        # this might lead to unexpected consequences. see what happens,
# when after adding all cities, only unique ones are kept
killed_cities = 0
# kill cities if they have either no crops or no inhabitants:
dead_city_indices = [
i for i in range(len(self.population))
if self.population[i] <= self.min_city_size
]
if self.kill_cities_without_crops:
dead_city_indices += [
i for i in range(len(self.population))
if (len(self.cropped_cells[i][0]) <= 0)
]
# the following expression only keeps the unique entries.
# might solve the problem.
dead_city_indices = list(set(dead_city_indices))
# remove entries from variables
# simple lists that can be deleted elementwise
for index in sorted(dead_city_indices, reverse=True):
self.number_settlements -= 1
self.failed += 1
del self.age[index]
del self.birth_rate[index]
del self.death_rate[index]
del self.population[index]
del self.mig_rate[index]
del self.out_mig[index]
del self.number_cells_in_influence[index]
del self.area_of_influence[index]
del self.number_cropped_cells[index]
del self.crop_yield[index]
del self.eco_benefit[index]
del self.rank[index]
del self.degree[index]
del self.comp_size[index]
del self.centrality[index]
del self.trade_income[index]
del self.real_income_pc[index]
del self.cells_in_influence[index]
del self.cropped_cells[index]
del self.s_es_ag[index]
del self.s_es_wf[index]
del self.s_es_fs[index]
del self.s_es_sp[index]
del self.s_es_pg[index]
del self.migrants[index]
killed_cities += 1
# special cases:
self.settlement_positions = \
np.delete(self.settlement_positions,
dead_city_indices, axis=1)
self.adjacency = \
np.delete(np.delete(self.adjacency,
dead_city_indices, axis=0),
dead_city_indices, axis=1)
# update list of indices for populated and dead cities
# a) update list of populated cities
self.populated_cities = [
index for index, value in enumerate(self.population) if value > 0
]
# b) update list of dead cities
self.dead_cities = [
index for index, value in enumerate(self.population) if value == 0
]
return killed_cities
def spawn_city(self, x, y, mig_pop):
"""
Spawn a new city at given location with
given population and append it to all necessary lists.
Parameters
----------
x: int
x location of new city on map
y: int
y location of new city on map
mig_pop: int
initial population of new city
"""
# extend all variables to include new city
self.number_settlements += 1
self.settlement_positions = np.append(self.settlement_positions,
[[x], [y]], 1)
self.cells_in_influence.append([[x], [y]])
self.cropped_cells.append([[x], [y]])
n = len(self.adjacency)
self.adjacency = np.append(self.adjacency, [[0] * n], 0)
self.adjacency = np.append(self.adjacency, [[0]] * (n + 1), 1)
self.age.append(0)
self.birth_rate.append(self.birth_rate_parameter)
self.death_rate.append(0.1 + 0.05 * np.random.rand())
self.population.append(mig_pop)
self.mig_rate.append(0)
self.out_mig.append(0)
self.number_cells_in_influence.append(0)
self.area_of_influence.append(0)
self.number_cropped_cells.append(1)
self.crop_yield.append(0)
self.eco_benefit.append(0)
self.rank.append(0)
self.degree.append(0)
self.trade_income.append(0)
self.real_income_pc.append(0)
self.s_es_ag.append(0)
self.s_es_wf.append(0)
self.s_es_fs.append(0)
self.s_es_sp.append(0)
self.s_es_pg.append(0)
self.migrants.append(0)
def run(self, t_max=1):
"""
Run the model for a given number of steps.
If no number of steps is given, the model is integrated for one step
Parameters
----------
t_max: int
number of steps to integrate the model
"""
# initialize time step
t = 0
# print update about output state
if self.debug:
print('output of settlement and geodata is {} and {}'.format(
self.output_settlement_data, self.output_geographic_data))
# initialize variables
# net primary productivity
npp = np.zeros((self.rows, self.columns))
# water flow
if self.debug and t == 0:
wf = np.zeros((self.rows, self.columns))
elif not self.debug:
wf = np.zeros((self.rows, self.columns))
else:
pass
# agricultural productivity
ag = np.zeros((self.rows, self.columns))
# ecosystem services
es = np.zeros((self.rows, self.columns))
# benefit cost map for agriculture
bca = np.zeros((self.rows, self.columns))
self.init_output()
while t <= t_max:
t += 1
if self.debug:
print(f"time = {t}, population = {sum(self.population)}")
# evolve subselfs
# ecosystem
self.update_precipitation(t)
npp = self.net_primary_prod()
self.forest_evolve(npp)
# this is curious: only waterflow is used,
# water level is abandoned.
wf = self.get_waterflow()[1]
ag = self.get_ag(npp, wf)
es = self.get_ecoserv(ag, wf)
bca = self.benefit_cost(ag)
# society
if len(self.population) > 0:
self.get_cells_in_influence()
abandoned, sown = self.get_cropped_cells(bca)
self.get_crop_income(bca)
self.get_eco_income(es)
self.evolve_soil_deg()
self.update_pop_gradient()
self.get_rank()
(built, lost) = self.build_routes
self.get_comps()
self.get_centrality()
self.get_trade_income()
self.get_real_income_pc()
self.get_pop_mig()
new_settlements = self.migration(es)
killed_settlements = self.kill_cities()
else:
                abandoned = sown = 0
                built = lost = new_settlements = killed_settlements = 0
self.step_output(t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements)
def init_output(self):
"""initializes data output for trajectory, settlements and geography depending on settings"""
if self.output_trajectory:
self.init_trajectory_output()
self.init_traders_trajectory_output()
if self.output_geographic_data or self.output_settlement_data:
# If output data location is needed and does not exist, create it.
if not os.path.exists(self.output_data_location):
os.makedirs(self.output_data_location)
if not self.output_data_location.endswith('/'):
self.output_data_location += '/'
if self.output_settlement_data:
settlement_init_data = {'shape': (self.rows, self.columns)}
with open(self.settlement_output_path(0), 'wb') as f:
pkl.dump(settlement_init_data, f)
if self.output_geographic_data:
pass
def step_output(self, t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements):
"""
call different data saving routines depending on settings.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
abandoned: int
Number of cells that was abandoned in the previous time step
sown: int
Number of cells that was newly cropped in the previous time step
built : int
number of trade links built in this timestep
lost : int
number of trade links lost in this timestep
new_settlements : int
            number of new settlements that were spawned during the preceding
timestep
killed_settlements : int
            number of settlements that were killed during the preceding
timestep
"""
# append stuff to trajectory
if self.output_trajectory:
self.update_trajectory_output(t, [npp, wf, ag, es, bca], built,
lost, new_settlements,
killed_settlements)
self.update_traders_trajectory_output(t)
# save maps of spatial data
if self.output_geographic_data:
self.save_geographic_output(t, npp, wf, ag, es, bca, abandoned,
sown)
# save data on settlement basis
if self.output_settlement_data:
self.save_settlement_output(t)
def save_settlement_output(self, t):
"""
Organize settlement based data in Pandas Dataframe
and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
"""
colums = [
'population', 'real income', 'ag income', 'es income',
'trade income', 'x position', 'y position', 'out migration',
'degree'
]
data = [
self.population, self.real_income_pc, self.crop_yield,
self.eco_benefit, self.trade_income,
list(self.settlement_positions[0]),
list(self.settlement_positions[1]), self.migrants,
[self.degree[city] for city in self.populated_cities]
]
data = list(map(list, zip(*data)))
data_frame = pandas.DataFrame(columns=colums, data=data)
with open(self.settlement_output_path(t), 'wb') as f:
pkl.dump(data_frame, f)
def save_geographic_output(self, t, npp, wf, ag, es, bca, abandoned, sown):
"""
Organize Geographic data in dictionary (for separate layers
of data) and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
abandoned: int
Number of cells that was abandoned in the previous time step
sown: int
Number of cells that was newly cropped in the previous time step
"""
tmpforest = self.forest_state.copy()
tmpforest[np.isnan(self.elev)] = 0
data = {
'forest': tmpforest,
'waterflow': wf,
'cells in influence': self.cells_in_influence,
'number of cells in influence': self.number_cells_in_influence,
'cropped cells': self.cropped_cells,
'number of cropped cells': self.number_cropped_cells,
'abandoned sown': np.array([abandoned, sown]),
'soil degradation': self.soil_deg,
'population gradient': self.pop_gradient,
'adjacency': self.adjacency,
'x positions': list(self.settlement_positions[0]),
'y positions': list(self.settlement_positions[1]),
'population': self.population,
'elev': self.elev,
'rank': self.rank
}
with open(self.geographic_output_path(t), 'wb') as f:
pkl.dump(data, f)
def init_trajectory_output(self):
self.trajectory.append([
'time', 'total_population', 'max_settlement_population',
'total_migrants', 'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'mean_cluster_size', 'max_cluster_size', 'new_settlements',
'killed_settlements', 'built_trade_links', 'lost_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'mean_soil_degradation',
'forest_state_3_cells', 'forest_state_2_cells',
'forest_state_1_cells', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density', 'MAP', 'max_npp', 'mean_waterflow',
'max_AG', 'max_ES', 'max_bca', 'max_soil_deg', 'max_pop_grad'
])
def init_traders_trajectory_output(self):
self.traders_trajectory.append([
'time', 'total_population', 'total_migrants', 'total_traders',
'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density'
])
def update_trajectory_output(self, time, args, built, lost,
new_settlements, killed_settlements):
# args = [npp, wf, ag, es, bca]
total_population = sum(self.population)
try:
max_population = np.nanmax(self.population)
except:
max_population = float('nan')
        total_migrants = sum(self.migrants)
total_settlements = len(self.population)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum(self.crop_yield)
income_ecosystem = sum(self.eco_benefit)
income_trade = sum(self.trade_income)
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = float(sum(self.comp_size)) / number_of_components \
if number_of_components > 0 else 0
try:
max_cluster_size = max(self.comp_size)
except:
max_cluster_size = 0
self.max_cluster_size = max_cluster_size
total_agriculture_cells = sum(self.number_cropped_cells)
total_cells_in_influence = sum(self.number_cells_in_influence)
self.trajectory.append([
            time, total_population, max_population, total_migrants,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, mean_cluster_size,
max_cluster_size, new_settlements, killed_settlements, built, lost,
income_agriculture, income_ecosystem, income_trade,
np.nanmean(self.soil_deg),
np.sum(self.forest_state == 3),
np.sum(self.forest_state == 2),
np.sum(self.forest_state == 1),
np.sum(self.s_es_fs),
np.sum(self.s_es_wf),
np.sum(self.s_es_ag),
np.sum(self.s_es_sp),
np.sum(self.s_es_pg),
np.nanmean(self.spaciotemporal_precipitation),
np.nanmax(args[0]),
np.nanmean(args[1]),
np.nanmax(args[2]),
np.nanmax(args[3]),
np.nanmax(args[4]),
np.nanmax(self.soil_deg),
np.nanmax(self.pop_gradient)
])
def update_traders_trajectory_output(self, time):
traders = np.where(np.array(self.degree) > 0)[0]
total_population = sum([self.population[c] for c in traders])
total_migrants = sum([self.migrants[c] for c in traders])
total_settlements = len(self.population)
total_traders = len(traders)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum([self.crop_yield[c] for c in traders])
income_ecosystem = sum([self.eco_benefit[c] for c in traders])
income_trade = sum([self.trade_income[c] for c in traders])
income_es_fs = sum([self.s_es_fs[c] for c in traders])
income_es_wf = sum([self.s_es_wf[c] for c in traders])
income_es_ag = sum([self.s_es_ag[c] for c in traders])
income_es_sp = sum([self.s_es_sp[c] for c in traders])
income_es_pg = sum([self.s_es_pg[c] for c in traders])
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = (float(sum(self.comp_size)) / number_of_components
if number_of_components > 0 else 0)
try:
max_cluster_size = max(self.comp_size)
except:
max_cluster_size = 0
total_agriculture_cells = \
sum([self.number_cropped_cells[c] for c in traders])
total_cells_in_influence = \
sum([self.number_cells_in_influence[c] for c in traders])
self.traders_trajectory.append([
time, total_population, total_migrants, total_traders,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, income_agriculture,
income_ecosystem, income_trade, income_es_fs, income_es_wf,
income_es_ag, income_es_sp, income_es_pg
])
def get_trajectory(self):
try:
trj = np.array(self.trajectory)
columns = trj[0, :]
df = pandas.DataFrame(trj[1:, :], columns=columns)
except IOError:
print('trajectory mode must be turned on')
return df
def get_traders_trajectory(self):
try:
trj = self.traders_trajectory
columns = trj.pop(0)
df = pandas.DataFrame(trj, columns=columns)
except IOError:
print('trajectory mode must be turned on')
return df
def run_test(self, timesteps=5):
import shutil
N = 50
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.1
model.population_control = 'False'
model.run(timesteps)
trj = model.get_trajectory()
plot = trj.plot()
return 1
def print_variable_lengths(self):
for var in dir(self):
if not var.startswith('__') and not callable(getattr(self, var)):
try:
if len(getattr(self, var)) != 432:
print(var, len(getattr(self, var)))
except:
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
import shutil
N = 10
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
# os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
timesteps = 300
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.25
model.population_control = 'False'
model.run(timesteps)
trj = model.get_trajectory()
plot = trj[[
        'total_population', 'total_settlements', 'total_migrants'
]].plot()
plt.show()
    plt.savefig(location + 'plot')
|
gpl-3.0
| 4,912,365,931,477,879,000 | 38.209344 | 101 | 0.523777 | false |
tapomayukh/projects_in_python
|
rapid_categorization/haptic_map/outlier/hmm_crossvalidation_force.py
|
1
|
19066
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_length')
from data_variable_length_force import Fmat_original
if __name__ == '__main__' or __name__ != '__main__':
print "Inside outlier HMM model training file"
Fmat = Fmat_original
# Getting mean / covariance
i = 0
number_states = 10
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 35):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 0:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rf_force = np.zeros((number_states,1))
sigma_rf = np.zeros((number_states,1))
while (j < number_states):
mu_rf_force[j] = np.mean(feature_1_final_data[j])
sigma_rf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 35
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 70):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 35:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rm_force = np.zeros((number_states,1))
sigma_rm = np.zeros((number_states,1))
while (j < number_states):
mu_rm_force[j] = np.mean(feature_1_final_data[j])
sigma_rm[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 70
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 105):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 70:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sf_force = np.zeros((number_states,1))
sigma_sf = np.zeros((number_states,1))
while (j < number_states):
mu_sf_force[j] = np.mean(feature_1_final_data[j])
sigma_sf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 105
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 140):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 105:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sm_force = np.zeros((number_states,1))
sigma_sm = np.zeros((number_states,1))
while (j < number_states):
mu_sm_force[j] = np.mean(feature_1_final_data[j])
sigma_sm[j] = scp.std(feature_1_final_data[j])
j = j+1
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model represents it the closest.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
if number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*number_states
B_rm = [0.0]*number_states
B_sf = [0.0]*number_states
B_sm = [0.0]*number_states
for num_states in range(number_states):
B_rf[num_states] = [mu_rf_force[num_states][0],sigma_rf[num_states][0]]
B_rm[num_states] = [mu_rm_force[num_states][0],sigma_rm[num_states][0]]
B_sf[num_states] = [mu_sf_force[num_states][0],sigma_sf[num_states][0]]
B_sm[num_states] = [mu_sm_force[num_states][0],sigma_sm[num_states][0]]
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
if number_states == 3:
pi = [1./3.] * 3
elif number_states == 5:
pi = [0.2] * 5
elif number_states == 10:
pi = [0.1] * 10
elif number_states == 15:
pi = [1./15.] * 15
elif number_states == 20:
pi = [0.05] * 20
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
total_seq = Fmat
for i in range(140):
total_seq[i][:] = sum(total_seq[i][:],[])
while (trial_number < 6):
# For Training
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[1:5]
total_seq_rm = total_seq[36:40]
total_seq_sf = total_seq[71:75]
total_seq_sm = total_seq[106:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+1:j+5]
total_seq_rm = total_seq_rm+total_seq[j+36:j+40]
total_seq_sf = total_seq_sf+total_seq[j+71:j+75]
total_seq_sm = total_seq_sm+total_seq[j+106:j+110]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[0]]+total_seq[2:5]
total_seq_rm = [total_seq[35]]+total_seq[37:40]
total_seq_sf = [total_seq[70]]+total_seq[72:75]
total_seq_sm = [total_seq[105]]+total_seq[107:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+0]]+total_seq[j+2:j+5]
total_seq_rm = total_seq_rm+[total_seq[j+35]]+total_seq[j+37:j+40]
total_seq_sf = total_seq_sf+[total_seq[j+70]]+total_seq[j+72:j+75]
total_seq_sm = total_seq_sm+[total_seq[j+105]]+total_seq[j+107:j+110]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:2]+total_seq[3:5]
total_seq_rm = total_seq[35:37]+total_seq[38:40]
total_seq_sf = total_seq[70:72]+total_seq[73:75]
total_seq_sm = total_seq[105:107]+total_seq[108:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+2]+total_seq[j+3:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+37]+total_seq[j+38:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+72]+total_seq[j+73:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+107]+total_seq[j+108:j+110]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:3]+total_seq[4:5]
total_seq_rm = total_seq[35:38]+total_seq[39:40]
total_seq_sf = total_seq[70:73]+total_seq[74:75]
total_seq_sm = total_seq[105:108]+total_seq[109:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+3]+total_seq[j+4:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+38]+total_seq[j+39:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+73]+total_seq[j+74:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+108]+total_seq[j+109:j+110]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:4]
total_seq_rm = total_seq[35:39]
total_seq_sf = total_seq[70:74]
total_seq_sm = total_seq[105:109]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+4]
total_seq_rm = total_seq_rm+total_seq[j+35:j+39]
total_seq_sf = total_seq_sf+total_seq[j+70:j+74]
total_seq_sm = total_seq_sm+total_seq[j+105:j+109]
j = j+5
train_seq_rf = total_seq_rf
train_seq_rm = total_seq_rm
train_seq_sf = total_seq_sf
train_seq_sm = total_seq_sm
#print train_seq_rf[27]
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = [total_seq[0]]
total_seq_rm = [total_seq[35]]
total_seq_sf = [total_seq[70]]
total_seq_sm = [total_seq[105]]
#print np.shape(total_seq_rf)
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j]]
total_seq_rm = total_seq_rm+[total_seq[j+35]]
total_seq_sf = total_seq_sf+[total_seq[j+70]]
total_seq_sm = total_seq_sm+[total_seq[j+105]]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[1]]
total_seq_rm = [total_seq[36]]
total_seq_sf = [total_seq[71]]
total_seq_sm = [total_seq[106]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+1]]
total_seq_rm = total_seq_rm+[total_seq[j+36]]
total_seq_sf = total_seq_sf+[total_seq[j+71]]
total_seq_sm = total_seq_sm+[total_seq[j+106]]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = [total_seq[2]]
total_seq_rm = [total_seq[37]]
total_seq_sf = [total_seq[72]]
total_seq_sm = [total_seq[107]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+2]]
total_seq_rm = total_seq_rm+[total_seq[j+37]]
total_seq_sf = total_seq_sf+[total_seq[j+72]]
total_seq_sm = total_seq_sm+[total_seq[j+107]]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = [total_seq[3]]
total_seq_rm = [total_seq[38]]
total_seq_sf = [total_seq[73]]
total_seq_sm = [total_seq[108]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+3]]
total_seq_rm = total_seq_rm+[total_seq[j+38]]
total_seq_sf = total_seq_sf+[total_seq[j+73]]
total_seq_sm = total_seq_sm+[total_seq[j+108]]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = [total_seq[4]]
total_seq_rm = [total_seq[39]]
total_seq_sf = [total_seq[74]]
total_seq_sm = [total_seq[109]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+4]]
total_seq_rm = total_seq_rm+[total_seq[j+39]]
total_seq_sf = total_seq_sf+[total_seq[j+74]]
total_seq_sm = total_seq_sm+[total_seq[j+109]]
j = j+5
trial_number = trial_number + 1
print "Outlier HMM model trained"
|
mit
| 6,581,055,320,970,993,000 | 41.844944 | 133 | 0.48993 | false |
rwl/muntjac
|
muntjac/demo/sampler/features/embedded/ImageEmbed.py
|
1
|
1210
|
from muntjac.demo.sampler.features.embedded.FlashEmbed import FlashEmbed
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.features.embedded.WebEmbed import WebEmbed
from muntjac.demo.sampler.Feature import Feature, Version
from muntjac.ui.embedded import Embedded
from muntjac.terminal.theme_resource import ThemeResource
from muntjac.terminal.class_resource import ClassResource
from muntjac.terminal.external_resource import ExternalResource
class ImageEmbed(Feature):
def getSinceVersion(self):
return Version.V62
def getName(self):
return 'Image'
def getDescription(self):
return ('Add images to your applications using the Embedded '
'component. You can use all the different Resource types '
'Muntjac offers. ThemeResource is usually the easiest choice.')
def getRelatedAPI(self):
return [
APIResource(Embedded),
APIResource(ThemeResource),
APIResource(ClassResource),
APIResource(ExternalResource)
]
def getRelatedFeatures(self):
return [FlashEmbed, WebEmbed]
def getRelatedResources(self):
return None
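# Editorial sketch (assumed API usage, not part of the sampler feature above):
# embedding a theme image with Muntjac is expected to look roughly like this --
# the icon path below is hypothetical.
#
#   image = Embedded('Sample image', ThemeResource('../runo/icons/64/document.png'))
#   layout.addComponent(image)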
|
apache-2.0
| 19,035,337,279,808,780 | 27.809524 | 75 | 0.722314 | false |
rksaxena/hacker_ramp
|
scoring.py
|
1
|
2740
|
import find_overlap
scores = {
'zara': 0.5,
'vogue': 0.5,
'google': 0.5
}
def find_keyword_occurences_in_source(map, source):
freq = 0
for key, value in map.iteritems():
for k, v in value.iteritems():
if v['source'] == source:
freq += 1
# print freq
return freq
# return update_internal_score(map, freq, source)
def count_matching_score_items(map, source):
score_map = dict()
for key, values in map.iteritems():
for k, v in values.iteritems():
if v['source'] == source:
if v['count'] in score_map:
score_map[v['count']] += 1
else:
score_map[v['count']] = 1
return score_map
def calculate_rank(map, freq):
keylist = map.values()
new_list = list()
for i in range(len(keylist)):
new_list.append(sum(keylist[:i]))
for k, v in map.iteritems():
map[k] = float((new_list[k-1] + (0.5 * map[k]))/freq)
# print float(new_list[k - 1] + (0.5 * map[k]) / freq)
return map
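# Editorial note (assumed interpretation): calculate_rank converts the per-count
# frequency map into mid-rank percentiles.  For example, with counts {1: 3, 2: 1}
# and freq = 4, count 1 maps to (0 + 0.5*3)/4 = 0.375 and count 2 maps to
# (3 + 0.5*1)/4 = 0.875, i.e. the fraction of keywords ranked at or below that
# count, with ties split in half.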
def calculate_percentiles(map):
vogue_freq = find_keyword_occurences_in_source(map, 'vogue')
zara_freq = find_keyword_occurences_in_source(map, 'zara')
zara_score_map = count_matching_score_items(map, 'zara')
vogue_score_map = count_matching_score_items(map, 'vogue')
zara_new_score_map = calculate_rank(zara_score_map, zara_freq)
vogue_new_score_map = calculate_rank(vogue_score_map, vogue_freq)
total_freq = zara_freq + vogue_freq
print total_freq
itf_vogue = float(zara_freq)/float(total_freq)
itf_zara = float(vogue_freq)/float(total_freq)
print itf_vogue, itf_zara
# print zara_new_score_map, vogue_new_score_map
for key, values in map.iteritems():
for k, v in values.iteritems():
if v['source'] == 'zara':
v['percentile'] = zara_new_score_map[v['count']]
v['itf_score'] = float(itf_zara * v['count'])
if v['source'] == 'vogue':
v['percentile'] = vogue_new_score_map[v['count']]
v['itf_score'] = float(itf_vogue * v['count'])
# print map
return map
def update_internal_score(map, freq, source):
for key, value in map.iteritems():
for k, v in value.iteritems():
if v['source'] == source:
v['internal_ratio'] = float(v['count']/freq)
return map
# data1 = find_overlap.vogue_data
# data2 = find_overlap.zara_data
# print
# print find_keyword_occurences_in_source(find_overlap.merge_dicts(data1, data2), 'vogue')
# merged_map = find_overlap.merge_dicts(data1, data2)
# calculate_percentiles(merged_map)
# count_matching_score_items(merged_map, 'zara')
|
apache-2.0
| 3,741,584,056,191,492,600 | 32.024096 | 90 | 0.587956 | false |
ryfeus/lambda-packs
|
pytorch/source/caffe2/python/transformations.py
|
1
|
2103
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import caffe2.python._import_c_extension as C
class Transformer(object):
def __init__(self):
pass
@classmethod
def runTransform(cls, transform_name, net):
pb = net.Proto().SerializeToString()
if C.transform_exists(transform_name):
output = C.run_transform(transform_name, pb)
elif C.workspace_transform_exists(transform_name):
output = C.run_workspace_transform(transform_name, pb)
else:
raise AttributeError('Transformation {} not found.'.format(transform_name))
net.Proto().ParseFromString(output)
def __getattr__(self, transform_name):
return lambda net : self.runTransform(transform_name, net)
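# Editorial sketch (assumed usage): attribute access on a Transformer instance is
# proxied to runTransform, so the two calls below are equivalent.  The transform
# name is hypothetical -- it must match a transform registered on the C++ side.
#
#   t = Transformer()
#   t.FuseConvBN(net)                            # via __getattr__
#   Transformer.runTransform('FuseConvBN', net)  # explicit form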
def fuseNNPACKConvRelu(net):
net.Proto().ParseFromString(
C.transform_fuseNNPACKConvRelu(net.Proto().SerializeToString())
)
def sinkMaxPool(net):
net.Proto().ParseFromString(
C.transform_sinkMaxPool(net.Proto().SerializeToString())
)
def optimizeForIDEEP(net, training_mode = False):
net.Proto().ParseFromString(
C.transform_optimizeForIDEEP(net.Proto().SerializeToString(), training_mode)
)
def fuseConvBN(net):
net.Proto().ParseFromString(
C.transform_fuseConvBN(net.Proto().SerializeToString())
)
|
mit
| 2,730,161,065,556,882,400 | 31.859375 | 87 | 0.675226 | false |
Wengex/PyDirectory
|
pydirectory/activedirectory/objects/types.py
|
1
|
3640
|
from pydirectory.ldap.objects import types
class object(types.object):
_type = {}
class user(object):
_type = {
'objectClass' : [b'top', b'person', b'organizationalPerson', b'user']
}
@property
def is_enable(self):
mod = int(self.useraccountcontrol.value) % 8
if mod == 0:
return True
else:
return False
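        # Editorial note (assumed semantics): userAccountControl is a bit field and
        # ACCOUNTDISABLE is 0x2, so "value % 8 == 0" treats the account as enabled
        # when none of the three lowest flags are set.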
@property
def is_disable(self):
return not self.is_enable
def enable(self):
self.useraccountcontrol = ["NORMAL_ACCOUNT"]
self.save()
def disable(self):
'''Method to disable User in Active Directory'''
self.useraccountcontrol = ["NORMAL_ACCOUNT","ACCOUNTDISABLE"]
self.save()
def setPassword(self,value):
self.unicodePwd = value
self.save()
@property
def whenPasswordExpires(self):
import decimal, datetime
pwdLastSet = int(self.pwdlastset.raw[0])
if self.useraccountcontrol.value == '66048':
return None #Object password not expire
if (pwdLastSet == 0):
return 0
try:
maxPwdAge = int(self._objects.base.maxpwdage.raw[0])
mod = int(maxPwdAge) % 4294967296
except:
mod = 0
if mod == 0:
return None #Domain not expire object passwords
pwdExpire = decimal.Decimal(pwdLastSet) - decimal.Decimal(maxPwdAge)
expiryts = int((pwdExpire / 10000000) - 11644473600)
return datetime.datetime.fromtimestamp(expiryts)
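        # Editorial note: pwdLastSet / maxPwdAge are Windows FILETIME-style values in
        # 100-nanosecond ticks, so dividing by 10000000 converts to seconds and
        # subtracting 11644473600 shifts the origin from 1601-01-01 to the Unix epoch.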
class group(object):
_type = {
'objectClass' : [b'top', b'group']
}
def addMember(self,object):
if object.dn == None:
return None
try:
return self._objects._engine._worker.modify(self.dn.value,{'member':[('MODIFY_ADD',[object.dn.value])]})
except self._exceptions.LDAPEntryAlreadyExistsResult:
return False
def delMember(self,object):
if object.dn == None:
return None
try:
return self._objects._engine._worker.modify(self.dn.value,{'member':[('MODIFY_DELETE',[object.dn.value])]})
except self._exceptions.LDAPUnwillingToPerformResult:
return False
def isMember(self,object):
if (object.dn != None) and (self.dn != None):
ingroup = [object.dn.value]
if object.primarygroupid != None:
primarygroup = object.primarygroupid.value
if self.objectsid.value == primarygroup.objectsid.value:
return True
ingroup.append(primarygroup.dn.value)
#Check if object is member or object primarygroup is member
members = self._objects.search(self._objects.setQuery('or',in_group=ingroup))
for member in members:
if (member.dn.value.strip().lower() == self.dn.value.strip().lower()):
return True
return False
@property
def allMembers(self):
if self._allmembers != None:
return self._allmembers
self._allmembers = []
primarygroupid=self.objectsid.value.replace(self._objects.base.objectsid.value+'-','')
members = []
if self.member != None:
members.extend(self.member.raw)
memberlst = self._objects.search(self._objects.setQuery(primarygroupid=primarygroupid))
if len(memberlst) > 0:
members.extend(memberlst)
self._allmembers = members
return members
class computer(object):
_type = {
'objectClass' : [b'top',b'person', b'organizationalPerson', b'user', b'computer']
}
@property
def is_enable(self):
mod = int(self.useraccountcontrol.value) % 8
if mod == 0:
return True
else:
return False
@property
def is_disable(self):
return not self.is_enable
def enable(self):
self.useraccountcontrol = ["PASSWD_NOTREQD","WORKSTATION_TRUST_ACCOUNT"]
self.save()
def disable(self):
'''Method to disable User in Active Directory'''
self.useraccountcontrol = ["ACCOUNTDISABLE","PASSWD_NOTREQD","WORKSTATION_TRUST_ACCOUNT"]
self.save()
class ou(object):
_type = {
'objectClass' : [b'top', b'organizationalUnit']
}
|
gpl-2.0
| 3,301,856,316,181,458,400 | 25 | 110 | 0.698901 | false |
wsyzxcn/tornado
|
tornado/ioloop.py
|
1
|
42597
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado.platform.auto import set_close_exec, Waker
from tornado import stack_context
from tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
try:
import asyncio
except ImportError:
asyncio = None
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
In general, an `IOLoop` cannot survive a fork or be shared across
processes in any way. When multiple processes are being used, each
process should create its own `IOLoop`, which also implies that
any objects which depend on the `IOLoop` (such as
`.AsyncHTTPClient`) must also be created in the child processes.
As a guideline, anything that starts processes (including the
`tornado.process` and `multiprocessing` modules) should do so as
early as possible, ideally the first thing the application does
after loading its configuration in ``main()``.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Deprecated alias for `IOLoop.current()`.
.. versionchanged:: 5.0
Previously, this method returned a global singleton
`IOLoop`, in contrast with the per-thread `IOLoop` returned
by `current()`. In nearly all cases the two were the same
(when they differed, it was generally used from non-Tornado
threads to communicate back to the main thread's `IOLoop`).
This distinction is not present in `asyncio`, so in order
to facilitate integration with that package `instance()`
was changed to be an alias to `current()`. Applications
using the cross-thread communications aspect of
`instance()` should instead set their own global variable
to point to the `IOLoop` they want to use.
.. deprecated:: 5.0
"""
return IOLoop.current()
@staticmethod
def initialized():
"""Returns true if there is a current IOLoop.
.. versionchanged:: 5.0
Redefined in terms of `current()` instead of `instance()`.
.. deprecated:: 5.0
This method only knows about `IOLoop` objects (and not, for
example, `asyncio` event loops), so it is of limited use.
"""
return IOLoop.current(instance=False) is not None
def install(self):
"""Deprecated alias for `make_current()`.
.. versionchanged:: 5.0
Previously, this method would set this `IOLoop` as the
global singleton used by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`, `install()`
is an alias for `make_current()`.
.. deprecated:: 5.0
"""
self.make_current()
@staticmethod
def clear_instance():
"""Deprecated alias for `clear_current()`.
.. versionchanged:: 5.0
Previously, this method would clear the `IOLoop` used as
the global singleton by `IOLoop.instance()`. Now that
`instance()` is an alias for `current()`,
           `clear_instance()` is an alias for `clear_current()`.
.. deprecated:: 5.0
"""
IOLoop.clear_current()
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop` and ``instance`` is true, creates one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
.. versionchanged:: 5.0
The ``instance`` argument now controls whether an `IOLoop`
is created automatically when there is none, instead of
whether we fall back to `IOLoop.instance()` (which is now
an alias for this method)
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
current = None
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop, AsyncIOMainLoop
if IOLoop.configured_class() is AsyncIOLoop:
current = AsyncIOMainLoop()
if current is None:
current = IOLoop()
if IOLoop._current.instance is not current:
raise RuntimeError("new IOLoop did not become current")
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
"""Clears the `IOLoop` for the current thread.
Intended primarily for use by test frameworks in between tests.
"""
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if asyncio is not None:
from tornado.platform.asyncio import AsyncIOLoop
return AsyncIOLoop
return PollIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
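        # Illustrative (assumed) usage -- both forms schedule cb five seconds from now:
        #   io_loop.add_timeout(io_loop.time() + 5, cb)
        #   io_loop.add_timeout(datetime.timedelta(seconds=5), cb)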
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
from tornado import gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = gen.convert_yielded(ret)
except gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
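        # Illustrative (assumed) examples: split_fd(4) returns (4, 4); for a socket
        # object sock whose fileno() is 7, split_fd(sock) returns (7, sock).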
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._pid = os.getpid()
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
@classmethod
def configurable_base(cls):
return PollIOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
if os.getpid() != self._pid:
raise RuntimeError("Cannot share PollIOLoops across processes")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
        # If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
    # Comparison methods to sort by deadline, with a per-IOLoop counter as a
    # tiebreaker to guarantee a consistent ordering.  The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
def __init__(self, callback, callback_time):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
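# Editorial sketch (assumed usage, not part of the module): run a callback every
# 500 ms on the current IOLoop; the callback name is hypothetical.
#
#   def heartbeat():
#       print("still alive")
#
#   pc = PeriodicCallback(heartbeat, 500)
#   pc.start()
#   IOLoop.current().start()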
|
apache-2.0
| 2,620,420,939,369,841,000 | 37.61922 | 93 | 0.597272 | false |
shearichard/spellsplash
|
splsplsh_project/splsplsh_project/settings/HIDE_s_e_t_t_i_n_g_s.py
|
1
|
2002
|
"""
Django settings for splsplsh_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9x+a74cd9=c1i%+j)mn3+%om@^&_9j(grm^4)(aj-*s962rof_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'splsplsh_project.urls'
WSGI_APPLICATION = 'splsplsh_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
gpl-3.0
| 7,098,006,736,481,004,000 | 23.414634 | 71 | 0.722278 | false |
cristian99garcia/agubrowser-activity
|
Modulos/Pantalla_Completa.py
|
1
|
3372
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Builds a full-screen view with the AguBrowse window
# Pantalla_Completa.py by:
# Agustin Zuiaga <aguszs97@gmail.com>
# Python Joven - Utu Rafael Peraza
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Agustin Zubiaga"
__date__ = "8 de marzo del 2011, 16:48"
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
class Armar_Pantalla_Completa():
def conteo_cb(self):
self.window.hide()
return self.continuar
    def mostrar_boton1(self, event, other): # When the mouse pointer moves
        if self.show_window:
            self.continuar = False # Disable the countdown
            self.window.show_all() # Show the popup window
            self.continuar = True # Re-enable the countdown
    def salir(self, widget):
        self.bar.show_all() # Show the toolbars
        self.show_window = False # Make sure the window will not be shown again
        self.window.hide() # Hide the window
        self.abrowse.unfullscreen() # Leave full-screen mode
    # Note: when a popup window is destroyed it later shows up as a grey square,
    # which is why I never destroy it; I simply stop showing it.
def show_bars(self, widget):
if not self.st:
self.bar.show_all()
widget.set_label("No mostrar barra de herramientas")
self.st = True
else:
self.bar.hide()
widget.set_label("Mostrar barra de herramientas")
self.st = False
def __init__(self, abrowse):
self.show_window = True
abrowse.fullscreen()
        abrowse.add_events(Gdk.EventMask.POINTER_MOTION_MASK) # Add the pointer-motion event mask
        abrowse.connect("motion_notify_event", self.mostrar_boton1) # When the mouse pointer moves
self.abrowse = abrowse
self.st = False
bar = abrowse.main.get_child1()
bar.hide()
self.bar = bar
boton_mostrar = Gtk.Button("Mostrar barra de herramientas")
boton_mostrar.connect("clicked", self.show_bars)
boton_salir = Gtk.Button(None, stock=Gtk.STOCK_LEAVE_FULLSCREEN)
boton_salir.connect("clicked", self.salir)
hbox = Gtk.HBox(False, 10)
hbox.add(boton_mostrar)
hbox.add(boton_salir)
##hbox.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(0,0,0,1))
self.window = Gtk.Window(Gtk.WindowType.POPUP)
self.window.add(hbox)
##self.window.modify_bg(Gtk.StateType.NORMAL, Gdk.Color(0, 0, 0, 1))
hbox.show_all()
self.continuar = False
self.conteo = GObject.timeout_add_seconds(6, self.conteo_cb)
|
gpl-2.0
| -8,409,454,308,710,824,000 | 33.060606 | 106 | 0.658956 | false |
dav-stott/phd-thesis
|
spectra_thesis_ais.py
|
1
|
70177
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 25 08:48:28 2014
@author: david
"""
#*************** IMPORT DEPENDANCIES*******************************************
import numpy as np
#import spec_gdal4 as spg
from osgeo import gdal
import os
import csv
#import h5py
import datetime
import numpy.ma as ma
#from StringIO import StringIO
#import shapely
#import r2py
from osgeo import gdal_array
from osgeo import gdalconst
from osgeo.gdalconst import *
from osgeo import ogr
from osgeo import osr
from scipy.spatial import ConvexHull
from scipy.signal import find_peaks_cwt
from scipy.signal import savgol_filter
from scipy import interpolate
import matplotlib.pyplot as plt
#from shapely.geometry import LineString
################# Functions ###################################################
'''These here are functions that are not part of any specific class- these
are used by the data import classes for functions such as smoothing'''
def smoothing(perc_out, block_start, block_end, kparam, weight, sparam):
#D
sm_spline_block = perc_out[block_start:block_end,:]
sm_x = sm_spline_block[:,0]
sm_y = sm_spline_block[:,1]
sm_len = sm_x.shape
sm_weights = np.zeros(sm_len)+weight
sm_spline = interpolate.UnivariateSpline(sm_x,
sm_y,
k=kparam,
w=sm_weights,
s=sparam)
spline = sm_spline(sm_x)
spline = np.column_stack((sm_x,spline))
return spline
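# --- Illustrative sketch (added for this write-up; not part of the original thesis code). ---
# Minimal example of calling smoothing() on a synthetic two-column
# (wavelength, reflectance) array. The block indices, k, weight and s values
# below are placeholders, not the calibrated per-detector values used later in
# load_asd.
def _smoothing_demo():
    wl = np.linspace(350, 2500, 2151)
    refl = np.sin(wl / 200.0) + np.random.normal(0, 0.01, wl.shape)
    spec = np.column_stack((wl, refl))
    return smoothing(spec, 0, 500, 3, 1.0, 10)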
def interpolate_gaps(array1, array2):
array_end = array1.shape[0]-1
array1_endx = array1[array_end, 0]
#get the start point of the second array
array2_start = array2[0,0]
#get the length of the area to be interpolated
x_len = array2_start-array1_endx+1
#generate x values to use for the array
xvals = np.linspace(array1_endx, array2_start, num=x_len)
#y val for the start of the interpolated area
yval_array1 = array1[array_end,1]
# y val for the end of interpolated area
yval_array2 = array2[0,1]
#stack the values into a new array
xin = np.append(array1_endx, array2_start)
yin = np.append(yval_array1, yval_array2)
#numpy.interp(x, xp, fp)
gap_filling = np.interp(xvals, xin, yin)
filled_x = np.column_stack((xvals, gap_filling))
print (filled_x.shape)
return filled_x
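# --- Illustrative sketch (added for this write-up; not part of the original thesis code). ---
# interpolate_gaps() linearly bridges the wavelength gap between two smoothed
# blocks; the two tiny synthetic blocks below exist purely to show the call.
def _interpolate_gaps_demo():
    block1 = np.array([[990.0, 0.40], [991.0, 0.41]])
    block2 = np.array([[1080.0, 0.50], [1081.0, 0.51]])
    return interpolate_gaps(block1, block2)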
class absorption_feature():
'''this class is used for the characterisation of spectral absortion features,
and their investigation using continuum removal'''
def __init__(self, spectra, feat_start, feat_end, feat_centre):
self.wl = spectra[:,0]
self.values = spectra[:,1]
print ('CALL TO ABSORPTION FEATURE')
# start of absorption feature
self.feat_start = feat_start
# end of absorption feature
self.feat_end = feat_end
# approximate 'centre' of feature
self.feat_centre = feat_centre
#get the range of the data
self.min_wl = self.wl[0]
self.max_wl = self.wl[-1]
print ('Absorption feature',self.feat_start,self.feat_end)
#define feature name
self.feat_name = str(self.feat_start)+'_'+str(self.feat_end)
'''# if the feature is within the range of the sensor, do stuff
if self.feat_start > self.min_wl and self.feat_end < self.max_wl:
print 'can do stuff with this data'
try:
self.abs_feature()
print ('Absorption feature analysis sussceful')
except:
print ('ERROR analysing absorption feature', self.feat_name)
pass
else:
print ('Cannot define feature: Out of range')'''
########## Methods ##################################################
def abs_feature(self):
print ('Call to abs_feature made')
        # Method to calculate the end points of the absorption feature.
        # Does this using the Qhull algorithm from scipy.spatial:
        # use the initial definition of the absorption feature as a starting point
        # and get the indices for these
cont_rem_stacked = None
ft_def_stacked = None
start_point = np.argmin(np.abs(self.wl-self.feat_start))
end_point = np.argmin(np.abs(self.wl-self.feat_end))
centre = np.argmin(np.abs(self.wl-self.feat_centre))
#find the index minima of reflectance
minima = np.argmin(self.values[start_point:end_point])+start_point
# if the minima = the start point then the start point is the minima
if minima == start_point:
left = minima
            #if not then the left side of the feature is the maxima on the left of the minima
elif minima <= centre:
left = start_point+np.argmax(self.values[start_point:centre])
else:
left = start_point+np.argmax(self.values[start_point:minima])
#right is the maxima on the right of the absorption feature
if minima == end_point:
right = minima
else:
right = minima+np.argmax(self.values[minima:end_point])
# use left and right to create a 2D array of points
hull_in = np.column_stack((self.wl[left:right],self.values[left:right]))
#determine the minima of the points
hull_min = minima-left
if hull_min <= 0:
hull_min=0
#find the wavelength at minima
hull_min_wl = hull_in[hull_min,0]
# define the wavelength ranges we'll use to select simplices
ft_left_wl = hull_min_wl-((hull_min_wl-hull_in[0,0])/2)
ft_right_wl = hull_min_wl+((hull_in[-1,0]-hull_min_wl)/2)
#use scipy.spatial convex hull to determine the convex hull of the points
hull = ConvexHull(hull_in)
# get the simplex tuples from the convex hull
simplexes = hull.simplices
# create an empty list to store simplices potentially related to our feature
feat_pos = []
#iterate through the simplices
for simplex in simplexes:
#extract vertices from simplices
vertex1 = simplex[0]
vertex2 = simplex[1]
#print 'VERT!',hull_in[vertex1,0],hull_in[vertex2,0]
''' We're only interested in the upper hull. Qhull moves counter-
clockwise. Therefore we're only interested in those points where
vertex 1 is greater than vertex 2'''
'''The above may be total bollocks'''
if not vertex1 < vertex2:
'''We then use the wavelength ranges to determine which simplices
relate to our absorption feature'''
if hull_in[vertex2,0] <= ft_left_wl and \
hull_in[vertex2,0] >= self.wl[left] and \
hull_in[vertex1,0] >= ft_right_wl and \
hull_in[vertex1,0] <= self.wl[right]:
# append the vertices to the list
print (hull_in[vertex2,0])
print (hull_in[vertex1,0])
feat_pos.append((vertex2,vertex1))
print ('feat_pos length:',len(feat_pos), type(feat_pos))
#print feat_pos[0],feat_pos[1]
else:
continue
'''We only want one feature here. If there's more than one or less
than one we're not interested as we're probably not dealing with
vegetation'''
# If there's less than one feature...
if len(feat_pos) < 1:
print ('Absorption feature cannot be defined:less than one feature')
ft_def_stacked = None
ft_def_hdr = None
cont_rem_stacked = None
elif len(feat_pos) == 1:
feat_pos=feat_pos[0]
print ('£££££',feat_pos, type(feat_pos))
else:
            #if there's more than one, find the widest one. This is not optimal.
if len(feat_pos) >1:
feat_width = []
for pair in feat_pos:
feat_width.append(pair[1]-pair[0])
print ('feat width:', feat_width)
#feat_width = np.asarray(feat_width)
print (feat_width)
f_max = feat_width.index(max(feat_width))
print (f_max)
feat_pos = feat_pos[f_max]
print (type(feat_pos))
if not feat_pos==None:
feat_pos = feat_pos[0], feat_pos[1]
print ('DOES MY FEAT_POS CONVERSION WORK?', feat_pos)
print ('Analysing absorption feature')
#slice
feature = hull_in[feat_pos[0]:feat_pos[1],:]
print ('Feature shape',feature.shape,'start:',feature[0,0],'end:',feature[-1,0])
#get the minima in the slice
minima_pos = np.argmin(feature[:,1])
#continuum removal
contrem = self.continuum_removal(feature,minima_pos)
# set up single value outputs
# start of feature
refined_start = feature[0,0]
# end of feature
refined_end = feature[-1,0]
# wavelength at minima
minima_WL = feature[minima_pos,0]
# reflectance at minima
minima_R = feature[minima_pos,1]
# area of absorption feature
feat_area = contrem[4]
# two band normalised index of minima and start of feature
left_tbvi = (refined_start-minima_R)/(refined_start+minima_R)
# two band normalised index of minima and right of feature
right_tbvi = (refined_end-minima_R)/(refined_end+minima_R)
# gradient of the continuum line
cont_gradient = np.mean(np.gradient(contrem[0]))
# area of continuum removed absorption feature
cont_rem_area = contrem[3]
# maxima of continuum removed absorption feature
cont_rem_maxima = np.max(contrem[1])
# wavelength of maxima of continuum removed absorption feature
cont_rem_maxima_wl = feature[np.argmax(contrem[1]),0]
#area of left part of continuum removed feature
cont_area_l = contrem[5]
if cont_area_l == None:
cont_area_l=0
            #area of right part of continuum removed feature
cont_area_r = contrem[6]
#stack these into a lovely array
ft_def_stacked = np.column_stack((refined_start,
refined_end,
minima_WL,
minima_R,
feat_area,
left_tbvi,
right_tbvi,
cont_gradient,
cont_rem_area,
cont_rem_maxima,
cont_rem_maxima_wl,
cont_area_l,
cont_area_r))
ft_def_hdr = str('"Refined start",'+
'"Refined end",'+
'"Minima Wavelenght",'+
'"Minima Reflectance",'+
'"Feature Area",'+
'"Left TBVI",'+
'"Right TBVI",'+
'"Continuum Gradient",'+
'"Continuum Removed Area",'+
'"Continuum Removed Maxima",'+
'"Continuum Removed Maxima WL",'+
'"Continuum Removed Area Left",'+
'"Continuum Removed Area Right",')
#print ft_def_stacked.shape #save the stacked outputs as hdf
# stack the 2d continuum removed outputs
cont_rem_stacked = np.column_stack((feature[:,0],
feature[:,1],
contrem[0],
contrem[1],
contrem[2]))
print ('CREM', cont_rem_stacked.shape)
return ft_def_stacked, ft_def_hdr, cont_rem_stacked
def continuum_removal(self,feature,minima):
        #method to perform continuum removal
        #pull out the endmembers
end_memb = np.vstack((feature[0,:],feature[-1,:]))
#interpolate between the endmembers using x intervals
continuum_line = np.interp(feature[:,0], end_memb[:,0], end_memb[:,1])
#continuum removal
continuum_removed = continuum_line/feature[:,1]
#stack into coord pairs so we can measure the area of the feature
ft_coords = np.vstack((feature,
np.column_stack((feature[:,0],continuum_line))))
#get the area
area = self.area(ft_coords)
#get the area of the continuum removed feature
cont_rem_2d = np.column_stack((feature[:,0],continuum_removed))
cont_r_area = self.area(cont_rem_2d)
#band-normalised by area continuum removal
cont_BNA = (1-(feature[:,1]/continuum_line))/area
#continuum removed area on left of minima
cont_area_left = self.area(cont_rem_2d[0:minima,:])
#continuum removed area on right of minima
cont_area_right = self.area(cont_rem_2d[minima:,:])
return (continuum_line,
continuum_removed,
cont_BNA,
cont_r_area,
area,
cont_area_left,
cont_area_right)
#define area of 2d polygon- using shoelace formula
def area(self, coords2d):
#setup counter
total = 0.0
#get the number of coorsinate pairs
N = coords2d.shape[0]
#iterate through these
for i in range(N):
#define the first coordinate pair
vertex1 = coords2d[i]
#do the second
vertex2 = coords2d[(i+1) % N]
#append the first & second distance to the toatal
total += vertex1[0]*vertex2[1] - vertex1[1]*vertex2[0]
#return area
return abs(total/2)
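# --- Worked example (added for this write-up; not part of the original thesis code). ---
# The area() method above is the shoelace formula. For a unit square the
# signed cross-product sum is 2, so the returned area is abs(2/2) = 1.0.
def _shoelace_demo():
    square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    total = 0.0
    n = square.shape[0]
    for i in range(n):
        vertex1 = square[i]
        vertex2 = square[(i + 1) % n]
        total += vertex1[0] * vertex2[1] - vertex1[1] * vertex2[0]
    return abs(total / 2)  # 1.0 for the unit square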
class Indices():
#class that does vegetation indices
def __init__(self,spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
'''So, the init method here checks the range of the sensor and runs
the appropriate indices within that range, and saves them as hdf5.
The indices are all defined as methods of this class'''
def visnir(self):
# Sensor range VIS-NIR
if self.range[0] >= 350 and \
self.range[0] <= 500 and \
self.range[1] >= 900:
vis_nir = np.column_stack((self.sr700_800(),
self.ndvi694_760(),
self.ndvi695_805(),
self.ndvi700_800(),
self.ndvi705_750(),
self.rdvi(),
self.savi(),
self.msavi2(),
self.msr(),
self.msrvi(),
self.mdvi(),
self.tvi(),
self.mtvi(),
self.mtvi2(),
self.vog1vi(),
self.vog2(),
self.prsi(),
self.privi(),
self.sipi(),
self.mcari(),
self.mcari1(),
self.mcari2(),
self.npci(),
self.npqi(),
self.cri1(),
self.cri2(),
self.ari1(),
self.ari2(),
self.wbi()))
vis_nir_hdr=str('"sr700_800",'+
'"ndvi694_760",'+
'"ndvi695_805",'+
'"ndvi700_800",'+
'"ndvi705_750",'+
'"rdvi",'+
'"savi",'+
'"msavi2",'+
'"msr",'+
'"msrvi",'+
'"mdvi",'+
'"tvi",'+
'"mtvi",'+
'"mtvi2",'+
'"vog1vi",'+
'"vog2",'+
                            '"prsi",'+
'"privi",'+
'"sipi",'+
'"mcari",'+
'"mcari1",'+
'"mcari2",'+
'"npci",'+
'"npqi",'+
'"cri1",'+
'"cri2",'+
'"ari1",'+
'"ari2",'+
'"wbi"')
else:
vis_nir = None
vis_nir_hdr = None
return vis_nir,vis_nir_hdr
#Range NIR-SWIR
def nir_swir(self):
if self.range[0] <= 900 and self.range[1] >=2000:
nir_swir = np.column_stack((self.ndwi(),
self.msi(),
self.ndii()))
nir_swir_hdr = str('"ndwi",'+
'"msi",'+
'"ndii"')
else:
#continue
print ('not nir-swir')
nir_swir=None
nir_swir_hdr=None
return nir_swir, nir_swir_hdr
#range SWIR
def swir(self):
if self.range[1] >=2000:
swir = np.column_stack((self.ndni(),
self.ndli()))
swir_hdr=str('"ndni",'+
'"ndli"')
else:
print ('swir-nir')
swir = None
swir_hdr = None
#continue
return swir,swir_hdr
#||||||||||||||||||||| Methods |||||||||||||||||||||||||||||||||||||||||||||||
# function to run every permutation of the NDVI type index across the Red / IR
# ...... VIS / NIR methods ....
def multi_tbvi (self, red_start=650, red_end=750, ir_start=700, ir_end=850):
        # get the indices of the regions we're going to use.
        # we've added default values here, but they can happily be overridden
#start of red
red_l =np.argmin(np.abs(self.wl-red_start))
#end of red
red_r = np.argmin(np.abs(self.wl-red_end))
#start of ir
ir_l = np.argmin(np.abs(self.wl-ir_start))
#end of ir
ir_r = np.argmin(np.abs(self.wl-ir_end))
#slice
left = self.values[red_l:red_r]
right = self.values[ir_l:ir_r]
#set up output
values = np.empty(3)
#set up counter
l = 0
#loop throught the values in the red
for lvalue in left:
l_wl = self.wl[l+red_l]
r = 0
l = l+1
#then calculate the index with each wl in the NIR
for rvalue in right:
value = (rvalue-lvalue)/(rvalue+lvalue)
r_wl = self.wl[r+ir_l]
out = np.column_stack((l_wl,r_wl,value))
values = np.vstack((values, out))
out = None
r = r+1
return values[1:,:]
def sr700_800 (self, x=700, y=800):
index = self.values[np.argmin(np.abs(self.wl-x))]/self.values[np.argmin(np.abs(self.wl-y))]
return index
def ndvi705_750 (self, x=705, y=750):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi700_800 (self, x=700, y=800):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi694_760 (self, x=694, y=760):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi695_805 (self, x=695, y=805):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npci (self, x=430, y=680):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npqi (self, x=415, y=435):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
#mSRvi
#= (750-445)/(705+445)
def msrvi (self):
x = 750
y = 445
z = 705
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
msrvi_val = (x_val-y_val)/(z_val+y_val)
return msrvi_val
#Vogelmann Red Edge 1
#740/720
def vog1vi (self):
x = 740
y = 720
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
vog1vi_val = (x_val/y_val)
return vog1vi_val
#Vogelmann Red Edge 2
#= (734-747)/(715+726)
def vog2 (self):
v = 734
x = 747
y = 715
z = 726
v_val = self.values[np.argmin(np.abs(self.wl-v))]
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
vog2_val = (v_val-x_val)/(y_val+z_val)
return vog2_val
#PRI
# (531-570)/(531+570)
def privi (self):
x = 531
y = 570
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
privi_val = (x_val-y_val)/(x_val+y_val)
return privi_val
#SIPI
#(800-445)/(800-680)
def sipi (self):
x = 800
y = 445
z = 680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
sipi_val = (x_val-y_val)/(x_val+z_val)
return sipi_val
#Water band index
# WBI = 900/700
def wbi (self):
x = 900
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
wbi_val = (x_val/y_val)
return wbi_val
#mNDVI
#= (750-705)/((750+705)-(445))
def mdvi (self):
x = 750
y = 705
z = 445
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mdvi_val = (x_val-y_val)/((x_val+y_val)-z_val)
return mdvi_val
#Carotenid Reflectance Index
#CRI1 = (1/510)-(1/550)
def cri1 (self):
x = 510
y = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri1_val = (1/x_val)-(1/y_val)
return cri1_val
#CRI2 = (1/510)-(1/700)
def cri2 (self):
x = 510
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri2_val = (1/x_val)-(1/y_val)
return cri2_val
#Anthocyanin
#ARI1 = (1/550)-(1/700)
def ari1 (self):
x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari1_val = (1/x_val)-(1/y_val)
return ari1_val
    #ARI2 = 800*((1/550)-(1/700))
def ari2 (self):
        x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari2_val = 800*((1/x_val)-(1/y_val))
return ari2_val
#MSR
#=((800/670)-1)/SQRT(800+670)
def msr (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msr_val = ((x_val/y_val)-1)/(np.sqrt(x_val+y_val))
return msr_val
#SAVI
#= (1+l)(800-670)/(800+670+l)
def savi (self, l=0.5):
x = 800
y = 670
l = 0.5
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
savi_val = ((1+l)*(x_val-y_val))/(x_val+y_val+l)
return savi_val
#MSAVI
    #= ((2*800+1) - sqrt((2*800+1)^2 - 8*(800-670))) / 2
def msavi2 (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msavi2_top1 = (2*x_val+1)
msavi2_top2 = (np.sqrt(np.square(2*x_val+1)-(8*(x_val-y_val))))
msavi2_top = msavi2_top1-msavi2_top2
msavi2_val = msavi2_top/2
return msavi2_val
    #Modified chlorophyll absorption index
#MCARI = ((700-670)-0.2*(700-550))*(700/670)
def mcari (self):
x = 700
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari_val = ((x_val-y_val)-(0.2*(x_val-z_val)))*(x_val/y_val)
return mcari_val
#Triangular vegetation index
#TVI 0.5*(120*(750-550))-(200*(670-550))
def tvi (self):
x = 750
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        tvi_val = 0.5*((120*(x_val-y_val))-(200*(z_val-y_val)))
return tvi_val
    #MCARI1 = 1.2*((2.5*(800-670))-(1.3*(800-550)))
def mcari1 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari1_val = 1.2*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
return mcari1_val
#MTVI1
#=1.2*((1.2*(800-550))-(2.5(670-550)))
def mtvi (self):
x = 800
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mtvi_val = 1.2*((1.2*(x_val-y_val))-(2.5*(z_val-y_val)))
return mtvi_val
def mcari2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mcari2_top = (1.5*(2.5*(x_val-y_val)))-(1.3*(x_val-z_val))
mcari2_btm = np.sqrt((np.square(2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mcari2_val = mcari2_top/mcari2_btm
return mcari2_val
#MTVI2=(1.5*(2.5(800-670)-2.5*(800-550))/sqrt((2*800+1s)sq)-((6*800)-(5*sqrt670))-0.5
def mtvi2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mtvi2_top = (1.5*(2.5*(x_val-z_val)))-(1.3*(x_val-z_val))
mtvi2_btm = np.sqrt((np.square(2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mtvi2_val = mtvi2_top/mtvi2_btm
return mtvi2_val
#Renormalised DVI
#RDVI = (800-670)/sqrt(800+670)
def rdvi (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
rdvi_val = (x_val-y_val)/np.sqrt(x_val+y_val)
return rdvi_val
#Plant senescance reflectance index
#PRSI = (680-500)/750
def prsi (self):
x = 680
y = 500
z = 750
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
prsi_val = (x_val-y_val)/z_val
return prsi_val
#||||||||||||||||||||||| SWIR methods ||||||||||||||||||||||||||||||||||||
#Cellulose Absorption Index
#CAI =0.5*(2000-2200)/2100
def cai (self):
x = 2000
y = 2200
z = 2100
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
cai_val = 0.5*(x_val-y_val)-z_val
return cai_val
#Normalized Lignin Difference
#NDLI = (log(1/1754)-log(1/1680))/(log(1/1754)+log(1/1680))
def ndli (self):
x = 1754
        y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndli_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndli_val
#Canopy N
#NDNI =(log(1/1510)-log(1/1680))/(log(1/1510)+log(1/1680))
def ndni (self):
x = 1510
y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndni_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndni_val
#|||||||||||||||||||||| Full spectrum (VIS-SWIR)||||||||||||||||||||||||||||
#Normalised Difference IR index
    #NDII = (819-1649)/(819+1649)
def ndii (self):
x = 819
y = 1649
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndii_val = (x_val-y_val)/(x_val+y_val)
return ndii_val
#Moisture Stress Index
    #MSI = 1599/819
def msi (self):
x = 1599
        y = 819
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msi_val = (x_val/y_val)
return msi_val
#NDWI
#(857-1241)/(857+1241)
def ndwi (self):
x = 857
y = 1241
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndwi_val = (x_val-y_val)/(x_val+y_val)
return ndwi_val
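# --- Hedged usage sketch (added for this write-up; not part of the original thesis code). ---
# Indices expects a two-column (wavelength in nm, reflectance) array covering
# the sensor range; visnir(), nir_swir() and swir() each return a
# (values, header) pair, or (None, None) when the range check fails. The flat
# synthetic spectrum below is a placeholder, not real field data.
def _indices_demo():
    wl = np.arange(350.0, 2501.0)
    refl = np.full(wl.shape, 50.0)
    veg = Indices(np.column_stack((wl, refl)))
    return veg.visnir()[0], veg.nir_swir()[0], veg.swir()[0]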
class red_edge():
'''Class to derive red edge position using a number of different methods'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
        '''Again, the method that initialises this class uses the range of the
        sensor to check whether it falls within the red-edge region. If so,
        it will derive the red edge using the different methods and save these
        as separate hdf5 datasets in the appropriate group'''
if self.range[0] <= 670 and self.range[1] >=750:
self.redge_vals = np.column_stack((self.redge_linear(),
self.redge_lagrange(),
self.redge_linear_extrapolation()))
print (self.redge_vals)
print (self.redge_linear,self.redge_lagrange,self.redge_linear_extrapolation)
self.redge_hdr = str('"linear",'+
'"lagrange",'+
'"extrapolated"')
else:
print ('red_edge out of range')
self.redge_vals = None
self.redge_hdr = None
##################### METHODS #########################################
#linear- defined by clevers et al 1994:
def redge_linear(self):
r670 = self.values[np.argmin(np.abs(self.wl-670))]
r780 = self.values[np.argmin(np.abs(self.wl-780))]
r700 = self.values[np.argmin(np.abs(self.wl-700))]
r740 = self.values[np.argmin(np.abs(self.wl-740))]
r_edge = (r670+r780)/2
lin_rep =700+40*((r_edge-r700)/(r740-r700))
print ('REDGE_LINEAR',lin_rep)
return lin_rep
#Lagrangian method, after Dawson & Curran 1998
def redge_lagrange(self):
        #select the red edge region of the first derivative and associate this
#with wavelength
x = 680
y = 730
first_diff = np.diff(self.values, 1)
spec_in = np.column_stack((self.wl[1:], first_diff))
l680 = np.argmin(np.abs(spec_in[:,0]-x))
r680 = spec_in[l680,0]
l730 = np.argmin(np.abs(spec_in[:,0]-y))
r730 = spec_in[l730,0]
redge_region_sel = np.where(np.logical_and(spec_in[:,0]>r680-1,
spec_in[:,0]<r730+1))
redge_region = spec_in[redge_region_sel]
#find the maximum first derivative, return index
dif_max = np.argmax(redge_region[:,1], axis=0)
#find band with the max derivative -1, return index
dif_max_less = (np.argmax(redge_region[:,1], axis=0))-1
#find band with the max derivative +1, return index
dif_max_more = (np.argmax(redge_region[:,1], axis=0))+1
if dif_max_more >= redge_region.shape[0]:
dif_max_more = redge_region.shape[0]-1
#use these indeces to slice the array
rmax = redge_region[dif_max]
rmax_less =redge_region[dif_max_less]
rmax_more =redge_region[dif_max_more]
#lagrangian interpolation with three points
#this has been expanded to make the syntax easier
a = rmax_less[1]/(rmax_less[0]-rmax[0])*(rmax_less[0]-rmax_more[0])
b = rmax[1]/(rmax[0]-rmax_less[0])*(rmax[0]-rmax_more[0])
c = rmax_more[1]/(rmax_more[0]-rmax_less[0])*(rmax_more[0]-rmax[0])
d = a*(rmax[0]+rmax_more[0])
e = b*(rmax_less[0]+rmax_more[0])
f = c*(rmax_less[0]+rmax[0])
lg_rep = (d+e+f)/(2*(a+b+c))
print ('Lagrangian', lg_rep)
return lg_rep
#Linear extrapolation- after Cho & Skidmore 2006, Cho et al 2007
def redge_linear_extrapolation(self):
diff = np.diff(self.values)
d680 = diff[np.argmin(np.abs(self.wl-680+1))]
d694 = diff[np.argmin(np.abs(self.wl-694+1))]
d724 = diff[np.argmin(np.abs(self.wl-724+1))]
d760 = diff[np.argmin(np.abs(self.wl-760+1))]
red_slope = ((d694-d680)/(694-680))
ir_slope = ((d760-d724)/(760-724))
red_inter = d680-(red_slope*680)
ir_inter = d724-(ir_slope*724)
wl = (ir_inter-red_inter)/(ir_slope-red_slope)
print ('^!!!!!!!!! Linear:',wl)
return np.abs(wl)
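# --- Worked example (added for this write-up; not part of the original thesis code). ---
# redge_linear() above follows the Clevers et al. (1994) form: with
# hypothetical reflectances R670=5, R780=45, R700=12 and R740=38 the red edge
# position is 700 + 40*(((5+45)/2 - 12)/(38 - 12)) = 700 + 40*0.5 = 720 nm.
def _redge_linear_demo(r670=5.0, r780=45.0, r700=12.0, r740=38.0):
    r_edge = (r670 + r780) / 2
    return 700 + 40 * ((r_edge - r700) / (r740 - r700))  # 720.0 with the defaults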
class fluorescence():
    '''this class is intended to look for evidence of photosynthetic fluorescence.
    Currently this is limited to simple reflectance indices. This should be
    expanded to take in other more complex methods to investigate fluorescence'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
print ('call to fluor')
'''The init method checks the range to establish if it overlaps with
region of chlorophyll flourescence. If so it will will perform the
analysis methods and output to hdf5'''
def wl_selector(self, x):
'''this method finds the index of the wavelength closest to that
specified for reflectance'''
value = self.values[np.argmin(np.abs(self.wl-x))]
return value
def d_wl_selector(self, x):
'''this method finds the index of the wavelength closest to that
specified for the first derivative'''
diff = np.diff(self.values)
value = diff[np.argmin(np.abs(self.wl-x))+1]
return value
def wl_max_d(self):
'''method to extract wavelength of the maxima of the first derivative
and return this'''
start = np.argmin(np.abs(self.wl-650))
end = np.argmin(np.abs(self.wl-760))
diff = np.diff(self.values[start:end])
maxdiff = np.argmax(diff)
maxdiffwl = self.wl[maxdiff+start+1]
return maxdiffwl, diff[maxdiff]
def simple_ratios(self):
''' This method runs flourescence indices ratios and returns them as a
stacked numpy array'''
#r680/r630
r680r630 = self.wl_selector(680)/self.wl_selector(630)
print (r680r630)
#r685/r630
r685r630 = self.wl_selector(685)/self.wl_selector(630)
print (r685r630)
#r685/r655
r685r655 = self.wl_selector(685)/self.wl_selector(655)
print (r685r655)
#r687/r630
r687r630 = self.wl_selector(687)/self.wl_selector(630)
print (r687r630)
#r690/r630
r690r630 = self.wl_selector(690)/self.wl_selector(630)
print (r690r630)
#r750/r800
r750r800 = self.wl_selector(750)/self.wl_selector(800)
print (r750r800)
#sq(r685)/(r675-r690)
sqr685 = np.square(self.wl_selector(685))/(self.wl_selector(675)-self.wl_selector(690))
print (sqr685)
#(r675-r690)/sq(r683) Zarco-Tejada 2000
r675r690divsq683 = (self.wl_selector(675)-self.wl_selector(690))/np.square(self.wl_selector(683))
print (r675r690divsq683)
#d705/d722
d705d722 = self.d_wl_selector(705)/self.d_wl_selector(722)
print (d705d722)
#d730/d706
d730d706 = self.d_wl_selector(730)/self.d_wl_selector(706)
print (d730d706)
#(d688-d710)/sq(d697)
d686d710sq697 = (self.d_wl_selector(688)-self.d_wl_selector(710))\
/np.square(self.d_wl_selector(697))
print (d686d710sq697)
#wl at max d / d720
maxdd720 = self.wl_max_d()[1]/self.d_wl_selector(720)
print (maxdd720)
#wl at max d / d703
maxdd703 = self.wl_max_d()[1]/self.d_wl_selector(703)
print (maxdd703)
#wl at max d / d(max d+12)
print (self.wl_max_d()[0])
maxd12 = self.wl_max_d()[1]/self.d_wl_selector(self.wl_max_d()[0]+12)
print (maxd12)
combined = np.vstack((r680r630,
r685r630,
r685r655,
r687r630,
r690r630,
r750r800,
sqr685,
r675r690divsq683,
d705d722,
d730d706,
d686d710sq697,
maxdd720,
maxdd703,
maxd12))
fluo_hdr = str('"r680r630",'+
'"r685r630",'+
'"r685r655",'+
'"r687r630",'+
'"r690r630",'+
'"r750r800",'+
'"sqr685",'+
'"r675r690divsq683",'+
'"d705d722",'+
'"d730d706",'+
'"d686d710sq697",'+
'"maxdd720",'+
'"maxdd703",'+
'"maxd12"')
return combined, fluo_hdr
def dual_peak(self):
        '''This function looks for a dual peak in the red-edge region. If it's
        there it measures the depth of the feature between the two peaks.
        UNTESTED'''
        # indices (not reflectance values) are needed to slice the spectrum here
        start = np.argmin(np.abs(self.wl-640))
        end = np.argmin(np.abs(self.wl-740))
        d1_region = np.diff(self.values[start:end])
        #d2_region = np.diff(self.values[start:end], n=2)
        peak_finder = find_peaks_cwt(d1_region, np.arange(3,10))
        # map peak positions in the first derivative back onto wavelengths
        peak_wl = self.wl[start:end-1][peak_finder]
        fluor_peaks = []
        for peak, wl_at_peak in zip(peak_finder, peak_wl):
            if wl_at_peak == self.wl[np.argmin(np.abs(self.wl-668))]:
                print ('found fluorescence peak at 668nm')
                fluor_peaks.append(peak)
            elif wl_at_peak == self.wl[np.argmin(np.abs(self.wl-735))]:
                print ('found fluorescence peak at 735nm')
                fluor_peaks.append(peak)
            else:
                print ('unknown peak')
'''if len(fluor_peaks) == 2:
something = 'something'''
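# --- Hedged usage sketch (added for this write-up; not part of the original thesis code). ---
# fluorescence works on the same two-column spectrum as the other classes; a
# gently sloped synthetic spectrum is used here only so that none of the ratio
# denominators in simple_ratios() are zero.
def _fluorescence_demo():
    wl = np.arange(350.0, 2501.0)
    refl = wl / 100.0
    fluo = fluorescence(np.column_stack((wl, refl)))
    return fluo.simple_ratios()[0]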
class load_asd():
def __init__(self, indir, output_dir):
data_list = os.listdir(indir)
print (data_list)
#output_dir = os.path.join(indir,'output')
if not os.path.exists(output_dir):
            os.mkdir(output_dir)
for directory in data_list:
parent = os.path.join(indir, directory)
spectra_dir = os.path.join(parent, 'raw_spectra')
reading_info_dir = os.path.join(parent, 'reading_info')
sensor_name = 'ASD FieldSpec Pro'
sensor_type = 'SPR'
sensor_units = 'nm'
sensor_range = [350,2500]
os.chdir(reading_info_dir)
reading_info_file = open('reading_atributes.txt','rb')
reading_info = csv.DictReader(reading_info_file)
reading_info_array = np.empty(12)
readings_list = [row for row in reading_info]
for reading in readings_list[:]:
reading_filename = str(reading['reading_id']+'.txt')
reading_info_line = np.column_stack((reading['reading_id'],
reading['dartField'],
reading['transect'],
reading['transectPosition'],
reading['reading_type'],
reading['reading_coord_osgb_x'],
reading['reading_coord_osgb_y'],
reading['dateOfAcquisition'],
reading['timeOfAcquisition'],
reading['instrument_number'],
reading['dark_current'],
reading['white_ref']))
#print reading_info_line
if reading['reading_type']== 'REF':
reading_info_array = np.vstack((reading_info_array,reading_info_line))
#print reading_info_array
print ('*********** Loading File', reading_filename, '***********')
os.chdir(spectra_dir)
spec = np.genfromtxt(reading_filename,
delimiter=', ',
skiprows=30)
spec = np.column_stack((spec[:,0],spec[:,1]*100))
nir_start = 0
nir_end = 990
nir_weight = 3.5
nir_k = 4.9
nir_s =45
swir1_start = 1080
swir1_end = 1438
swir1_weight = 8.5
swir1_k = 3.5
swir1_s = 35
swir2_start = 1622
swir2_end = 2149
swir2_weight = 1.2
swir2_s = 92
swir2_k = 2.8
#smoothing(perc_out, block_start, block_end, kparam, weight, sparam)
nir_smoothed = smoothing(spec, nir_start, nir_end, nir_k, nir_weight, nir_s)
swir1_smoothed = smoothing(spec, swir1_start, swir1_end, swir1_k, swir1_weight, swir1_s)
swir2_smoothed = smoothing(spec, swir2_start, swir2_end, swir2_k, swir2_weight, swir2_s)
print ('Smoothed array shape', nir_smoothed.shape,swir1_smoothed.shape,swir2_smoothed.shape)
nir_swir_gap = interpolate_gaps(nir_smoothed,swir1_smoothed)
swir2_gap = interpolate_gaps(swir1_smoothed,swir2_smoothed)
spec_smoothed = np.vstack((nir_smoothed,
nir_swir_gap,
swir1_smoothed,
swir2_gap,
swir2_smoothed))
print ('Spec SHAPE:', spec.shape)
survey_dir = os.path.join(output_dir, directory)
if not os.path.exists(survey_dir):
os.mkdir(survey_dir)
os.chdir(survey_dir)
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
print (abs470.abs_feature()[0])
abs470_ftdef = abs470.abs_feature()[0]
print (abs470_ftdef)
abs470_crem = abs470.abs_feature()[2]
if not abs470_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs470_ftdef.txt',
abs470_ftdef,
header=abs470.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs470_crem.txt',
abs470_crem,
delimiter=',')
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
if not abs670_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs670_ftdef.txt',
abs670_ftdef,
header=abs670.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs670_crem.txt',
abs670_crem,
delimiter=',')
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1115,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
if not abs970_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs970_ftdef.txt',
abs970_ftdef,
header=abs970.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs970_crem.txt',
abs970_crem,
delimiter=',')
except:
pass
try:
abs1200 = absorption_feature(spec_smoothed,1080,1300,1190)
abs1200_ftdef = abs1200.abs_feature()[0]
abs1200_crem = abs1200.abs_feature()[2]
if not abs1200_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs1200_ftdef.txt',
abs1200_ftdef,
header=abs1200.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1200_crem.txt',
abs1200_crem,
delimiter=',')
except:
pass
try:
abs1730 = absorption_feature(spec_smoothed,1630,1790,1708)
abs1730_ftdef = abs1730.abs_feature()[0]
abs1730_crem = abs1730.abs_feature()[2]
if not abs1730_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs1730_ftdef.txt',
abs1730_ftdef,
header=abs1730.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1730_crem.txt',
abs1730_crem,
delimiter=',')
except:
pass
print (spec_smoothed.shape)
try:
abs2100 = absorption_feature(spec_smoothed,2001,2196,2188)
abs2100_ftdef = abs2100.abs_feature()[0]
abs2100_crem = abs2100.abs_feature()[2]
if not abs2100_ftdef == None:
np.savetxt(reading_filename[0:-4]+'_abs2100_ftdef.txt',
                                   abs2100_ftdef,
header=abs2100.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs2100_crem.txt',
abs2100_crem,
delimiter=',')
except:
pass
veg_indices = Indices(spec_smoothed)
indices = np.column_stack((veg_indices.visnir()[0],
veg_indices.nir_swir()[0],
veg_indices.swir()[0]))
print (veg_indices.visnir()[1],veg_indices.nir_swir()[1],veg_indices.swir()[1])
hdr = str(veg_indices.visnir()[1]+','+veg_indices.nir_swir()[1]+','+veg_indices.swir()[1])
np.savetxt(reading_filename[0:-4]+'_indices.txt',
indices,
header=hdr,
delimiter=',')
mtbvi = veg_indices.multi_tbvi()
np.savetxt(reading_filename[0:-4]+'_mtbvi.txt',
mtbvi,
delimiter=',')
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
print (redge.redge_vals)
np.savetxt(reading_filename[0:-4]+'_redge.txt',
redge.redge_vals,
delimiter=',')
fluo = fluorescence(spec_smoothed)
np.savetxt(reading_filename[0:-4]+'_flou.txt',
np.transpose(fluo.simple_ratios()[0]),
header = fluo.simple_ratios()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_spec.txt',
spec_smoothed,
delimiter=',')
class load_image():
    def __init__(self, wavelengths_dir,image_dir,out_dir):
os.chdir(wavelengths_dir)
wavelengths = np.genfromtxt('wavelengths.txt')
print ('wavelengths array', wavelengths)
os.chdir(image_dir)
image_list = os.listdir(image_dir)
for image in image_list:
import_image = self.get_image(image)
image_name = image[:-4]
print ('IMAGE NAME:', image_name)
row = 1
img_array = import_image[0]
print ('Image_array', img_array)
projection = import_image[1]
print ('Projection',projection)
x_size = import_image[2]
print ('Xdim',x_size)
y_size = import_image[3]
print ('Ydim', y_size)
spatial = import_image[4]
print (spatial)
x_top_left = spatial[0]
ew_pix_size = spatial[1]
rotation_ew = spatial[2]
y_top_left = spatial[3]
rotation_y = spatial[4]
ns_pixel_size = spatial[5]
print ('Spatial', x_top_left,ew_pix_size,rotation_ew,y_top_left,rotation_y,ns_pixel_size)
print ('IMAGE ARRAY SHAPE',img_array.shape)
img_dims = img_array.shape
print (img_dims[0],'/',img_dims[1])
#indices+29
indices_out = np.zeros((img_dims[0],img_dims[1],29), dtype=np.float32)
#print indices_out
#redge=3
redge_out = np.zeros((img_dims[0],img_dims[1]),dtype=np.float32)
#fluo=14
fluo_out=np.zeros((img_dims[0],img_dims[1],14), dtype=np.float32)
print ('fluo out', fluo_out.shape)
ft470_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft670_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft970_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
x470 = np.argmin(np.abs(wavelengths-400))
y470 = np.argmin(np.abs(wavelengths-518))
len470 = y470-x470
cr470_out = np.zeros((img_dims[0],img_dims[1],len470), dtype=np.float32)
x670 = np.argmin(np.abs(wavelengths-548))
y670 = np.argmin(np.abs(wavelengths-800))
len670 = y670-x670
cr670_out = np.zeros((img_dims[0],img_dims[1],len670), dtype=np.float32)
print (cr670_out)
x970 = np.argmin(np.abs(wavelengths-880))
y970 = np.argmin(np.abs(wavelengths-1000))
len970 = y970-x970
cr970_out = np.zeros((img_dims[0],img_dims[1],len970), dtype=np.float32)
#print cr970_out
print (wavelengths)
row = 0
print ('***', row, img_dims[0])
for i in range(0,img_dims[0]):
print (i)
column = 0
#print 'COL',column
for j in range(0,img_dims[1]):
print ('COLUMN',column)
#print 'Pixel',pixel
name = '%s_pix-%s_%s' % (image_name,row,column)
print ('NAME',name)
pixel = img_array[row,column,:]
#smoothed = savgol_filter(pixel,5,2)
#spec_smoothed = np.column_stack((wavelengths,smoothed))
spec_smoothed = np.column_stack((wavelengths,pixel))
print (spec_smoothed)
veg_indices = Indices(spec_smoothed)
indices = veg_indices.visnir()[0]
print ('(*&)(*)(*&&^)^)^)*&^)*^)*&', indices)
indices_out[row,column,:]=indices
fluo = fluorescence(spec_smoothed)
fluo_out[row,column,:]=np.transpose(fluo.simple_ratios()[0])
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
redge_out[row,column]= redge.redge_vals[0,2]
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
abs470_ftdef = abs470.abs_feature()[0]
abs470_crem = abs470.abs_feature()[2]
abs470_crem = np.column_stack((abs470_crem[:,0],abs470_crem[:,4]))
print ('!*!*!*!*!&!*!*', abs470_crem)
crem470_fill = self.crem_fill(x470,y470,abs470_crem,wavelengths)
ft470_out[row,column,:]=abs470_ftdef
cr470_out[row,column,:]=crem470_fill
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
abs670_crem = np.column_stack((abs670_crem[:,0],abs670_crem[:,4]))
ft670_out[row,column,:]=abs670_ftdef
crem670_fill = self.crem_fill(x670,y670,abs670_crem,wavelengths)
cr670_out[row,column,:]=crem670_fill
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1000,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
abs970_crem = np.column_stack((abs970_crem[:,0],abs970_crem[:,4]))
crem970_fill = self.crem_fill(x970,y970,abs970_crem,wavelengths)
ft970_out[row,column,:]=abs970_ftdef
cr970_out[row,column,:]=crem970_fill
except:
pass
column = column+1
print (pixel.shape)
row = row+1
self.writeimage(out_dir,image+'_indices.tif',indices_out,spatial)
self.writeimage(out_dir,image+'_fluo.tif',fluo_out,spatial)
self.writeimage(out_dir,image+'_redge.tif',redge_out,spatial)
self.writeimage(out_dir,image+'_ft470.tif',ft470_out,spatial)
self.writeimage(out_dir,image+'_cr470.tif',cr470_out,spatial)
self.writeimage(out_dir,image+'_ft670.tif',ft670_out,spatial)
self.writeimage(out_dir,image+'_cr670.tif',cr670_out,spatial)
self.writeimage(out_dir,image+'_ft970.tif',ft970_out,spatial)
self.writeimage(out_dir,image+'_cr970.tif',cr970_out,spatial)
def crem_fill(self,xwl,ywl,bna,wavelengths):
bna_out=np.zeros((ywl-xwl))
bna_wvl = bna[:,0]
bna_refl= bna[:,1]
full_wl = wavelengths[xwl:ywl]
index = np.argmin(np.abs(wavelengths-bna_wvl[0]))
bna_out[index:]=bna_refl
return bna_out
def get_image(self, image):
print ('call to get_image')
# open the dataset
dataset = gdal.Open(image, GA_ReadOnly)
print ('Dataset',dataset)
# if there's nothign there print error
if dataset is None:
print ('BORK: Could not load file: %s' %(image))
# otherwise do stuff
else:
#get the format
driver = dataset.GetDriver().ShortName
#get the x dimension
xsize = dataset.RasterXSize
#get the y dimension
ysize = dataset.RasterYSize
#get the projection
proj = dataset.GetProjection()
#get the number of bands
bands = dataset.RasterCount
#get the geotransform Returns a list object. This is standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
spatial = dataset.GetGeoTransform()
#print some stuff to console to show we're paying attention
print ('Found raster in %s format. Raster has %s bands' %(driver,bands))
print ('Projected as %s' %(proj))
print ('Dimensions: %s x %s' %(xsize,ysize))
#instantiate a counter
count = 1
            #OK. This is the bit that actually loads the bands, in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
#show that your computer's fans are whining for a reason
print ('Loading band: %s of %s' %(count,bands))
#get the band
band = dataset.GetRasterBand(count)
# load this as a numpy array
data_array = band.ReadAsArray()
'''data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)'''
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
stacked = data_array
#else combine these
else:
stacked = np.dstack((stacked,data_array))
#stacked = stacked.filled(-999)
#just to check it's working
#print stacked.shape
# increment the counter
count = count+1
#stacked = stacked.astype(np.float32, copy=False)
return stacked,proj,xsize,ysize,spatial
def writeimage(self,
outpath,
outname,
image,
spatial):
data_out = image
print ('ROWS,COLS',image.shape)
print ('Call to write image')
os.chdir(outpath)
print ('OUTPATH',outpath)
print ('OUTNAME',outname)
#load the driver for the format of choice
driver = gdal.GetDriverByName("Gtiff")
#create an empty output file
#get the number of bands we'll need
try:
bands = image.shape[2]
except:
bands=1
print ('BANDS OUT', bands)
#file name, x columns, y columns, bands, dtype
out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32)
#define the location using coords of top-left corner
# minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
out.SetGeoTransform(spatial)
srs = osr.SpatialReference()
#get the coodrinate system using the ESPG code
srs.SetWellKnownGeogCS("EPSG:27700")
        #set the projection of the output file
out.SetProjection(srs.ExportToWkt())
band = 1
if bands == 1:
out.GetRasterBand(band).WriteArray(data_out)
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#apend the statistics to dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
else:
while (band<=bands):
data = data_out[:,:,band-1]
#write values to empty array
out.GetRasterBand(band).WriteArray( data )
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
#apend the statistics to dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
band = band+1
out = None
print ('Processing of %s complete' % (outname))
return outname
if __name__ == "__main__":
#dir_path = os.path.dirname(os.path.abspath('...'))
#data_root = os.path.join(dir_path, 'data')
data_root = '/home/dav/data/temp/test/test_spec'
for folder in os.listdir(data_root):
input_dir = os.path.join(data_root,folder)
print (input_dir)
surveys_list = os.listdir(input_dir)
print (surveys_list)
for survey_dir in surveys_list:
print (survey_dir)
site_dir=os.path.join(input_dir,survey_dir)
print (site_dir)
image_path = os.path.join(site_dir, 'image')
print (image_path)
wavelengths_dir = os.path.join(site_dir, 'wavelengths')
print (wavelengths_dir)
out_dir = os.path.join(site_dir,'output')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
load_image(wavelengths_dir,image_path,out_dir)
|
mit
| 2,521,095,999,925,145,000 | 38.961845 | 116 | 0.457732 | false |
fivezh/Keepgoing
|
py_spider/py_spider.py
|
1
|
1520
|
#!/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import HTMLParser
import re
import urlparse
import threading
import ConfigParser
import argparse
import time
import logging
import sys
import logger
import config_load
import seedfile_load
import crawl_thread
if __name__ == "__main__":
logger.init_log("./log/spider", level=logging.DEBUG)
# Arguments parser
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--version", help="show current script version", \
action="version", version="%(prog)s 1.0")
parser.add_argument("-c", "--conf", help="set config file", required="true")
args = parser.parse_args()
if args.conf:
conf = args.conf
logging.info("Success to read conf args : %s", conf)
else:
logging.error("Fail to read conf args")
sys.exit(1)
# Config parser
spider_conf = config_load.SpiderConf()
res = spider_conf.conf_parse(conf)
if res is not None:
        logging.error("Fail to parse the config (%s)", conf)
sys.exit(1)
logging.info('Success to parse the config.')
# Seed file load
urls = seedfile_load.seedfile_load(spider_conf.url_list_file)
if urls is None:
logging.error("Fail to load the urls seed. Check the log for more info")
sys.exit(1)
logging.info('Success to load the url seed.')
# Multi-Threads crawl webpage
crawl_thread.crawl_thread_control(urls, spider_conf)
logging.info("Finish all the multi-threading crawl requests.")
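# --- Hedged usage note (added for illustration; not part of the original script). ---
# The script is driven entirely by the required -c/--conf argument, e.g.:
#   python py_spider.py -c spider.conf
# "spider.conf" is a hypothetical file name; it is expected to define at least
# the url_list_file consumed by SpiderConf and seedfile_load above.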
|
mit
| 3,997,408,198,390,845,400 | 27.679245 | 80 | 0.665132 | false |
TheAlgorithms/Python
|
project_euler/problem_006/sol4.py
|
1
|
1038
|
"""
Project Euler Problem 6: https://projecteuler.net/problem=6
Sum square difference
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten
natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one
hundred natural numbers and the square of the sum.
"""
def solution(n: int = 100) -> int:
"""
Returns the difference between the sum of the squares of the first n
natural numbers and the square of the sum.
>>> solution(10)
2640
>>> solution(15)
13160
>>> solution(20)
41230
>>> solution(50)
1582700
"""
sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
square_of_sum = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares)
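# --- Worked check (added for illustration). ---
# For n = 10 the closed forms used above give:
#   sum of squares = 10 * 11 * 21 / 6 = 385
#   square of sum  = (10 * 11 / 2) ** 2 = 55 ** 2 = 3025
#   difference     = 3025 - 385 = 2640, matching the doctest for solution(10).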
if __name__ == "__main__":
print(f"{solution() = }")
|
mit
| -9,040,634,359,307,308,000 | 24.317073 | 72 | 0.620424 | false |
awbirdsall/popmodel
|
src/popmodel/absprofile.py
|
1
|
3828
|
'''popmodel module for AbsProfile class
'''
from __future__ import division
from . import ohcalcs as oh
import logging
import numpy as np
from scipy.constants import k as kb
from scipy.constants import c
class AbsProfile(object):
'''absorbance line profile, initially defined in __init__ by a center
wavenumber `wnum` and a `binwidth`. Calling self.makeprofile then generates
two 1D arrays:
abs_freq : bins of frequencies (Hz)
pop : relative population absorbing in each frequency bin
pop is generated from abs_freq and the Voigt profile maker ohcalcs.voigt,
which requires parameters that are passed through as makeprofile arguments
(default are static parameters in ohcalcs). The formation of the two arrays
is iterative, widening the abs_freq range by 50% until the edges of the pop
array have less than 1% of the center.
'''
def __init__(self, wnum, binwidth=1.e6):
self.logger = logging.getLogger('popmodel.absprofile.AbsProfile')
self.wnum = wnum # cm^-1
self.freq = wnum*c*100 # Hz
self.binwidth = binwidth # Hz
# instance attributes calculated in makeprofile
self.abs_freq = None
self.pop = None
self.fwhm = None
# intpop calculated in Sweep.alignbins() call
self.intpop = None
def __str__(self):
return 'Absorbance feature centered at '+str(self.wnum)+' cm^-1'
def makeprofile(self, abswidth=1000.e6, press=oh.OP_PRESS, T=oh.TEMP,
g_air=oh.G_AIR, mass=oh.MASS, edgecutoff=0.01):
''' Use oh.voigt to create absorption profile.
Writes to self.abs_freq and self.pop.
Parameters:
-----------
abswidth : float
Minimum width of profile, Hz. Starting value that then expands if
this does not capture 'enough' of the profile (defined as <1% of
peak height at edges).
press : float
Operating pressure, torr. Defaults to ohcalcs value.
T : float
Temperature. Defaults to ohcalcs value
g_air : float
Air-broadening coefficient provided in HITRAN files, cm^-1 atm^-1.
Defaults to ohcalcs value (only appropriate for IR line).
mass : float
Mass of molecule of interest, kg. Defaults to ohcalcs value
edgecutoff : float
Cut-off for acceptable relative intensity at edge of profile
compared to peak. If relative intensity is larger than this value,
Voigt profile will be recalculated over a 50% broader frequency
range.
'''
sigma = oh.doppler_sigma(self.freq, T, mass)
gamma = oh.pressure_gamma(g_air, press)
# Make abs_freq profile, checking pop at edge <1% of peak
enoughwidth = False
while enoughwidth == False:
abs_freq = np.arange(-abswidth/2,
abswidth/2+self.binwidth,
self.binwidth)
raw_pop = oh.voigt(abs_freq, 1, 0, sigma, gamma, True)
norm_factor = 1/np.sum(raw_pop)
pop = raw_pop * norm_factor # makes sum of pops = 1.
if pop[0] >= edgecutoff*np.max(pop):
abswidth = abswidth*1.5
else:
enoughwidth = True
self.abs_freq = abs_freq
self.pop = pop
startfwhm = abs_freq[pop >= np.max(pop)*0.5][0]
endfwhm = abs_freq[pop >= np.max(pop)*0.5][-1]
self.fwhm = endfwhm - startfwhm
self.logger.info('makeprofile: made abs profile')
self.logger.info('makeprofile: abs profile has FWHM = %.2g MHz',
self.fwhm/1e6)
self.logger.info('makeprofile: total width of stored array = %.2g MHz',
abswidth/1e6)
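# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# Typical flow: construct the profile around a line centre given in cm^-1 and
# let makeprofile() choose the frequency grid with its default pressure,
# temperature and broadening parameters from ohcalcs. The 3407 cm^-1 value is
# only a placeholder wavenumber for this sketch.
def _absprofile_demo():
    profile = AbsProfile(wnum=3407.0)
    profile.makeprofile()
    return profile.fwhm, profile.abs_freq.size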
|
mit
| -1,456,483,203,360,930,300 | 38.061224 | 79 | 0.605799 | false |
jcrocholl/nxdom
|
tests/views.py
|
1
|
2372
|
from datetime import datetime, timedelta
from ragendja.template import render_to_response
from ragendja.dbutils import get_object_or_404
from tests.models import Comparison
def statistics(path):
results = {'path': path}
missing = []
seconds1 = []
seconds2 = []
total = errors = failures = 0
query = Comparison.all().filter('path', path)
query.filter('timestamp >', datetime.now() - timedelta(hours=24))
for comparison in query.fetch(400):
if not comparison.path or not comparison.path.startswith(path):
continue
if comparison.message and 'message' not in results:
results['message'] = comparison.message
total += 1
if comparison.message == 'error':
errors += 1
elif comparison.message:
failures += 1
if comparison.missing1:
missing.append(comparison.missing1)
if comparison.missing2:
missing.append(comparison.missing2)
if comparison.seconds1:
seconds1.append(comparison.seconds1)
if comparison.seconds2:
seconds2.append(comparison.seconds2)
results['total'] = total
if not total:
results['error_percent'] = 0.0
results['failure_percent'] = 0.0
return results
results['error_percent'] = 100.0 * errors / total
results['failure_percent'] = 100.0 * failures / total
missing.sort()
if missing:
results['missing_min'] = min(missing)
results['missing_median'] = missing[len(missing) / 2]
results['missing_max'] = max(missing)
seconds1.sort()
seconds2.sort()
results['seconds1_min'] = min(seconds1)
results['seconds2_min'] = min(seconds2)
results['seconds1_median'] = seconds1[len(seconds1) / 2]
results['seconds2_median'] = seconds2[len(seconds2) / 2]
results['seconds1_max'] = max(seconds1)
results['seconds2_max'] = max(seconds2)
return results
def index(request):
statistics_list = [
statistics('/domains/descending/'),
statistics('/dns/descending/'),
]
return render_to_response(request, 'tests/index.html', locals())
def detail(request, path):
query = Comparison.all().filter('path', path).order('-timestamp')
comparisons_list = query.fetch(100)
return render_to_response(request, 'tests/detail.html', locals())
|
mit
| 6,682,742,311,232,221,000 | 33.376812 | 71 | 0.636172 | false |
omanor/MUSiCC
|
tests/test_musicc.py
|
1
|
5038
|
#!/usr/bin/env python
"""
This is the testing unit for MUSiCC
"""
# to comply with both Py2 and Py3
from __future__ import absolute_import, division, print_function
import unittest
import os
import pandas as pd
import musicc
from musicc.core import correct_and_normalize
class MUSiCCTestCase(unittest.TestCase):
"""Tests for `musicc.py`."""
path_to_data = os.path.dirname(musicc.__file__)
def test_is_output_correct_for_normalization_only(self):
"""Does MUSiCC produce the correct output for normalization of the example case?"""
print(MUSiCCTestCase.path_to_data)
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test1.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'None', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to OS/Other)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test1.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test1.tab')
def test_is_output_correct_for_normalization_correction_use_generic(self):
"""Does MUSiCC produce the correct output for normalization and correction of the example case?"""
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test2.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'use_generic', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to OS/Other)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized_Corrected_use_generic.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test2.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test2.tab')
def test_is_output_correct_for_normalization_correction_learn_model(self):
"""Does MUSiCC produce the correct output for normalization and correction of the example case?"""
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test3.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'learn_model', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to de novo learning)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized_Corrected_learn_model.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test3.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test3.tab')
################################################
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 6,936,621,393,585,861,000 | 53.76087 | 144 | 0.641921 | false |
aktech/codeplay
|
playcode/views.py
|
1
|
1029
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
import requests
# Constants
RUN_URL = u'https://api.hackerearth.com/v3/code/run/'
CLIENT_SECRET = 'cd70993ac2dbee9e7c7b2c533a104a7d621632fa'
def home(request):
if request.method == 'POST':
        # POST requests are handled here; is_ajax() is required to capture AJAX requests.
if request.is_ajax():
lang = request.POST.get('lang')
source = request.POST.get('source')
data = {"lang": lang, "source": source}
data = {
'client_secret': CLIENT_SECRET,
'async': 0,
'source': source,
'lang': lang,
'time_limit': 5,
'memory_limit': 262144,
}
# Post data to HackerEarth API
r = requests.post(RUN_URL, data=data)
return JsonResponse(r.json(), safe=False)
    # GET requests fall through to here
return render(request, 'home.html')
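# A minimal URL wiring for this view might look like the sketch below; the
# project's actual urls.py is not shown here, so the module path and pattern
# name are assumptions:
#
#     from django.conf.urls import url
#     from playcode import views
#
#     urlpatterns = [
#         url(r'^$', views.home, name='home'),
#     ]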
|
mit
| 249,148,959,412,249,570 | 30.181818 | 68 | 0.578231 | false |
inveniosoftware/invenio-pages
|
tests/test_rest.py
|
1
|
1969
|
from invenio_pages import InvenioPagesREST
from invenio_pages.rest import blueprint
# mock the rest API url prefix
blueprint.url_prefix = '/api/{}'.format(blueprint.url_prefix)
def test_page_content(pages_fixture):
"""Test page content."""
app = pages_fixture
app.register_blueprint(blueprint)
with app.app_context():
with app.test_client() as client:
resp = client.get('/api/pages/1')
assert resp.status_code == 200
assert resp.json == {
'title': 'Page for Dogs!', 'description': '',
'url': '/dogs', 'content': 'Generic dog.',
'id': '1', 'links': {
'self': 'http://localhost/api/pages/1'
}
}
def test_html_content(pages_fixture):
"""Test page content."""
app = pages_fixture
app.register_blueprint(blueprint)
with app.app_context():
with app.test_client() as client:
resp = client.get('/api/pages/4')
assert resp.status_code == 200
assert resp.json == {
'title': 'Page for modern dogs!', 'description': '',
'url': '/htmldog',
'content': '<h1>HTML aware dog.</h1>.\n'
'<p class="test">paragraph<br /></p>',
'id': '4', 'links': {
'self': 'http://localhost/api/pages/4'
}
}
def test_page_etag(pages_fixture):
"""Test page content."""
app = pages_fixture
app.register_blueprint(blueprint)
with app.app_context():
with app.test_client() as client:
resp = client.get('/api/pages/1')
assert resp.status_code == 200
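            # Re-request the page with If-None-Match set to the ETag returned
            # above; the endpoint should answer 304 Not Modified.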
resp = client.get('/api/pages/1',
headers=(
('If-None-Match',
resp.headers.get('ETag')),))
assert resp.status_code == 304
|
gpl-2.0
| 9,188,939,193,106,529,000 | 31.278689 | 68 | 0.499238 | false |
ATenderholt/cclib
|
doc/sphinx/coverage.py
|
1
|
4455
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Generate the coverage.rst and coverage.rst files from test results."""
from __future__ import print_function
import os
import sys
from docs_common import check_cclib
# Import cclib and check we are using the version from a subdirectory.
import cclib
check_cclib(cclib)
def generate_coverage():
"""Generate a string containing a reStructuredTest table
representation of which parsers support which attributes, based on
test results.
"""
lines = []
# Change directory to where tests are and add it to the path. Because there are
# separate directories for different branches/versions, and we use a symlink to
# point to the one we want, we need to test the real path this link resolves to.
if "cclib_prod" in os.path.realpath('cclib'):
testpath = "_build/cclib_prod"
else:
assert "cclib_dev" in os.path.realpath('cclib')
testpath = "_build/cclib_dev"
os.chdir(testpath)
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, thispath)
from test.test_data import (all_modules, all_parsers, parser_names, DataSuite)
import inspect
ds_args = inspect.getargspec(DataSuite.__init__).args
logpath = thispath + "/coverage.tests.log"
try:
with open(logpath, "w") as flog:
stdout_backup = sys.stdout
sys.stdout = flog
alltests = {}
for p in parser_names:
assert 'parsers' in ds_args
suite = DataSuite(parsers={p: all_parsers[p]}, modules=all_modules, stream=flog)
suite.testall()
alltests[p] = [{'data': t.data} for t in suite.alltests]
sys.stdout = stdout_backup
except Exception as e:
print("Unit tests did not run correctly. Check log file for errors:")
with open(logpath) as fh:
print(fh.read())
print(e)
sys.exit(1)
ncols = len(parser_names) + 1
colwidth = 20
colfmt = "%%-%is" % colwidth
dashes = ("=" * (colwidth - 1) + " ") * ncols
lines.append(dashes)
lines.append(colfmt * ncols % tuple(["attributes"] + parser_names))
lines.append(dashes)
# Eventually we want to move this to cclib, too.
not_applicable = {
'ADF' : ['aonames', 'ccenergies', 'mpenergies'],
'DALTON' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESS' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'GAMESSUK' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Gaussian' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Jaguar' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Molpro' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'NWChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'ORCA' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'Psi' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
'QChem' : ['fonames', 'fooverlaps', 'fragnames', 'frags'],
}
not_possible = {
'Psi' : ['aooverlaps', 'vibirs'],
'QChem' : ['aooverlaps', 'etrotats'],
}
# For each attribute, get a list of Boolean values for each parser that flags
# if it has been parsed by at least one unit test. Substitute an OK sign or
    # T/D appropriately, with the exception of attributes that have been explicitly
# designated as N/A.
attributes = sorted(cclib.parser.data.ccData._attrlist)
for attr in attributes:
parsed = [any([attr in t['data'].__dict__ for t in alltests[p]]) for p in parser_names]
for ip, p in enumerate(parsed):
if p:
parsed[ip] = "√"
else:
if attr in not_applicable.get(parser_names[ip], []):
parsed[ip] = "N/A"
elif attr in not_possible.get(parser_names[ip], []):
parsed[ip] = "N/P"
else:
parsed[ip] = "T/D"
lines.append(colfmt*ncols % tuple(["`%s`_" % attr] + parsed))
lines.append(dashes)
lines.append("")
for attr in attributes:
lines.append(".. _`%s`: data_notes.html#%s" % (attr, attr))
return "\n".join(lines)
if __name__ == "__main__":
print(generate_coverage())
|
bsd-3-clause
| -4,281,879,993,898,752,500 | 35.801653 | 96 | 0.592859 | false |
yookoala/ibus-cangjie
|
tests/test_cangjie.py
|
1
|
12361
|
# Copyright (c) 2013 - The IBus Cangjie authors
#
# This file is part of ibus-cangjie, the IBus Cangjie input method engine.
#
# ibus-cangjie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-cangjie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-cangjie. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from gi.repository import IBus
from ibus_cangjie.engine import *
class CangjieTestCase(unittest.TestCase):
def setUp(self):
self.engine = EngineCangjie()
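        # The assertions in this module inspect recording attributes on the
        # engine (_mock_auxiliary_text, _mock_committed_text and
        # canberra._mock_played_events) that stand in for the real IBus UI
        # side effects.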
def tearDown(self):
del self.engine
def test_single_key(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
def test_single_key_space_single_char(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 1)
def test_single_key_space_two_candidates(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 2)
def test_two_candidates_space(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
# Keep track of the first candidate, check later if it was committed
expected = self.engine.lookuptable.get_candidate(0).text
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 1)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
self.assertEqual(self.engine._mock_committed_text, expected)
def test_two_candidates_continue_input(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
# Keep track of the first candidate, check later if it was committed
expected = self.engine.lookuptable.get_candidate(0).text
self.engine.do_process_key_event(IBus.a, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 1)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
self.assertEqual(self.engine._mock_committed_text, expected)
def test_max_input(self):
# Get to max char
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.a, 0, 0)
# Try adding one more and get the error bell
self.engine.do_process_key_event(IBus.a, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 5)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 1)
# And once more
self.engine.do_process_key_event(IBus.a, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 5)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 2)
def test_inexistent_combination(self):
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 5)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 1)
def test_wildcard(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.asterisk, 0, 0)
self.engine.do_process_key_event(IBus.d, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 3)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 3)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertTrue(self.engine.lookuptable.get_number_of_candidates() > 1)
def test_wildcard_first(self):
self.engine.do_process_key_event(IBus.asterisk, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 1)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_wildcard_last(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.asterisk, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 2)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 1)
def test_backspace(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.BackSpace, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_backspace_on_multiple_keys(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.BackSpace, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_backspace_on_candidates(self):
self.engine.do_process_key_event(IBus.a, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.engine.do_process_key_event(IBus.BackSpace, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_backspace_on_multiple_keys_and_candidates(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.asterisk, 0, 0)
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.engine.do_process_key_event(IBus.BackSpace, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 2)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_escape(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.Escape, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_escape_on_candidates(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.asterisk, 0, 0)
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.engine.do_process_key_event(IBus.Escape, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_autoclear_on_error(self):
# First make an error on purpose
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 2)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 1)
# Now go on inputting
self.engine.do_process_key_event(IBus.z, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_autoclear_on_error_max_input(self):
# First make an error on purpose
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.z, 0, 0)
self.engine.do_process_key_event(IBus.space, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 5)
if os.environ["IBUS_CANGJIE_TESTS_HAVE_PYCANBERRA"] == "true":
self.assertEqual(len(self.engine.canberra._mock_played_events), 1)
# Now go on inputting
self.engine.do_process_key_event(IBus.z, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_symbol(self):
self.engine.do_process_key_event(IBus.at, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 0)
self.assertEqual(len(self.engine._mock_committed_text), 1)
self.assertEqual(self.engine.lookuptable.get_number_of_candidates(), 0)
def test_multiple_punctuation(self):
self.engine.do_process_key_event(IBus.comma, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 0)
self.assertTrue(self.engine.lookuptable.get_number_of_candidates() > 1)
def test_char_then_multiple_punctuation(self):
self.engine.do_process_key_event(IBus.d, 0, 0)
self.engine.do_process_key_event(IBus.comma, 0, 0)
self.assertEqual(len(self.engine._mock_auxiliary_text), 1)
self.assertEqual(len(self.engine._mock_committed_text), 1)
self.assertTrue(self.engine.lookuptable.get_number_of_candidates() > 1)
def test_punctuation_then_punctuation(self):
self.engine.do_process_key_event(IBus.comma, 0, 0)
self.engine.do_process_key_event(IBus.comma, 0, 0)
|
gpl-3.0
| -6,609,231,990,879,438,000 | 43.624549 | 79 | 0.673247 | false |
Wireless-Innovation-Forum/Spectrum-Access-System
|
src/harness/reference_models/geo/census_tract.py
|
1
|
1899
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Driver for access to Census tract data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from reference_models.geo import CONFIG
class CensusTractDriver(object):
def __init__(self, census_tract_directory=None):
self.SetCensusTractDirectory(census_tract_directory)
def SetCensusTractDirectory(self, census_tract_directory):
"""Configures the Census Tracts data directory."""
self._census_tract_dir = census_tract_directory
if self._census_tract_dir is None:
self._census_tract_dir = CONFIG.GetCensusTractsDir()
def GetCensusTract(self, fips_code):
"""Returns the census tract (as a Python object) for the given FIPS code (as a str)."""
fips_code = str(fips_code)
census_tract_file_path = os.path.join(self._census_tract_dir, "%s.json" % fips_code)
if os.path.exists(census_tract_file_path):
with open(census_tract_file_path, 'r') as census_tract_file:
try:
return json.load(census_tract_file)
except:
raise IOError('Error occurred in opening Census Tract File: %s' % census_tract_file.name)
else:
raise Exception("Census Tract data not found with FIPS Code: %s" % fips_code)
|
apache-2.0
| 1,746,755,792,508,643,600 | 40.282609 | 99 | 0.7109 | false |
Wasper256/locallibrary
|
catalog/views.py
|
1
|
6065
|
"""Main file that describes all of views."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.mixins import (LoginRequiredMixin,
PermissionRequiredMixin)
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy
from django.views import generic
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from .forms import RenewBookForm
from .models import Author, Book, BookInstance
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available = (
BookInstance.objects.filter(status__exact='a').count())
num_authors = Author.objects.count() # The 'all()' is implied by default.
# Number of visits to this view, as counted in the session variable.
num_visits = request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits + 1
# Render the HTML template index.html with the data in the context variable
return render(
request,
'index.html',
context={
'num_books': num_books, 'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors, 'num_visits': num_visits})
class BookListView(generic.ListView):
"""Generic class-based view for a list of books."""
model = Book
paginate_by = 10
class BookDetailView(generic.DetailView):
"""Class to view book details."""
model = Book
def book_detail_view(self, request, pk):
"""Main method to view book details."""
try:
book_id = Book.objects.get(pk=pk)
except Book.DoesNotExist:
            raise Http404("Book does not exist")
# book_id=get_object_or_404(Book, pk=pk)
return render(
request,
'catalog/book_detail.html',
context={'book': book_id, }
)
class AuthorListView(generic.ListView):
"""Generic class-based list view for a list of authors."""
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
"""Generic class-based detail view for an author."""
model = Author
# class MyView(LoginRequiredMixin, View):
# login_url = '/login/'
# redirect_field_name = 'redirect_to'
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
"""Generic class-based view listing books on loan to current user."""
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 10
def get_queryset(self):
"""Main method to look on loaned books with details."""
return BookInstance.objects.filter(borrower=self.request.user).filter(
status__exact='o').order_by('due_back')
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
"""
Generic class-based view listing all books on loan.
Only visible to users with can_mark_returned permission.
"""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 10
def get_queryset(self):
"""Function to return book instances but only on loan."""
return BookInstance.objects.filter(
status__exact='o').order_by('due_back')
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
"""View function for renewing a specific BookInstance by librarian."""
book_inst = get_object_or_404(BookInstance, pk=pk)
# If this is a POST request then process the Form data
if request.method == 'POST':
# Create a form instance and populate it with data from the request
form = RenewBookForm(request.POST)
# Check if the form is valid:
if form.is_valid():
# process the data in form.cleaned_data as required
# (here we just write it to the model due_back field)
book_inst.due_back = form.cleaned_data['renewal_date']
book_inst.save()
# redirect to a new URL:
return HttpResponseRedirect(reverse('all-borrowed'))
# If this is a GET (or any other method) create the default form
else:
proposed_renewal_date = (
datetime.date.today() + datetime.timedelta(weeks=3))
form = RenewBookForm(initial={'renewal_date': proposed_renewal_date, })
return render(
request, 'catalog/book_renew_librarian.html', {
'form': form, 'bookinst': book_inst})
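# RenewBookForm (imported from .forms, not shown in this file) is assumed to be
# a plain django.forms.Form exposing a DateField named 'renewal_date' plus
# whatever validation limits the proposed renewal window; only
# cleaned_data['renewal_date'] is relied upon above.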
class AuthorCreate(CreateView):
"""Author creation functionality."""
model = Author
fields = '__all__'
initial = {'date_of_death': '2016-12-10', }
class AuthorUpdate(UpdateView):
"""Author update functionality."""
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
class AuthorDelete(DeleteView):
"""Author delete functionality."""
model = Author
success_url = reverse_lazy('authors')
class BookCreate(PermissionRequiredMixin, CreateView):
"""Book creation functionality."""
model = Book
fields = '__all__'
initial = {'date_of_death': '2016-12-10', }
permission_required = 'catalog.can_mark_returned'
class BookUpdate(PermissionRequiredMixin, UpdateView):
"""Book update functionality."""
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookDelete(PermissionRequiredMixin, DeleteView):
"""Book delete functionality."""
model = Book
success_url = reverse_lazy('books')
permission_required = 'catalog.can_mark_returned'
|
gpl-3.0
| 3,097,310,997,405,115,000 | 29.943878 | 79 | 0.66249 | false |
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_update_network_configuration.py
|
1
|
2800
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualMachineScaleSetUpdateNetworkConfiguration(SubResource):
"""Describes a virtual machine scale set network profile's network
configurations.
:param id: Resource Id
:type id: str
:param name: The network configuration name.
:type name: str
:param primary: Whether this is a primary NIC on a virtual machine.
:type primary: bool
:param enable_accelerated_networking: Specifies whether the network
interface is accelerated networking-enabled.
:type enable_accelerated_networking: bool
:param network_security_group: The network security group.
:type network_security_group:
~azure.mgmt.compute.v2017_03_30.models.SubResource
:param dns_settings: The dns settings to be applied on the network
interfaces.
:type dns_settings:
~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetNetworkConfigurationDnsSettings
:param ip_configurations: The virtual machine scale set IP Configuration.
:type ip_configurations:
list[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetUpdateIPConfiguration]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'enable_accelerated_networking': {'key': 'properties.enableAcceleratedNetworking', 'type': 'bool'},
'network_security_group': {'key': 'properties.networkSecurityGroup', 'type': 'SubResource'},
'dns_settings': {'key': 'properties.dnsSettings', 'type': 'VirtualMachineScaleSetNetworkConfigurationDnsSettings'},
'ip_configurations': {'key': 'properties.ipConfigurations', 'type': '[VirtualMachineScaleSetUpdateIPConfiguration]'},
}
def __init__(self, **kwargs):
super(VirtualMachineScaleSetUpdateNetworkConfiguration, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.primary = kwargs.get('primary', None)
self.enable_accelerated_networking = kwargs.get('enable_accelerated_networking', None)
self.network_security_group = kwargs.get('network_security_group', None)
self.dns_settings = kwargs.get('dns_settings', None)
self.ip_configurations = kwargs.get('ip_configurations', None)
|
mit
| 2,217,715,932,702,342,000 | 48.122807 | 125 | 0.671429 | false |
trilan/stencil
|
stencil/variables.py
|
1
|
1115
|
import os
from optparse import make_option
class Variable(object):
def __init__(self, name, default=None, environ=None, help=None,
metavar=None, prompt=None):
self.name = name
if environ:
self.default = os.environ.get(environ, default)
else:
self.default = default
self.help = help
self.metavar = metavar
self.prompt_text = prompt
def is_valid(self, value):
return value or self.default is not None
def prompt(self):
text = self.prompt_text or self.name.replace('_', ' ').capitalize()
if self.default:
text = '{0} [{1}]'.format(text, self.default)
while True:
value = raw_input('{0}: '.format(text)).strip()
if self.is_valid(value):
return value or self.default
class String(Variable):
def add_to_parser(self, parser):
parser.add_argument('--{0}'.format(self.name.replace('_', '-')),
action='store', dest=self.name,
help=self.help, metavar=self.metavar)
|
bsd-2-clause
| 6,171,686,205,864,813,000 | 29.972222 | 75 | 0.552466 | false |
gchiam/gchiam_cc
|
gchiam_cc/config/settings.py
|
1
|
13827
|
# -*- coding: utf-8 -*-
"""
Django settings for gchiam_cc project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; the import above is attempted even in Dev.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
class Common(Configuration):
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(True)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Gordon Chiam', 'gordon.chiam@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('sqlite:////%s/gchiam_cc.db' % BASE_DIR)
########## END DATABASE CONFIGURATION
########## CACHING
    # Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
    ########## Your common stuff: Below this line define 3rd party library settings
class Local(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## Mail settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
########## End mail settings
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
    ########## Your local stuff: Below this line define 3rd party library settings
class Production(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## SECRET KEY
SECRET_KEY = values.SecretValue()
########## END SECRET KEY
########## django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
########## end django-secure
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
########## END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## EMAIL
DEFAULT_FROM_EMAIL = values.Value(
'gchiam_cc <gchiam_cc-noreply@example.com>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[gchiam_cc] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## END TEMPLATE CONFIGURATION
########## CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
########## END CACHING
    ########## Your production stuff: Below this line define 3rd party library settings
|
bsd-3-clause
| 3,527,297,772,813,317,600 | 34.093909 | 119 | 0.639763 | false |
ubuntu-core/snapcraft
|
tests/unit/sources/test_base.py
|
1
|
4911
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import requests
from unittest import mock
from testtools.matchers import Contains, Equals
from snapcraft.internal.sources import _base, errors
from tests import unit
class TestFileBase(unit.TestCase):
def get_mock_file_base(self, source, dir):
file_src = _base.FileBase(source, dir)
setattr(file_src, "provision", mock.Mock())
return file_src
@mock.patch("snapcraft.internal.sources._base.FileBase.download")
def test_pull_url(self, mock_download):
mock_download.return_value = "dir"
file_src = self.get_mock_file_base("http://snapcraft.io/snapcraft.yaml", "dir")
file_src.pull()
mock_download.assert_called_once_with()
file_src.provision.assert_called_once_with(
file_src.source_dir, src="dir", clean_target=False
)
@mock.patch("shutil.copy2")
def test_pull_copy(self, mock_shutil_copy2):
file_src = self.get_mock_file_base("snapcraft.yaml", "dir")
file_src.pull()
expected = os.path.join(file_src.source_dir, "snapcraft.yaml")
mock_shutil_copy2.assert_called_once_with(file_src.source, expected)
file_src.provision.assert_called_once_with(
file_src.source_dir, src=expected, clean_target=False
)
@mock.patch("shutil.copy2", side_effect=FileNotFoundError())
def test_pull_copy_source_does_not_exist(self, mock_shutil_copy2):
file_src = self.get_mock_file_base("does-not-exist.tar.gz", ".")
raised = self.assertRaises(errors.SnapcraftSourceNotFoundError, file_src.pull)
self.assertThat(
str(raised), Contains("Failed to pull source: 'does-not-exist.tar.gz'")
)
@mock.patch("snapcraft.internal.sources._base.requests")
@mock.patch("snapcraft.internal.sources._base.download_requests_stream")
@mock.patch("snapcraft.internal.sources._base.download_urllib_source")
def test_download_file_destination(self, dus, drs, req):
file_src = self.get_mock_file_base("http://snapcraft.io/snapcraft.yaml", "dir")
self.assertFalse(hasattr(file_src, "file"))
file_src.pull()
self.assertThat(
file_src.file,
Equals(
os.path.join(file_src.source_dir, os.path.basename(file_src.source))
),
)
@mock.patch("snapcraft.internal.common.get_url_scheme", return_value=False)
@mock.patch("requests.get", side_effect=requests.exceptions.ConnectionError("foo"))
def test_download_error(self, mock_get, mock_gus):
base = self.get_mock_file_base("", "")
base.source_checksum = False
raised = self.assertRaises(errors.SnapcraftRequestError, base.download, None)
self.assertThat(str(raised), Contains("Network request error"))
@mock.patch("snapcraft.internal.sources._base.download_requests_stream")
@mock.patch("snapcraft.internal.sources._base.requests")
def test_download_http(self, mock_requests, mock_download):
file_src = self.get_mock_file_base("http://snapcraft.io/snapcraft.yaml", "dir")
mock_request = mock.Mock()
mock_requests.get.return_value = mock_request
file_src.pull()
mock_requests.get.assert_called_once_with(
file_src.source, stream=True, allow_redirects=True
)
mock_request.raise_for_status.assert_called_once_with()
mock_download.assert_called_once_with(mock_request, file_src.file)
@mock.patch("snapcraft.internal.sources._base.download_urllib_source")
def test_download_ftp(self, mock_download):
file_src = self.get_mock_file_base("ftp://snapcraft.io/snapcraft.yaml", "dir")
file_src.pull()
mock_download.assert_called_once_with(file_src.source, file_src.file)
@mock.patch("snapcraft.internal.indicators.urlretrieve")
def test_download_ftp_url_opener(self, mock_urlretrieve):
file_src = self.get_mock_file_base("ftp://snapcraft.io/snapcraft.yaml", "dir")
file_src.pull()
self.assertThat(mock_urlretrieve.call_count, Equals(1))
self.assertThat(mock_urlretrieve.call_args[0][0], Equals(file_src.source))
self.assertThat(mock_urlretrieve.call_args[0][1], Equals(file_src.file))
|
gpl-3.0
| 9,121,078,470,473,435,000 | 38.926829 | 87 | 0.677866 | false |
letuananh/visualkopasu
|
visko/tools.py
|
1
|
13876
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Database setup script for VisualKopasu
"""
# This code is a part of visualkopasu (visko): https://github.com/letuananh/visualkopasu
# :copyright: (c) 2012 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: GPLv3, see LICENSE for more details.
import sys
import os
import argparse
from lxml import etree
from texttaglib.chirptext import confirm, header
from coolisf.model import Document
from visko import __version__
from visko.config import ViskoConfig as vkconfig
from visko.kopasu.bibman import Biblioteca
from visko.merchant.redwood import parse_document
from visko.merchant.morph import xml2db
def get_raw_doc_folder(collection_name, corpus_name, doc_name):
return os.path.join(vkconfig.DATA_FOLDER, "raw", collection_name, corpus_name, doc_name)
def convert_document(collection_name, corpus_name, doc_name, answer=None, active_only=False, use_raw=False):
''' Convert XML to DB '''
raw_folder = get_raw_doc_folder(collection_name, corpus_name, doc_name)
collection_folder = os.path.join(vkconfig.BIBLIOTECHE_ROOT, collection_name)
print("Attempting to parse document from raw text into XML")
print("Source folder: %s" % raw_folder)
print("Collection folder: %s" % collection_folder)
print("Biblioteca: %s" % collection_name)
print("Corpus name: %s" % corpus_name)
print("Document name: %s" % doc_name)
# Convert XML to SQLite3
if use_raw:
parse_document(raw_folder, collection_folder, corpus_name, doc_name)
# create a bib
xml2db(collection_name, corpus_name, doc_name)
print("All Done!")
return answer
def import_xml(args):
if not args.file:
answer = convert_document(args.biblioteca, args.corpus, args.doc, answer=args.yes, active_only=args.active, use_raw=args.raw)
return answer
else:
xml2db(args.biblioteca, args.corpus, args.doc, archive_file=args.file)
def wipe_doc(args):
''' Delete all sentences in a document '''
bib = Biblioteca(args.biblioteca, root=args.root)
dao = bib.sqldao
# corpus = dao.get_corpus(args.corpus)
doc = dao.get_doc(args.doc)
sents = dao.get_sents(doc.ID)
if not args.yes:
ans = confirm("Do you really want to wipe out {} sentences in document {} (yes/no)? ".format(len(sents), doc.name))
if not ans:
print("Program aborted")
with dao.ctx() as ctx:
for sent in sents:
dao.delete_sent(sent.ID, ctx=ctx)
print("Done!")
def export_sqlite(args):
bib = Biblioteca(args.biblioteca, root=args.root)
dao = bib.sqldao
corpus = dao.get_corpus(args.corpus)
doc = dao.get_doc(args.doc)
if os.path.exists(args.filename):
print("Output path exists. Cannot export data")
return False
elif corpus is None or doc is None or corpus.ID != doc.corpusID:
print("Document does not exist ({}/{}/{} was provided)".format(args.biblioteca, args.corpus, args.doc))
else:
# found doc
sents = dao.get_sents(doc.ID)
doc_node = etree.Element("document")
doc_node.set("id", str(doc.ID))
doc_node.set("name", doc.name)
if doc.title:
doc_node.set("title", doc.title)
print("Reading sentences from SQLite")
for sentinfo in sents:
sent = dao.get_sent(sentinfo.ID)
sent_node = sent.to_xml_node() # to_isf().to_visko_xml()
doc_node.append(sent_node)
print("Saving {} sentences to {}".format(len(sents), args.filename))
with open(args.filename, 'wb') as outfile:
outfile.write(etree.tostring(doc_node, pretty_print=True, encoding="utf-8"))
print("Done")
def store_report(args):
bib = Biblioteca(args.biblioteca, root=args.root)
dao = bib.sqldao
corpus = dao.get_corpus(args.corpus)
doc = dao.get_doc(args.doc)
report_loc = bib.textdao.getCorpusDAO(args.corpus).getDocumentDAO(args.doc).path + ".report.xml"
warning_list = []
if not os.path.exists(report_loc):
print("There is no report to import.")
return False
elif corpus is None or doc is None or corpus.ID != doc.corpusID:
print("Document does not exist ({}/{}/{} was provided)".format(args.biblioteca, args.corpus, args.doc))
return False
else:
with dao.ctx() as ctx:
# read doc sents
sents = dao.get_sents(doc.ID, ctx=ctx)
sent_map = {s.ident: s for s in sents}
# read comments
tree = etree.iterparse(report_loc)
for event, element in tree:
if event == 'end' and element.tag == 'sentence':
id = element.get('ID')
ident = element.get('ident')
if ident in sent_map:
sent = sent_map[ident]
# Only import comments to sentences with empty comment
comment = element.find('comment').text
if comment and ident in sent_map and not sent_map[ident].comment:
print("comment to #{} ({}): {}".format(id, ident, comment))
dao.note_sentence(sent.ID, comment.strip(), ctx=ctx)
# import flag as well
flag = element.get('flag')
if flag:
if not sent.flag:
warning_list.append((sent.ident, sent.flag, flag))
print("Flag #{} with {}".format(sent.ID, flag))
dao.flag_sent(sent.ID, int(flag), ctx=ctx)
element.clear()
for w in warning_list:
print("WARNING: updating flag for #{} from {} to {}".format(*w))
def gen_report(args):
bib = Biblioteca(args.biblioteca, root=args.root)
dao = bib.sqldao
corpus = dao.get_corpus(args.corpus)
doc = dao.get_doc(args.doc)
report_loc = bib.textdao.getCorpusDAO(args.corpus).getDocumentDAO(args.doc).path + ".report.xml"
if os.path.exists(report_loc) and not confirm("Report file exists. Do you want to continue (Y/N)? "):
print("Program aborted.")
return False
elif corpus is None or doc is None or corpus.ID != doc.corpusID:
print("Document does not exist ({}/{}/{} was provided)".format(args.biblioteca, args.corpus, args.doc))
else:
# found doc
sents = dao.get_sents(doc.ID)
doc_node = etree.Element("document")
doc_node.set("id", str(doc.ID))
doc_node.set("collection", args.biblioteca)
doc_node.set("corpus", corpus.name)
doc_node.set("name", doc.name)
if doc.title:
doc_node.set("title", doc.title)
# save comments
for sent in sents:
if args.concise and not (sent.comment or sent.flag):
continue
sent_node = etree.SubElement(doc_node, 'sentence')
sent_node.set('ID', str(sent.ID))
sent_node.set('ident', str(sent.ident))
if sent.flag:
sent_node.set('flag', str(sent.flag))
text_node = etree.SubElement(sent_node, 'text')
text_node.text = sent.text
comment_node = etree.SubElement(sent_node, 'comment')
cmt = '\n{}\n'.format(sent.comment) if sent.comment else ''
comment_node.text = etree.CDATA(cmt)
print("Saving sentences to {}".format(report_loc))
with open(report_loc, 'wb') as outfile:
outfile.write(etree.tostring(doc_node, pretty_print=True, encoding="utf-8"))
print("Done")
def archive_doc(bib, corpus, doc_name, ctx=None):
doc = bib.sqldao.get_doc(doc_name, ctx=ctx)
if doc is None:
print("WARNING: Document {}/{}/{} does not exist".format(bib.name, corpus.name, doc_name))
return False
print("Backing up doc {}/{}/{}".format(bib.name, corpus.name, doc.name))
docDAO = bib.textdao.getCorpusDAO(corpus.name).getDocumentDAO(doc.name)
if docDAO.is_archived():
if not confirm("Archive for {}/{}/{} exists. Do you want to proceed (y/n)? ".format(bib.name, corpus.name, doc.name)):
print("Document {}/{}/{} is skipped.".format(args.biblioteca, corpus.name, doc.name))
return False
for s in bib.sqldao.get_sents(docID=doc.ID):
sent = bib.sqldao.get_sent(sentID=s.ID, ctx=ctx)
doc.add(sent)
print("Archiving ...")
docDAO = bib.textdao.getCorpusDAO(corpus.name).getDocumentDAO(doc.name)
docDAO.archive(doc)
print("Done")
def archive_corpus(bib, corpus, ctx):
header("Archiving corpus {}".format(corpus.name))
docs = bib.sqldao.get_docs(corpus.ID, ctx=ctx)
for doc in docs:
archive_doc(bib, corpus, doc.name, ctx=ctx)
def archive_collection(bib, ctx):
header("Archiving collection {}".format(bib.name), level="h0")
corpuses = ctx.corpus.select()
for corpus in corpuses:
archive_corpus(bib, corpus, ctx)
def archive_data(args):
bib = Biblioteca(args.biblioteca, root=args.root)
print("Archive info: collection={} | corpus={} | doc={}".format(args.biblioteca, args.corpus, args.doc))
with bib.sqldao.ctx() as ctx:
if args.corpus:
corpus = bib.sqldao.get_corpus(args.corpus, ctx=ctx)
if corpus is None:
print("WARNING: Corpus '{}' does not exist".format(args.corpus))
if args.doc:
archive_doc(bib, corpus, args.doc, ctx=ctx)
else:
print("Backup corpus: {}".format(corpus))
else:
print("Backup collection: {}".format(args.biblioteca))
archive_collection(bib, ctx)
def show_version(args):
if args.short:
print(__version__)
else:
print(f"Visual kopasu, version {__version__}")
def main():
parser = argparse.ArgumentParser(description="Visko toolbox")
tasks = parser.add_subparsers(help='Task to be done')
# import XML => SQLite
import_task = tasks.add_parser("import", help="Import sentences in XML format")
import_task.add_argument('biblioteca', help='Biblioteca name')
import_task.add_argument('corpus', help='Corpus name')
import_task.add_argument('doc', help='Document name')
import_task.add_argument('-f', '--file', help='XML file (a big XML file instead of many small XML files)')
import_task.add_argument('-a', '--active', help='Only import active readings', action='store_true')
import_task.add_argument('-R', '--raw', help='Import data in FCB format', action='store_true')
import_task.add_argument('-y', '--yes', help='Say yes to everything', action='store_true')
import_task.set_defaults(func=import_xml)
# Clear a document
wipe_task = tasks.add_parser("wipe", help="Delete all sentences in a document")
wipe_task.add_argument('biblioteca', help='Biblioteca name')
wipe_task.add_argument('corpus', help='Corpus name')
wipe_task.add_argument('doc', help='Document name')
wipe_task.add_argument('--root', help="Biblioteche root", default=vkconfig.BIBLIOTECHE_ROOT)
wipe_task.add_argument('-y', '--yes', help='Say yes to everything', action='store_true')
wipe_task.set_defaults(func=wipe_doc)
# archive document
archive_task = tasks.add_parser("archive", help="Archive data")
archive_task.add_argument('biblioteca', help='Biblioteca name')
archive_task.add_argument('corpus', help='Corpus name', nargs="?", default=None)
archive_task.add_argument('doc', help='Document name', nargs="?", default=None)
archive_task.add_argument('--root', help="Biblioteche root", default=vkconfig.BIBLIOTECHE_ROOT)
archive_task.set_defaults(func=archive_data)
# export SQLite => XML
export_task = tasks.add_parser("export", help="Export SQLite to XML")
export_task.add_argument('biblioteca', help='Biblioteca name')
export_task.add_argument('corpus', help='Corpus name')
export_task.add_argument('doc', help='Document name')
export_task.add_argument('filename', help='Backup filename')
export_task.add_argument('--root', help="Biblioteche root", default=vkconfig.BIBLIOTECHE_ROOT)
export_task.set_defaults(func=export_sqlite)
# generate report (using comments)
report_task = tasks.add_parser("report", help="Generate report")
report_task.add_argument('biblioteca', help='Biblioteca name')
report_task.add_argument('corpus', help='Corpus name')
report_task.add_argument('doc', help='Document name')
report_task.add_argument('--root', help="Biblioteche root", default=vkconfig.BIBLIOTECHE_ROOT)
report_task.add_argument('--concise', help="Only report commented sentences", default=True, action='store_true')
report_task.set_defaults(func=gen_report)
# store report into document
store_report_task = tasks.add_parser("comment", help="Import comments")
store_report_task.add_argument('biblioteca', help='Biblioteca name')
store_report_task.add_argument('corpus', help='Corpus name')
store_report_task.add_argument('doc', help='Document name')
store_report_task.add_argument('--root', help="Biblioteche root", default=vkconfig.BIBLIOTECHE_ROOT)
store_report_task.add_argument('--concise', help="Only report commented sentences", default=True, action='store_true')
store_report_task.set_defaults(func=store_report)
version_task = tasks.add_parser("version", help="Show version")
version_task.add_argument('-s', '--short', help="Show version only (e.g. '0.3.2')", default=False, action='store_true')
version_task.set_defaults(func=show_version)
if len(sys.argv) == 1:
# User didn't pass any value in, show help
parser.print_help()
else:
# Parse input arguments
args = parser.parse_args()
args.func(args)
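# Example invocations (illustrative only; the console-script name "visko"
# below is an assumption and may differ in the actual installation):
#   visko import MyBib MyCorpus MyDoc -f sentences.xml
#   visko archive MyBib
#   visko version --short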
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,573,911,064,198,840,300 | 42.498433 | 133 | 0.630657 | false |
admk/soap
|
tests/test_discoverer.py
|
1
|
2076
|
import unittest
from soap.context import context
from soap.datatype import float_type
from soap.expression import Variable
from soap.parser import parse
from soap.semantics.state.box import BoxState
from soap.semantics.state.meta import flow_to_meta_state
from soap.semantics.functions.arithmetic import arith_eval
from soap.semantics.functions.fixpoint import unroll_fix_expr
class TestUnroller(unittest.TestCase):
def test_unroll_fix_expr(self):
program = """
#pragma soap output x
float x = 0.0;
while (x < 10)
x = x + 1.0;
"""
x = Variable('x', float_type)
fix_expr = flow_to_meta_state(parse(program))[x]
unrolled = list(unroll_fix_expr(fix_expr, 2))
self.assertEqual(fix_expr, unrolled[0])
program = """
#pragma soap output x
float x = 0.0;
while (x < 10)
if (x + 1.0 < 10)
x = (x + 1.0) + 1.0;
else
x = x + 1.0;
"""
test_expr = flow_to_meta_state(parse(program))[x]
self.assertEqual(test_expr, unrolled[1])
def test_unroll_for_loop(self):
program = """
#pragma soap output x
float x = 1.0;
for (int i = 0; i < 9; i++)
x = x + 2.0;
"""
x = Variable('x', float_type)
fix_expr = flow_to_meta_state(parse(program))[x]
depth = 3
unrolled = list(unroll_fix_expr(fix_expr, depth))
program = """
#pragma soap output x
float x = 1.0;
for (int i = 0; i < 8; i += 2)
x = (x + 2.0) + 2.0;
x = x + 2.0;
"""
test_expr = flow_to_meta_state(parse(program))[x]
self.assertEqual(test_expr, unrolled[1])
inputs = BoxState(bottom=True)
with context.local(fast_factor=0):
for unrolled_expr in unrolled:
self.assertEqual(
arith_eval(fix_expr, inputs),
arith_eval(unrolled_expr, inputs))
class TestDiscoverer(unittest.TestCase):
pass
|
mit
| -4,344,007,507,859,457,000 | 30.454545 | 61 | 0.547688 | false |
echonesis/mAiLab_Class
|
mAiLab_0002/RandomNumber.py
|
1
|
1753
|
#!/usr/bin/python
def simpleGenRandomNumber(n, llimit=0, ulimit=1):
import random
result = [random.uniform(llimit, ulimit) for i in xrange(n)]
return result
if __name__ == '__main__':
# For basic questions
# Basic #1
# In this part, the built-in Python functions would be used.
num_random = 5
print 'Basic 1> Generate', num_random, 'Random Number'
print simpleGenRandomNumber(num_random)
# Basic #2
import numpy as np
import time
print 'Basic 2>'
n1 = [10**1, 10**2, 10**3, 10**4, 10**5]
usedTime = list()
for iCnt in n1:
t1 = time.time()
result1 = simpleGenRandomNumber(iCnt, -1, 1)
usedTime.append(time.time() - t1)
print 'Case for N =', iCnt
print 'Mean =', np.mean(result1), '; STD =', np.std(result1)
# Advanced #1
print 'Advanced 1>'
for i in range(len(n1)):
print 'Case for N =', n1[i]
print 'Used Sys Time =', usedTime[i], '(s)'
'''
Sample Output:
Basic 1> Generate 5 Random Number
[0.8517352415235713, 0.9608042046044872, 0.1512693660183837, 0.6074746239442333, 0.5267800150194317]
Basic 2>
Case for N = 10
Mean = -0.240647969028 ; STD = 0.424100623283
Case for N = 100
Mean = -0.0732104451873 ; STD = 0.596035030544
Case for N = 1000
Mean = 0.0287190524504 ; STD = 0.58627480244
Case for N = 10000
Mean = -0.00509101610347 ; STD = 0.578908223166
Case for N = 100000
Mean = 0.00342896915716 ; STD = 0.576555864097
Advanced 1>
Case for N = 10
Used Sys Time = 1.00135803223e-05 (s)
Case for N = 100
Used Sys Time = 4.10079956055e-05 (s)
Case for N = 1000
Used Sys Time = 0.000274896621704 (s)
Case for N = 10000
Used Sys Time = 0.00268888473511 (s)
Case for N = 100000
Used Sys Time = 0.0347440242767 (s)
'''
|
mit
| 6,842,228,498,127,733,000 | 27.737705 | 100 | 0.64575 | false |
kinegratii/minesweeper
|
minesweeper/helpers.py
|
1
|
1874
|
# coding=utf8
"""
This module contains some hard-coded data for level maps.
"""
from __future__ import unicode_literals
import random
from collections import OrderedDict
from core import Map
class GameHelpers(object):
@staticmethod
def create_from_mine_index_list(height, width, mine_index_list):
return Map(height, width, ((index // width, index % width) for index in mine_index_list))
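    # Illustrative note (added comment, not part of the original code): with
    # width=9, the flat mine index 10 maps to the cell (10 // 9, 10 % 9),
    # i.e. row 1, column 1.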
@staticmethod
def create_from_mine_number(height, width, mine_number):
map_size = height * width
mine_index_list = random.sample(range(0, map_size), mine_number)
return GameHelpers.create_from_mine_index_list(height, width, mine_index_list)
class LevelMapMeta(object):
def __init__(self, name, verbose, height, width, mine_number):
self.name = name
self.verbose = verbose
self.height = height
self.width = width
self.mine_number = mine_number
@property
def description(self):
return '{0}({1}x{2}-{3})'.format(self.verbose, self.height, self.width, self.mine_number)
class LevelConfig(object):
def __init__(self):
self.data = OrderedDict()
def add_level_map(self, name, **kwargs):
kwargs.update({'name': name})
self.data[name] = LevelMapMeta(**kwargs)
@property
def choices(self):
return [(l.name, l.description) for l in self.data.values()]
def map(self, name):
meta = self.data[name]
return GameHelpers.create_from_mine_number(meta.height, meta.width, meta.mine_number)
level_config = LevelConfig()
level_config.add_level_map(name='primary', verbose='初级', height=9, width=9, mine_number=10)
level_config.add_level_map(name='secondary', verbose='中级', height=20, width=30, mine_number=100)
level_config.add_level_map(name='tertiary', verbose='高级', height=25, width=40, mine_number=400)
|
gpl-2.0
| 7,142,724,297,714,212,000 | 31.666667 | 97 | 0.669173 | false |
tensorflow/benchmarks
|
scripts/tf_cnn_benchmarks/allreduce_test.py
|
1
|
19185
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_cnn_benchmark.allreduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as pycoll
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
import allreduce
class AllReduceTest(tf.test.TestCase):
def testGroupKey(self):
d0 = ['/job:worker/replica:0/task:0/device:GPU:1',
'/job:worker/replica:0/task:0/device:GPU:0',
'/job:worker/replica:0/task:0/device:GPU:3',]
d1 = ['/job:worker/replica:0/task:1/device:GPU:1',
'/job:worker/replica:0/task:1/device:GPU:0',
'/job:worker/replica:0/task:1/device:GPU:3',]
d2 = ['/job:worker/replica:0/task:1/device:GPU:1',
'/job:worker/replica:0/task:1/device:GPU:3',
'/job:worker/replica:0/task:1/device:GPU:0',]
d3 = ['/job:worker/replica:0/task:1/device:GPU:1',
'/job:worker/replica:0/task:1/device:GPU:3',
'/job:worker/replica:0/task:1/device:GPU:2',]
d4 = ['/job:worker/task:0/device:GPU:1',
'/job:worker/task:0/device:GPU:2',
'/job:worker/task:0/device:GPU:3',]
d5 = ['/job:worker/task:0/device:CPU:1',
'/job:worker/task:0/device:CPU:2']
d6 = ['/job:worker/task:0/device:CPU:2',
'/job:worker/task:0/device:CPU:1']
g0 = allreduce.collective_group_key(d0)
g1 = allreduce.collective_group_key(d1)
g2 = allreduce.collective_group_key(d2)
g3 = allreduce.collective_group_key(d3)
g4 = allreduce.collective_group_key(d4)
g5 = allreduce.collective_group_key(d5)
g6 = allreduce.collective_group_key(d6)
self.assertEqual(g0, g1)
self.assertEqual(g0, g2)
self.assertNotEqual(g0, g3)
self.assertEqual(g3, g4)
self.assertEqual(g5, g6)
self.assertNotEqual(g4, g5)
def testExtractRanges(self):
x = []
expected_ranges = []
expected_singles = []
ranges, singles = allreduce.extract_ranges(x)
self.assertEqual(expected_ranges, ranges)
self.assertEqual(expected_singles, singles)
x = [1, 3, 4, 6, 7, 8, 9]
expected_ranges = [[3, 4], [6, 9]]
expected_singles = [1]
ranges, singles = allreduce.extract_ranges(x)
self.assertEqual(expected_ranges, ranges)
self.assertEqual(expected_singles, singles)
x = [1, 2, 3, 4, 6, 7, 8, 9]
expected_ranges = [[1, 4], [6, 9]]
expected_singles = []
ranges, singles = allreduce.extract_ranges(x)
self.assertEqual(expected_ranges, ranges)
self.assertEqual(expected_singles, singles)
x = [1, 3, 4, 6, 7, 9]
expected_ranges = [[3, 4], [6, 7]]
expected_singles = [1, 9]
ranges, singles = allreduce.extract_ranges(x)
self.assertEqual(expected_ranges, ranges)
self.assertEqual(expected_singles, singles)
x = [1, 3, 6, 9]
expected_ranges = []
expected_singles = [1, 3, 6, 9]
ranges, singles = allreduce.extract_ranges(x)
self.assertEqual(expected_ranges, ranges)
self.assertEqual(expected_singles, singles)
def testPackRange(self):
packing = {}
t0 = tf.constant([0, 1, 2, 3], dtype=tf.float32)
t1 = tf.constant([4, 5, 6, 7], dtype=tf.float32)
gv = [(t0, 'v0'), (t1, 'v1')]
new_t = allreduce.pack_range('0:0', packing, gv, [0, 1])
self.assertEqual(1, new_t.shape.ndims)
self.assertEqual(8, new_t.shape.dims[0])
self.assertEqual(
packing, {
'0:0':
allreduce.GradPackTuple(
indices=range(2),
vars=['v0', 'v1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])])
})
t2 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
t3 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
gv = [(t0, 'v0'), (t1, 'v1'), (t2, 'v2'), (t3, 'v3')]
packing = {}
new_t = allreduce.pack_range('1:0', packing, gv, [0, 3])
self.assertEqual(1, new_t.shape.ndims)
self.assertEqual(26, new_t.shape.dims[0])
self.assertEqual(
packing, {
'1:0':
allreduce.GradPackTuple(
indices=range(4),
vars=['v0', 'v1', 'v2', 'v3'],
shapes=[
tf.TensorShape([4]),
tf.TensorShape([4]),
tf.TensorShape([3, 3]),
tf.TensorShape([3, 3])
])
})
def testUnpackGradTuple(self):
packing = {
'0:0':
allreduce.GradPackTuple(
indices=range(4),
vars=['v0', 'v1', 'v2', 'v3'],
shapes=[
tf.TensorShape([4]),
tf.TensorShape([4]),
tf.TensorShape([3, 3]),
tf.TensorShape([3, 3])
])
}
tc = tf.constant([0, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6, 7, 8,
0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=tf.float32)
packed_gv = [tc, 'packing_var_placeholder']
gv = allreduce.unpack_grad_tuple(packed_gv, packing['0:0'])
self.assertLen(gv, 4)
self.assertEqual('v0', gv[0][1])
self.assertEqual('v1', gv[1][1])
self.assertEqual('v2', gv[2][1])
self.assertEqual('v3', gv[3][1])
self.assertEqual(1, gv[0][0].shape.ndims)
self.assertEqual(4, gv[0][0].shape.dims[0])
self.assertEqual(1, gv[1][0].shape.ndims)
self.assertEqual(4, gv[1][0].shape.dims[0])
self.assertEqual(2, gv[2][0].shape.ndims)
self.assertEqual(3, gv[2][0].shape.dims[0])
self.assertEqual(3, gv[2][0].shape.dims[1])
def testPackSmallTensors(self):
t0 = tf.constant([0, 1, 2, 3], dtype=tf.float32)
t1 = tf.constant([4, 5, 6, 7], dtype=tf.float32)
t2 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
t3 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
tower_grads = []
for d in range(0, 3):
gv = [(t0, 'v_%d_0' % d), (t1, 'v_%d_1' %d), (t2, 'v_%d_2' %d),
(t3, 'v_%d_3' % d)]
tower_grads.append(gv)
# 1) Set the size limit so small that nothing gets concatenated.
new_tower_grads, packing = allreduce.pack_small_tensors(
tower_grads, max_bytes=12,
max_group=10)
self.assertEqual(tower_grads, new_tower_grads)
self.assertIs(packing, None)
# 2) Set the size limit so only the first two tensors get concatenated
new_tower_grads, packing = allreduce.pack_small_tensors(
tower_grads, max_bytes=16, # 16 bytes == 4 elements
max_group=10)
self.assertLen(new_tower_grads, 3)
self.assertLen(tower_grads[0], 4)
first_tower = new_tower_grads[0]
self.assertLen(first_tower, 3)
self.assertEqual(1, first_tower[0][0].shape.ndims)
self.assertEqual(8, first_tower[0][0].shape.dims[0])
self.assertEqual(packing,
{'0:0': allreduce.GradPackTuple(
indices=range(2),
vars=['v_0_0', 'v_0_1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])]),
'1:0': allreduce.GradPackTuple(
indices=range(2),
vars=['v_1_0', 'v_1_1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])]),
'2:0': allreduce.GradPackTuple(
indices=range(2),
vars=['v_2_0', 'v_2_1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])])})
# 3) Set the size limit so all tensors get concatenated
new_tower_grads, packing = allreduce.pack_small_tensors(
tower_grads, max_bytes=256, # bytes = 64 elements
max_group=10)
self.assertLen(new_tower_grads, 3)
self.assertLen(tower_grads[0], 4)
self.assertLen(new_tower_grads[0], 1)
first_tower = new_tower_grads[0]
self.assertEqual(1, first_tower[0][0].shape.ndims)
self.assertEqual(26, first_tower[0][0].shape.dims[0])
self.assertEqual(packing,
{'0:0': allreduce.GradPackTuple(
indices=range(4),
vars=['v_0_0', 'v_0_1', 'v_0_2', 'v_0_3'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4]),
tf.TensorShape([3, 3,]),
tf.TensorShape([3, 3,])]),
'1:0': allreduce.GradPackTuple(
indices=range(4),
vars=['v_1_0', 'v_1_1', 'v_1_2', 'v_1_3'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4]),
tf.TensorShape([3, 3,]),
tf.TensorShape([3, 3,])]),
'2:0': allreduce.GradPackTuple(
indices=range(4),
vars=['v_2_0', 'v_2_1', 'v_2_2', 'v_2_3'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4]),
tf.TensorShape([3, 3,]),
tf.TensorShape([3, 3,])])})
def testUnpackSmallTensors(self):
packing = {'0:0': allreduce.GradPackTuple(indices=range(2),
vars=['v_0_0', 'v_0_1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])]),
'0:1': allreduce.GradPackTuple(indices=range(3, 5),
vars=['v_0_3', 'v_0_4'],
shapes=[tf.TensorShape([3, 3,]),
tf.TensorShape([3, 3,])]),
'1:0': allreduce.GradPackTuple(indices=range(2),
vars=['v_1_0', 'v_1_1'],
shapes=[tf.TensorShape([4]),
tf.TensorShape([4])]),
'1:1': allreduce.GradPackTuple(indices=range(3, 5),
vars=['v_1_3', 'v_1_4'],
shapes=[tf.TensorShape([3, 3,]),
tf.TensorShape([3, 3,])])}
t0 = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32)
t1 = tf.constant([17, 17], dtype=tf.float32)
t2 = tf.constant([0, 1, 2, 3, 4, 5, 6, 7, 8,
0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=tf.float32)
t3 = tf.constant([0], dtype=tf.float32)
tower_grads = []
for d in range(0, 2):
one_tower = [(t0, 'packing_var_placeholder'),
(t2, 'packing_var_placeholder'),
(t1, 'v_%d_2' % d), (t3, 'v_%d_5' %d)]
tower_grads.append(one_tower)
new_tower_grads = allreduce.unpack_small_tensors(tower_grads, packing)
self.assertLen(new_tower_grads, 2)
for d, tg in enumerate(new_tower_grads):
self.assertLen(tg, 6)
self.assertEqual('v_%d_0' % d, tg[0][1])
self.assertEqual('v_%d_1' % d, tg[1][1])
self.assertEqual('v_%d_2' % d, tg[2][1])
self.assertEqual('v_%d_3' % d, tg[3][1])
self.assertEqual('v_%d_4' % d, tg[4][1])
self.assertEqual('v_%d_5' % d, tg[5][1])
self.assertEqual(1, tg[0][0].shape.ndims)
self.assertEqual(4, tg[0][0].shape.dims[0])
self.assertEqual(1, tg[1][0].shape.ndims)
self.assertEqual(4, tg[1][0].shape.dims[0])
self.assertEqual(1, tg[2][0].shape.ndims)
self.assertEqual(2, tg[2][0].shape.dims[0])
self.assertEqual(2, tg[3][0].shape.ndims)
self.assertEqual(3, tg[3][0].shape.dims[0])
self.assertEqual(3, tg[3][0].shape.dims[1])
self.assertEqual(2, tg[4][0].shape.ndims)
self.assertEqual(3, tg[4][0].shape.dims[0])
self.assertEqual(3, tg[4][0].shape.dims[1])
self.assertEqual(1, tg[5][0].shape.ndims)
self.assertEqual(1, tg[5][0].shape.dims[0])
class DynamicPackingTest(test_util.TensorFlowTestCase):
"""Packing/Unpacking tests that require executing a TensorFlow session."""
def _init_tensors(self, num_towers, tensor_shapes):
"""Construct a collection of tensors across multiple devices."""
num_tensors = len(tensor_shapes)
consts = []
tensors = []
vrbls = []
tower_grads = []
tf.Variable([-1], dtype=tf.int32, name='packing_var_placeholder')
for dev_idx in range(0, num_towers):
devname = '/job:localhost/device:GPU:%d' % dev_idx
consts.append([])
tensors.append([])
vrbls.append([])
with tf.device(devname):
base_value = 0
gv_tuples = []
for t_idx in range(0, num_tensors):
shape = tensor_shapes[t_idx]
num_elts = 0
for d in shape:
num_elts = (num_elts or 1) * d
c = np.fromiter(range(base_value, base_value + num_elts),
dtype=np.float32).reshape(shape)
base_value += num_elts
consts[dev_idx].append(c)
tensors[dev_idx].append(tf.constant(c))
vrbls[dev_idx].append(
tf.Variable(c, name='v_d%d_t%d' % (dev_idx, t_idx)))
gv_tuples.append((tensors[dev_idx][-1], vrbls[dev_idx][-1]))
tower_grads.append(gv_tuples)
return tower_grads, consts, tensors, vrbls
_test_tuple = pycoll.namedtuple('_test_tuple',
'num_devices, in_shapes out_shapes out_i')
def _do_pack_unpack_test(self, tt):
"""Do a single pack-unpack test.
Args:
tt: A _test_tuple defining the parameters of the test to do.
This test executes a graph that performs a pack of tower_grads
followed by an unpack and verifies that the shapes and values
of gradient tensors are unchanged, along with paired variables.
"""
with ops.Graph().as_default():
tower_grads, consts, _, vrbls = self._init_tensors(
tt.num_devices, tt.in_shapes)
packed_tg, packing = allreduce.pack_small_tensors(
tower_grads, max_bytes=40, max_group=10)
unpacked_tg = allreduce.unpack_small_tensors(packed_tg, packing)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
packed = sess.run(packed_tg)
for d in range(0, tt.num_devices):
for t in range(0, len(tt.out_shapes)):
num_elts = 0
for dim in tt.out_shapes[t]:
num_elts = (num_elts or 1) * dim
self.assertTrue(np.array_equal(
np.array(range(tt.out_i[t], tt.out_i[t] + num_elts),
dtype=np.float32).reshape(tt.out_shapes[t]),
packed[d][t][0]))
unpacked = sess.run(unpacked_tg)
for d in range(0, tt.num_devices):
for t in range(0, len(tt.in_shapes)):
self.assertTrue(np.array_equal(consts[d][t], unpacked[d][t][0]))
self.assertEqual(vrbls[d][t], unpacked_tg[d][t][1])
def testPackUnpack0(self):
self._do_pack_unpack_test(
self._test_tuple(num_devices=3,
in_shapes=[[8], [3, 3], [12], [5, 5, 5]],
out_shapes=[[17], [12], [5, 5, 5]],
out_i=[0, 17, 29]))
def testPackUnpack1(self):
self._do_pack_unpack_test(
self._test_tuple(num_devices=4,
in_shapes=[[5, 5, 5], [2, 3], [5]],
out_shapes=[[11], [5, 5, 5]],
out_i=[125, 0]))
def testPackUnpack2(self):
self._do_pack_unpack_test(
self._test_tuple(num_devices=2,
in_shapes=[[5, 5, 5], [2, 3], [1, 5], [7], [100]],
out_shapes=[[18], [5, 5, 5], [100]],
out_i=[125, 0, 143]))
def _do_all_reduce_pack_test(self, tt):
"""Test that all-reduce results are the same with or without packing."""
with ops.Graph().as_default():
tower_grads, consts, _, _ = self._init_tensors(
tt.num_devices, tt.in_shapes)
dev_prefixes = ['/job:localhost']
num_workers = 1
alg = 'xring'
shards = 1
single_session = True
gpu_indices = range(0, tt.num_devices)
assert len(gpu_indices) == len(tower_grads)
no_pack_all_reduce = allreduce.sum_gradients_all_reduce(
single_session,
dev_prefixes, tower_grads, num_workers, alg, shards,
gpu_indices,
agg_small_grads_max_bytes=0, agg_small_grads_max_group=1)
packed_tg, packing = allreduce.pack_small_tensors(tower_grads, 100, 100)
packed_all_reduce = allreduce.sum_gradients_all_reduce(
single_session,
dev_prefixes, packed_tg, num_workers, alg, shards,
gpu_indices,
agg_small_grads_max_bytes=0, agg_small_grads_max_group=1)
unpacked_tg = allreduce.unpack_small_tensors(packed_all_reduce, packing)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
no_pack_values = sess.run(no_pack_all_reduce)
pack_unpack_values = sess.run(unpacked_tg)
for d in range(1, tt.num_devices):
for t in range(0, len(tt.in_shapes)):
self.assertTrue(np.allclose(no_pack_values[d][t][0],
tt.num_devices * consts[0][t]))
self.assertTrue(np.array_equal(no_pack_values[d][t][0],
pack_unpack_values[d][t][0]))
def testAllReducePacked0(self):
self._do_all_reduce_pack_test(
self._test_tuple(num_devices=3,
in_shapes=[[8], [3, 3], [12], [5, 5, 5]],
out_shapes=[[17], [12], [5, 5, 5]],
out_i=[0, 17, 29]))
def testAllReducePacked1(self):
self._do_all_reduce_pack_test(
self._test_tuple(num_devices=2,
in_shapes=[[8], [3, 3], [12], [5, 5, 5], [3], [4]],
out_shapes=[[17], [7], [12], [5, 5, 5]],
out_i=[0, 17, 29, 154, 157]))
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
|
apache-2.0
| -2,967,735,806,149,767,700 | 41.823661 | 80 | 0.523065 | false |
Akasurde/virt-manager
|
virtinst/devicedisk.py
|
1
|
33899
|
#
# Classes for building disk device xml
#
# Copyright 2006-2008, 2012-2014 Red Hat, Inc.
# Jeremy Katz <katzj@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import os
import stat
import pwd
import subprocess
import logging
import re
from . import diskbackend
from . import util
from .device import VirtualDevice
from .seclabel import Seclabel
from .xmlbuilder import XMLChildProperty, XMLProperty
def _qemu_sanitize_drvtype(phystype, fmt, manual_format=False):
"""
Sanitize libvirt storage volume format to a valid qemu driver type
"""
raw_list = ["iso"]
if phystype == VirtualDisk.TYPE_BLOCK:
if not fmt:
return VirtualDisk.DRIVER_TYPE_RAW
if fmt and not manual_format:
return VirtualDisk.DRIVER_TYPE_RAW
if fmt in raw_list:
return VirtualDisk.DRIVER_TYPE_RAW
return fmt
def _is_dir_searchable(uid, username, path):
"""
Check if passed directory is searchable by uid
"""
if "VIRTINST_TEST_SUITE" in os.environ:
return True
try:
statinfo = os.stat(path)
except OSError:
return False
if uid == statinfo.st_uid:
flag = stat.S_IXUSR
elif uid == statinfo.st_gid:
flag = stat.S_IXGRP
else:
flag = stat.S_IXOTH
if bool(statinfo.st_mode & flag):
return True
# Check POSIX ACL (since that is what we use to 'fix' access)
cmd = ["getfacl", path]
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
except OSError:
logging.debug("Didn't find the getfacl command.")
return False
if proc.returncode != 0:
logging.debug("Cmd '%s' failed: %s", cmd, err)
return False
return bool(re.search("user:%s:..x" % username, out))
class VirtualDisk(VirtualDevice):
virtual_device_type = VirtualDevice.VIRTUAL_DEV_DISK
DRIVER_NAME_PHY = "phy"
DRIVER_NAME_QEMU = "qemu"
DRIVER_TYPE_RAW = "raw"
CACHE_MODE_NONE = "none"
CACHE_MODE_WRITETHROUGH = "writethrough"
CACHE_MODE_WRITEBACK = "writeback"
CACHE_MODE_DIRECTSYNC = "directsync"
CACHE_MODE_UNSAFE = "unsafe"
cache_types = [CACHE_MODE_NONE, CACHE_MODE_WRITETHROUGH,
CACHE_MODE_WRITEBACK, CACHE_MODE_DIRECTSYNC, CACHE_MODE_UNSAFE]
DISCARD_MODE_IGNORE = "ignore"
DISCARD_MODE_UNMAP = "unmap"
discard_types = [DISCARD_MODE_IGNORE, DISCARD_MODE_UNMAP]
DEVICE_DISK = "disk"
DEVICE_LUN = "lun"
DEVICE_CDROM = "cdrom"
DEVICE_FLOPPY = "floppy"
devices = [DEVICE_DISK, DEVICE_LUN, DEVICE_CDROM, DEVICE_FLOPPY]
TYPE_FILE = "file"
TYPE_BLOCK = "block"
TYPE_DIR = "dir"
TYPE_VOLUME = "volume"
TYPE_NETWORK = "network"
types = [TYPE_FILE, TYPE_BLOCK, TYPE_DIR, TYPE_NETWORK]
IO_MODE_NATIVE = "native"
IO_MODE_THREADS = "threads"
io_modes = [IO_MODE_NATIVE, IO_MODE_THREADS]
error_policies = ["ignore", "stop", "enospace", "report"]
@staticmethod
def disk_type_to_xen_driver_name(disk_type):
"""
        Convert a value of VirtualDisk.type to its associated Xen
<driver name=/> property
"""
if disk_type == VirtualDisk.TYPE_BLOCK:
return "phy"
elif disk_type == VirtualDisk.TYPE_FILE:
return "file"
return "file"
@staticmethod
def pretty_disk_bus(bus):
if bus in ["ide", "sata", "scsi", "usb", "sd"]:
return bus.upper()
if bus in ["xen"]:
return bus.capitalize()
if bus == "virtio":
return "VirtIO"
return bus
@staticmethod
def path_definitely_exists(conn, path):
"""
Check if path exists.
return True if we are certain, False otherwise. Path may in fact
exist if we return False, but we can't exhaustively know in all
cases.
(In fact if cached storage volume data is out of date, the volume
        may have disappeared behind our back, but that shouldn't have bad
effects in practice.)
"""
if path is None:
return False
try:
(vol, pool) = diskbackend.check_if_path_managed(conn, path)
ignore = pool
if vol:
return True
if not conn.is_remote():
return os.path.exists(path)
except:
pass
return False
@staticmethod
def check_path_search_for_user(conn, path, username):
"""
Check if the passed user has search permissions for all the
directories in the disk path.
@return: List of the directories the user cannot search, or empty list
@rtype : C{list}
"""
if path is None:
return []
if conn.is_remote():
return []
if username == "root":
return []
if diskbackend.path_is_url(path):
return []
try:
# Get UID for string name
uid = pwd.getpwnam(username)[2]
except Exception, e:
logging.debug("Error looking up username: %s", str(e))
return []
fixlist = []
if os.path.isdir(path):
dirname = path
base = "-"
else:
dirname, base = os.path.split(path)
while base:
if not _is_dir_searchable(uid, username, dirname):
fixlist.append(dirname)
dirname, base = os.path.split(dirname)
return fixlist
@staticmethod
def check_path_search(conn, path):
# Only works for qemu and DAC
if conn.is_remote() or not conn.is_qemu_system():
return None, []
from virtcli import CLIConfig
user = CLIConfig.default_qemu_user
try:
for secmodel in conn.caps.host.secmodels:
if secmodel.model != "dac":
continue
label = None
for baselabel in secmodel.baselabels:
if baselabel.type in ["qemu", "kvm"]:
label = baselabel.content
break
if not label:
continue
pwuid = pwd.getpwuid(
int(label.split(":")[0].replace("+", "")))
if pwuid:
user = pwuid[0]
except:
logging.debug("Exception grabbing qemu DAC user", exc_info=True)
return None, []
return user, VirtualDisk.check_path_search_for_user(conn, path, user)
@staticmethod
def fix_path_search_for_user(conn, path, username):
"""
Try to fix any permission problems found by check_path_search_for_user
@return: Return a dictionary of entries {broken path : error msg}
@rtype : C{dict}
"""
def fix_perms(dirname, useacl=True):
if useacl:
cmd = ["setfacl", "--modify", "user:%s:x" % username, dirname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
logging.debug("Ran command '%s'", cmd)
if out or err:
logging.debug("out=%s\nerr=%s", out, err)
if proc.returncode != 0:
raise ValueError(err)
else:
logging.debug("Setting +x on %s", dirname)
mode = os.stat(dirname).st_mode
newmode = mode | stat.S_IXOTH
os.chmod(dirname, newmode)
if os.stat(dirname).st_mode != newmode:
# Trying to change perms on vfat at least doesn't work
# but also doesn't seem to error. Try and detect that
raise ValueError(_("Permissions on '%s' did not stick") %
dirname)
fixlist = VirtualDisk.check_path_search_for_user(conn, path, username)
if not fixlist:
return []
fixlist.reverse()
errdict = {}
useacl = True
for dirname in fixlist:
try:
try:
fix_perms(dirname, useacl)
except:
# If acl fails, fall back to chmod and retry
if not useacl:
raise
useacl = False
logging.debug("setfacl failed, trying old fashioned way")
fix_perms(dirname, useacl)
except Exception, e:
errdict[dirname] = str(e)
return errdict
@staticmethod
def path_in_use_by(conn, path, shareable=False, read_only=False):
"""
Return a list of VM names that are using the passed path.
@param conn: virConnect to check VMs
@param path: Path to check for
@param shareable: Path we are checking is marked shareable, so
don't warn if it conflicts with another shareable source.
@param read_only: Path we are checking is marked read_only, so
don't warn if it conflicts with another read_only source.
"""
if not path:
return []
# Find all volumes that have 'path' somewhere in their backing chain
vols = []
volmap = dict((vol.backing_store, vol)
for vol in conn.fetch_all_vols() if vol.backing_store)
backpath = path
while backpath in volmap:
vol = volmap[backpath]
if vol in vols:
break
backpath = vol.target_path
vols.append(backpath)
ret = []
vms = conn.fetch_all_guests()
for vm in vms:
if not read_only:
if path in [vm.os.kernel, vm.os.initrd, vm.os.dtb]:
ret.append(vm.name)
continue
for disk in vm.get_devices("disk"):
if disk.path in vols and vm.name not in ret:
# VM uses the path indirectly via backing store
ret.append(vm.name)
break
if disk.path != path:
continue
if shareable and disk.shareable:
continue
if read_only and disk.read_only:
continue
ret.append(vm.name)
break
return ret
@staticmethod
def build_vol_install(conn, volname, poolobj, size, sparse,
fmt=None, backing_store=None):
"""
Helper for building a StorageVolume instance to pass to VirtualDisk
for eventual storage creation.
:param volname: name of the volume to be created
        :param size: size of the new volume in GiB (converted to bytes below)
"""
from .storage import StorageVolume
if size is None:
raise ValueError(_("Size must be specified for non "
"existent volume '%s'" % volname))
# This catches --disk /dev/idontexist,size=1 if /dev is unmanaged
if not poolobj:
raise RuntimeError(_("Don't know how to create storage for "
"path '%s'. Use libvirt APIs to manage the parent directory "
"as a pool first.") % volname)
logging.debug("Creating volume '%s' on pool '%s'",
volname, poolobj.name())
cap = (size * 1024 * 1024 * 1024)
if sparse:
alloc = 0
else:
alloc = cap
volinst = StorageVolume(conn)
volinst.pool = poolobj
volinst.name = volname
volinst.capacity = cap
volinst.allocation = alloc
volinst.backing_store = backing_store
if fmt:
if not volinst.supports_property("format"):
raise ValueError(_("Format attribute not supported for this "
"volume type"))
volinst.format = fmt
return volinst
@staticmethod
def num_to_target(num):
"""
Convert an index in range (1, 1024) to a disk /dev number
(like hda, hdb, hdaa, etc.)
"""
digits = []
for factor in range(0, 3):
amt = (num % (26 ** (factor + 1))) / (26 ** factor)
if amt == 0 and num >= (26 ** (factor + 1)):
amt = 26
num -= amt
digits.insert(0, amt)
seen_valid = False
gen_t = ""
for digit in digits:
if digit == 0:
if not seen_valid:
continue
digit = 1
seen_valid = True
gen_t += "%c" % (ord('a') + digit - 1)
return gen_t
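    # Illustrative mapping (added comment, not part of the original code):
    # num_to_target(1) == "a", num_to_target(26) == "z", and
    # num_to_target(27) == "aa", so with the "hd" prefix the generated
    # targets run hda, hdb, ..., hdz, hdaa, ...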
@staticmethod
def target_to_num(tgt):
"""
Convert disk /dev number (like hda, hdb, hdaa, etc.) to an index
"""
num = 0
k = 0
if tgt[0] == 'x':
# This case is here for 'xvda'
tgt = tgt[1:]
for i, c in enumerate(reversed(tgt[2:])):
if i != 0:
k = 1
num += (ord(c) - ord('a') + k) * (26 ** i)
return num
_XML_PROP_ORDER = [
"type", "device",
"driver_name", "driver_type",
"driver_cache", "driver_discard", "driver_io", "error_policy",
"_source_file", "_source_dev", "_source_dir",
"source_volume", "source_pool", "source_protocol", "source_name",
"source_host_name", "source_host_port",
"source_host_transport", "source_host_socket",
"target", "bus",
]
def __init__(self, *args, **kwargs):
VirtualDevice.__init__(self, *args, **kwargs)
self._storage_backend = None
self.storage_was_created = False
#############################
# Public property-esque API #
#############################
def _get_path(self):
if not self._storage_backend:
xmlpath = self._get_xmlpath()
if xmlpath:
return xmlpath
self._set_default_storage_backend()
return self._storage_backend.get_path()
def _set_path(self, newpath):
if (self._storage_backend and
self._storage_backend.will_create_storage()):
raise ValueError("Can't change disk path if storage creation info "
"has been set.")
# User explicitly changed 'path', so try to lookup its storage
# object since we may need it
(vol_object, parent_pool) = diskbackend.manage_path(self.conn, newpath)
self._change_backend(newpath, vol_object, parent_pool)
self._set_xmlpath(self.path)
path = property(_get_path, _set_path)
def set_vol_object(self, vol_object, parent_pool):
logging.debug("disk.set_vol_object: volxml=\n%s",
vol_object.XMLDesc(0))
logging.debug("disk.set_vol_object: poolxml=\n%s",
parent_pool.XMLDesc(0))
self._change_backend(None, vol_object, parent_pool)
self._set_xmlpath(self.path)
def set_vol_install(self, vol_install):
logging.debug("disk.set_vol_install: name=%s poolxml=\n%s",
vol_install.name, vol_install.pool.XMLDesc(0))
self._storage_backend = diskbackend.ManagedStorageCreator(
self.conn, vol_install)
self._set_xmlpath(self.path)
def get_vol_object(self):
return self._storage_backend.get_vol_object()
def get_vol_install(self):
return self._storage_backend.get_vol_install()
def get_parent_pool(self):
if self.get_vol_install():
return self.get_vol_install().pool
return self._storage_backend.get_parent_pool()
def get_size(self):
return self._storage_backend.get_size()
#############################
# Internal defaults helpers #
#############################
def _get_default_driver_name(self):
if not self.path:
return None
# Recommended xen defaults from here:
# https://bugzilla.redhat.com/show_bug.cgi?id=1171550#c9
# If type block, use name=phy. Otherwise do the same as qemu
if self.conn.is_xen() and self.type == self.TYPE_BLOCK:
return self.DRIVER_NAME_PHY
if self.conn.check_support(
self.conn.SUPPORT_CONN_DISK_DRIVER_NAME_QEMU):
return self.DRIVER_NAME_QEMU
return None
def _get_default_driver_type(self):
"""
Set driver type from passed parameters
Where possible, we want to force /driver/@type = "raw" if installing
a QEMU VM. Without telling QEMU to expect a raw file, the emulator
is forced to autodetect, which has security implications:
http://lists.gnu.org/archive/html/qemu-devel/2008-04/msg00675.html
"""
if self.driver_name != self.DRIVER_NAME_QEMU:
return None
drvtype = self._storage_backend.get_driver_type()
return _qemu_sanitize_drvtype(self.type, drvtype)
#############################
# XML source media handling #
#############################
_source_file = XMLProperty("./source/@file")
_source_dev = XMLProperty("./source/@dev")
_source_dir = XMLProperty("./source/@dir")
source_pool = XMLProperty("./source/@pool")
source_volume = XMLProperty("./source/@volume")
source_name = XMLProperty("./source/@name")
source_protocol = XMLProperty("./source/@protocol")
# Technically multiple host lines can be listed
source_host_name = XMLProperty("./source/host/@name")
source_host_port = XMLProperty("./source/host/@port", is_int=True)
source_host_transport = XMLProperty("./source/host/@transport")
source_host_socket = XMLProperty("./source/host/@socket")
def _set_source_network_from_url(self, uri):
from .uri import URI
uriobj = URI(uri)
if uriobj.scheme:
self.source_protocol = uriobj.scheme
if uriobj.transport:
self.source_host_transport = uriobj.transport
if uriobj.hostname:
self.source_host_name = uriobj.hostname
if uriobj.port:
self.source_host_port = uriobj.port
if uriobj.path:
if self.source_host_transport:
self.source_host_socket = uriobj.path
else:
self.source_name = uriobj.path
if self.source_name.startswith("/"):
self.source_name = self.source_name[1:]
def _set_source_network_from_storage(self, volxml, poolxml):
self.source_protocol = poolxml.type
if poolxml.hosts:
self.source_host_name = poolxml.hosts[0].name
self.source_host_port = poolxml.hosts[0].port
path = ""
if poolxml.source_name:
path += poolxml.source_name
if poolxml.source_path:
path += poolxml.source_path
if not path.endswith('/'):
path += "/"
path += volxml.name
self.source_name = path
self.type = "network"
def _set_network_source_from_backend(self):
if (self._storage_backend.get_vol_object() or
self._storage_backend.get_vol_install()):
volxml = self._storage_backend.get_vol_xml()
poolxml = self._storage_backend.get_parent_pool_xml()
self._set_source_network_from_storage(volxml, poolxml)
elif self._storage_backend.get_path():
self._set_source_network_from_url(self._storage_backend.get_path())
def _build_url_from_network_source(self):
ret = self.source_protocol
if self.source_host_transport:
ret += "+%s" % self.source_host_transport
ret += "://"
if self.source_host_name:
ret += self.source_host_name
if self.source_host_port:
ret += ":" + str(self.source_host_port)
if self.source_name:
if not self.source_name.startswith("/"):
ret += "/"
ret += self.source_name
elif self.source_host_socket:
if not self.source_host_socket.startswith("/"):
ret += "/"
ret += self.source_host_socket
return ret
def _get_default_type(self):
if self.source_pool or self.source_volume:
return VirtualDisk.TYPE_VOLUME
if self._storage_backend:
return self._storage_backend.get_dev_type()
if self.source_protocol:
return VirtualDisk.TYPE_NETWORK
return self.TYPE_FILE
type = XMLProperty("./@type", default_cb=_get_default_type)
def _clear_source_xml(self):
"""
Unset all XML properties that describe the actual source media
"""
self._source_file = None
self._source_dev = None
self._source_dir = None
self.source_volume = None
self.source_pool = None
self.source_name = None
self.source_protocol = None
self.source_host_name = None
self.source_host_port = None
self.source_host_transport = None
self.source_host_socket = None
def _disk_type_to_object_prop_name(self):
disk_type = self.type
if disk_type == VirtualDisk.TYPE_BLOCK:
return "_source_dev"
elif disk_type == VirtualDisk.TYPE_DIR:
return "_source_dir"
elif disk_type == VirtualDisk.TYPE_FILE:
return "_source_file"
return None
# _xmlpath is an abstraction for source file/block/dir paths, since
# they don't have any special properties aside from needing to match
# 'type' value with the source property used.
def _get_xmlpath(self):
if self._source_file:
return self._source_file
if self._source_dev:
return self._source_dev
if self._source_dir:
return self._source_dir
return None
def _set_xmlpath(self, val):
self._clear_source_xml()
if self._storage_backend.get_dev_type() == "network":
self._set_network_source_from_backend()
return
propname = self._disk_type_to_object_prop_name()
if not propname:
return
return setattr(self, propname, val)
##################
# XML properties #
##################
device = XMLProperty("./@device",
default_cb=lambda s: s.DEVICE_DISK)
driver_name = XMLProperty("./driver/@name",
default_cb=_get_default_driver_name)
driver_type = XMLProperty("./driver/@type",
default_cb=_get_default_driver_type)
sgio = XMLProperty("./@sgio")
bus = XMLProperty("./target/@bus")
target = XMLProperty("./target/@dev")
removable = XMLProperty("./target/@removable", is_onoff=True)
read_only = XMLProperty("./readonly", is_bool=True)
shareable = XMLProperty("./shareable", is_bool=True)
driver_cache = XMLProperty("./driver/@cache")
driver_discard = XMLProperty("./driver/@discard")
driver_io = XMLProperty("./driver/@io")
error_policy = XMLProperty("./driver/@error_policy")
serial = XMLProperty("./serial")
startup_policy = XMLProperty("./source/@startupPolicy")
iotune_rbs = XMLProperty("./iotune/read_bytes_sec", is_int=True)
iotune_ris = XMLProperty("./iotune/read_iops_sec", is_int=True)
iotune_tbs = XMLProperty("./iotune/total_bytes_sec", is_int=True)
iotune_tis = XMLProperty("./iotune/total_iops_sec", is_int=True)
iotune_wbs = XMLProperty("./iotune/write_bytes_sec", is_int=True)
iotune_wis = XMLProperty("./iotune/write_iops_sec", is_int=True)
seclabel = XMLChildProperty(Seclabel, relative_xpath="./source")
#################################
# Validation assistance methods #
#################################
def _set_default_storage_backend(self):
path = None
vol_object = None
parent_pool = None
typ = self._get_default_type()
if self.type == VirtualDisk.TYPE_NETWORK:
# Fill in a completed URL for virt-manager UI, path comparison, etc
path = self._build_url_from_network_source()
if typ == VirtualDisk.TYPE_VOLUME:
conn = self.conn
if "weakref" in str(type(conn)):
conn = conn()
try:
parent_pool = conn.storagePoolLookupByName(self.source_pool)
vol_object = parent_pool.storageVolLookupByName(
self.source_volume)
except:
logging.debug("Error fetching source pool=%s vol=%s",
self.source_pool, self.source_volume, exc_info=True)
if vol_object is None and path is None:
path = self._get_xmlpath()
self._change_backend(path, vol_object, parent_pool)
def set_local_disk_to_clone(self, disk, sparse):
"""
Set a path to manually clone (as in, not through libvirt)
"""
self._storage_backend = diskbackend.CloneStorageCreator(self.conn,
self.path, disk.path, disk.get_size(), sparse)
def is_cdrom(self):
return self.device == self.DEVICE_CDROM
def is_floppy(self):
return self.device == self.DEVICE_FLOPPY
def is_disk(self):
return self.device == self.DEVICE_DISK
def can_be_empty(self):
return self.is_floppy() or self.is_cdrom()
def _change_backend(self, path, vol_object, parent_pool):
backend = diskbackend.StorageBackend(self.conn, path,
vol_object, parent_pool)
self._storage_backend = backend
def sync_path_props(self):
"""
Fills in the values of type, driver_type, and driver_name for
the associated backing storage. This needs to be manually called
if changing an existing disk's media.
"""
path = self._get_xmlpath()
self.type = self._get_default_type()
self.driver_name = self._get_default_driver_name()
self.driver_type = self._get_default_driver_type()
# Need to retrigger this if self.type changed
if path:
self._set_xmlpath(path)
def wants_storage_creation(self):
"""
If true, this disk needs storage creation parameters or things
will error.
"""
return (self._storage_backend and
not self._storage_backend.exists())
def validate(self):
if self.path is None:
if not self.can_be_empty():
raise ValueError(_("Device type '%s' requires a path") %
self.device)
return
if (self.type == VirtualDisk.TYPE_DIR and
not self.is_floppy()):
raise ValueError(_("The path '%s' must be a file or a "
"device, not a directory") % self.path)
if not self._storage_backend:
return
if (not self._storage_backend.will_create_storage() and
not self._storage_backend.exists()):
raise ValueError(
_("Must specify storage creation parameters for "
"non-existent path '%s'.") % self.path)
self._storage_backend.validate(self)
def setup(self, meter=None):
"""
Build storage (if required)
If storage doesn't exist (a non-existent file 'path', or 'vol_install'
was specified), we create it.
"""
if not self._storage_backend.will_create_storage():
return
meter = util.ensure_meter(meter)
vol_object = self._storage_backend.create(meter)
self.storage_was_created = True
if not vol_object:
return
parent_pool = self.get_vol_install().pool
self._change_backend(None, vol_object, parent_pool)
def set_defaults(self, guest):
if self.is_cdrom():
self.read_only = True
if self.is_cdrom() and guest.os.is_s390x():
self.bus = "scsi"
if not self.conn.is_qemu():
return
if not self.is_disk():
return
if not self.type == self.TYPE_BLOCK:
return
# Enable cache=none and io=native for block devices. Would
# be nice if qemu did this for us but that time has long passed.
if not self.driver_cache:
self.driver_cache = self.CACHE_MODE_NONE
if not self.driver_io:
self.driver_io = self.IO_MODE_NATIVE
def is_size_conflict(self):
"""
reports if disk size conflicts with available space
returns a two element tuple:
1. first element is True if fatal conflict occurs
2. second element is a string description of the conflict or None
Non fatal conflicts (sparse disk exceeds available space) will
return (False, "description of collision")
"""
return self._storage_backend.is_size_conflict()
def is_conflict_disk(self, conn=None):
"""
check if specified storage is in use by any other VMs on passed
connection.
@return: list of colliding VM names
@rtype: C{list}
"""
if not self.path:
return False
if not conn:
conn = self.conn
ret = self.path_in_use_by(conn, self.path,
shareable=self.shareable,
read_only=self.read_only)
return ret
def get_target_prefix(self, used_targets=None):
"""
Returns the suggested disk target prefix (hd, xvd, sd ...) for the
disk.
@returns: str prefix, or None if no reasonable guess can be made
"""
        # The upper limits here aren't necessarily 1024, but let the HV
# error as appropriate.
def _return(prefix):
nummap = {
"vd": 1024,
"xvd": 1024,
"fd": 2,
"hd": 4,
"sd": 1024,
}
return prefix, nummap[prefix]
if self.bus == "virtio":
return _return("vd")
elif self.bus == "xen":
return _return("xvd")
elif self.bus == "fdc" or self.is_floppy():
return _return("fd")
elif self.bus == "ide":
return _return("hd")
elif self.bus or not used_targets:
# sata, scsi, usb, sd
return _return("sd")
# If guest already has some disks defined
preforder = ["vd", "xvd", "sd", "hd"]
for pref in preforder:
for target in used_targets:
if target.startswith(pref):
return _return(pref)
return _return("sd")
def generate_target(self, skip_targets, pref_ctrl=None):
"""
Generate target device ('hda', 'sdb', etc..) for disk, excluding
any targets in 'skip_targets'. If given the 'pref_ctrl'
parameter, it tries to select the target so that the disk is
mapped onto that controller.
Sets self.target, and returns the generated value.
@param skip_targets: list of targets to exclude
@type skip_targets: C{list}
@param pref_ctrl: preferred controller to connect the disk to
@type pref_ctrl: C{int}
@raise ValueError: can't determine target type, no targets available
@returns generated target
@rtype C{str}
"""
prefix, maxnode = self.get_target_prefix(skip_targets)
skip_targets = [t for t in skip_targets if t and t.startswith(prefix)]
skip_targets.sort()
def get_target():
first_found = None
ran = range(maxnode)
if pref_ctrl is not None:
# We assume narrow SCSI bus and libvirt assigning 7
# (1-7, 8-14, etc.) devices per controller
ran = range(pref_ctrl * 7, (pref_ctrl + 1) * 7)
for i in ran:
gen_t = prefix + self.num_to_target(i + 1)
if gen_t in skip_targets:
skip_targets.remove(gen_t)
continue
if not skip_targets:
return gen_t
elif not first_found:
first_found = gen_t
if first_found:
return first_found
ret = get_target()
if ret:
self.target = ret
return ret
if pref_ctrl is not None:
# This basically means that we either chose full
# controller or didn't add any
raise ValueError(_("Controller number %d for disk of type %s has "
"no empty slot to use" % (pref_ctrl, prefix)))
else:
raise ValueError(_("Only %s disks of type '%s' are supported"
% (maxnode, prefix)))
VirtualDisk.register_type()
|
gpl-2.0
| 6,410,544,415,410,951,000 | 32.39803 | 79 | 0.553291 | false |
Taffer/cryptopals
|
cryptopals/scoring.py
|
1
|
3026
|
# -*- coding: utf-8 -*-
''' Methods for scoring the relative goodness of strings.
Note that scores are relative to other scores *from the same method*.
Scores from one method have no relation to scores from a different method.
Created on Feb 10, 2015
@author: Chris
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import string
# Assumption is that mostly text and whitespace is good, but mostly
# punctuation is bad.
GOOD_CHARS = bytearray(string.ascii_letters)
GOOD_CHARS.extend(string.digits)
GOOD_CHARS.extend(string.whitespace)
BAD_CHARS = bytearray(string.punctuation)
PRINTABLE_CHARS = bytearray(string.printable)
def score_naiive(score_bytes):
    ''' Score the given byte string naively.
Assumes the target string is English.
Returns a score; scores are relative to each other and have no relation
to anything. Returns 0 if a string isn't made up of printable characters.
:param score_bytes: A bytearray.
'''
# Since we're looking at simple English phrases, anything that comes
# out all 7-bit ASCII printables is probably the right thing.
#
# To focus in on what we're after, we probably want a large ratio of
# letters and numbers to punctuation and whitespace.
score = 0
for c in score_bytes:
if c not in PRINTABLE_CHARS:
# Unprintable character, keep looking for that key...
return 0
if c in GOOD_CHARS:
score += 1
if c in BAD_CHARS:
score -= 1
return score
FREQUENT_CHARS_UPPER = bytearray('ETAOINSHRDLU'.encode('ascii'))
FREQUENT_CHARS_LOWER = bytearray('ETAOINSHRDLU'.lower().encode('ascii'))
def score_freq(score_bytes):
''' Score the given byte string by English letter frequency.
Assumes the target string is English. "ETAOIN SHRDLU"
Returns a score; scores are relative to each other and have no relation
to anything. Returns 0 if a string isn't made up of printable characters.
:param score_bytes: A string of bytes or bytearray.
'''
score = 0
for c in score_bytes:
if c not in PRINTABLE_CHARS:
return 0
if c in FREQUENT_CHARS_UPPER:
# More-frequent gives you more score.
score += len(FREQUENT_CHARS_UPPER) - FREQUENT_CHARS_UPPER.index(chr(c))
elif c in FREQUENT_CHARS_LOWER:
# More-frequent gives you more score.
score += len(FREQUENT_CHARS_LOWER) - FREQUENT_CHARS_LOWER.index(chr(c))
return score
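# Example (illustrative, added): score_freq(bytearray(b'ETA')) == 12 + 11 + 10
# == 33, since E, T and A are the three most frequent letters in the table
# above and more-frequent letters contribute a higher score.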
def hamming_distance(bytes1, bytes2):
''' Find the Hamming distance between two strings of bytes.
:param bytes1: A bytearray.
:param bytes2: A bytearray.
'''
if len(bytes1) != len(bytes2):
raise ValueError('Byte arrays must be the same length')
delta = bytearray([bytes1[x] ^ bytes2[x] for x in xrange(len(bytes1))])
return sum([bin(x).count('1') for x in delta])
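# Example (illustrative, added): hamming_distance(bytearray(b'this is a test'),
# bytearray(b'wokka wokka!!!')) returns 37.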
|
mit
| 5,847,930,100,003,295,000 | 29.852632 | 83 | 0.656642 | false |
MockyJoke/numbers
|
ex11/code/reddit_relative_hint.py
|
1
|
1921
|
import sys
from pyspark.sql import SparkSession, functions, types
spark = SparkSession.builder.appName('reddit relative scores').getOrCreate()
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.1' # make sure we have Spark 2.1+
schema = types.StructType([ # commented-out fields won't be read
#types.StructField('archived', types.BooleanType(), False),
types.StructField('author', types.StringType(), False),
#types.StructField('author_flair_css_class', types.StringType(), False),
#types.StructField('author_flair_text', types.StringType(), False),
#types.StructField('body', types.StringType(), False),
#types.StructField('controversiality', types.LongType(), False),
#types.StructField('created_utc', types.StringType(), False),
#types.StructField('distinguished', types.StringType(), False),
#types.StructField('downs', types.LongType(), False),
#types.StructField('edited', types.StringType(), False),
#types.StructField('gilded', types.LongType(), False),
#types.StructField('id', types.StringType(), False),
#types.StructField('link_id', types.StringType(), False),
#types.StructField('name', types.StringType(), False),
#types.StructField('parent_id', types.StringType(), True),
#types.StructField('retrieved_on', types.LongType(), False),
types.StructField('score', types.LongType(), False),
#types.StructField('score_hidden', types.BooleanType(), False),
types.StructField('subreddit', types.StringType(), False),
#types.StructField('subreddit_id', types.StringType(), False),
#types.StructField('ups', types.LongType(), False),
])
def main():
in_directory = sys.argv[1]
out_directory = sys.argv[2]
comments = spark.read.json(in_directory, schema=schema)
# TODO
#best_author.write.json(out_directory, mode='overwrite')
if __name__=='__main__':
main()
|
mit
| 8,161,441,920,440,513,000 | 40.76087 | 76 | 0.690265 | false |
steveYeah/amqpeek
|
tests/unit/cli/test_format_queues.py
|
1
|
1706
|
"""Tests for the correct reading of the queue config."""
from copy import deepcopy
from amqpeek.cli import build_queue_data
class TestFormatQueues:
"""Tests parsing of queue config."""
def test_dedup_queue_config(self, config_data: dict) -> None:
"""Tests handling of duplicate config entries in different formats.
my_queue is defined twice, both in queues and queue_limits
build_queue_data should dedup queues defined twice if their limits
are the same
"""
result = build_queue_data(config_data)
assert isinstance(result, list)
assert len(result) == 2
expected_queues = [("my_queue", 0), ("my_other_queue", 1)]
for excepted_queue in expected_queues:
assert excepted_queue in result
def test_just_queue_config(self, config_data: dict) -> None:
"""Test that queue config is parsed correctly."""
config_data = deepcopy(config_data)
del config_data["queue_limits"]
result = build_queue_data(config_data)
assert result == [("my_queue", 0)]
def test_just_queue_limits_config(self, config_data: dict) -> None:
"""Test that queue limits config is parsed correctly."""
config_data = deepcopy(config_data)
del config_data["queues"]
result = build_queue_data(config_data)
assert len(result) == 2
expected_queues = [("my_queue", 0), ("my_other_queue", 1)]
for excepted_queue in expected_queues:
assert excepted_queue in result
def test_no_queue_config(self) -> None:
"""Test handling of no queue config."""
result = build_queue_data({})
assert result == []
|
mit
| 3,850,180,313,175,439,400 | 31.807692 | 75 | 0.627198 | false |
rvianello/rdkit
|
rdkit/Chem/MCS.py
|
5
|
17304
|
# This work was funded by Roche and generously donated to the free
# and open source cheminformatics community.
import warnings
warnings.simplefilter('default', DeprecationWarning)
warnings.warn("The rdkit.Chem.MCS module is deprecated; please use rdkit.Chem.rdFMCS instead.",
DeprecationWarning,stacklevel=2)
## Copyright (c) 2012 Andrew Dalke Scientific AB
## Andrew Dalke <dalke@dalkescientific.com>
##
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from rdkit.Chem import fmcs
from rdkit.Chem.fmcs import Default
"""MCS - find a Maximum Common Substructure
This software finds the maximum common substructure of a set of
structures and reports it as a SMARTS string.
The SMARTS string depends on the desired match properties. For
example, if ring atoms are only allowed to match ring atoms then an
aliphatic ring carbon in the query is converted to the SMARTS "[C;R]",
and the double-bond ring bond converted to "=;@" while the respective
chain-only version are "[C;!R]" and "=;!@".
"""
# The simplified algorithm description is:
#
# best_substructure = None
# pick one structure as the query, and other as the targets
# for each substructure in the query graph:
# convert it to a SMARTS string based on the desired match properties
# if the SMARTS pattern exists in all of the targets:
# then this is a common substructure
# keep track of the maximum such common structure,
#
# The algorithm will usually take a long time. There are several
# ways to speed it up.
#
# == Bond elimination ==
#
# As the first step, remove bonds which obviously cannot be part of the
# MCS.
#
# This requires atom and bond type information, which I store as SMARTS
# patterns. A bond can only be in the MCS if its canonical bond type is
# present in all of the structures. A bond type is a string made of the
# SMARTS for one atom, the SMARTS for the bond, and the SMARTS for the
# other atom. The canonical bond type is the lexicographically smaller of
# the two possible bond types for a bond.
#
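# A minimal sketch of the canonical bondtype idea (the SMARTS strings below
# are hypothetical inputs, not the module's own helpers):
#
#   def canonical_bondtype(atom1_smarts, bond_smarts, atom2_smarts):
#       # keep the lexicographically smaller of the two orientations
#       forward = atom1_smarts + bond_smarts + atom2_smarts
#       backward = atom2_smarts + bond_smarts + atom1_smarts
#       return min(forward, backward)
#
#   # canonical_bondtype("[#8]", "-", "[#6]") -> "[#6]-[#8]"
#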
# The atom and bond SMARTS depend on the type comparison used.
#
# The "ring-matches-ring-only" option adds an "@" or "!@" to the bond
# SMARTS, so that the canonical bondtype for "C-C" becomes [#6]-@[#6] or
# [#6]-!@[#6] if the bond is in a ring or not in a ring, and if atoms
# are compared by element and bonds are compared by bondtype. (This
# option does not add "R" or "!R" to the atom SMARTS because there
# should be a single bond in the MCS of c1ccccc1O and CO.)
#
# The result of all of this atom and bond typing is a "TypedMolecule"
# for each input structure.
#
# I then find which canonical bondtypes are present in all of the
# structures. I convert each TypedMolecule into a
# FragmentedTypedMolecule which has the same atom information but only
# those bonds whose bondtypes are in all of the structures. This can
# break a structure into multiple, disconnected fragments, hence the
# name.
#
# (BTW, I would like to use the fragmented molecules as the targets
# because I think the SMARTS match would go faster, but the RDKit SMARTS
# matcher doesn't like them. I think it's because the new molecule
# hasn't been sanitized and the ring information in the underlying data
# structure doesn't exist. Instead, I use the input structures for the
# SMARTS match.)
#
# == Use the structure with the smallest largest fragment as the query ==
# == and sort the targets by the smallest largest fragment ==
#
# I pick one of the FragmentedTypedMolecule instances as the source of
# substructure enumeration. Which one?
#
# My heuristic is to use the one with the smallest largest fragment.
# Hopefully it produces the least number of subgraphs, but that's also
# related to the number of rings, so a large linear graph will produce
# fewer subgraphs than a small fused ring system. I don't know how to
# quantify that.
#
# For each of the fragmented structures, I find the number of atoms in
# the fragment with the most atoms, and I find the number of bonds in
# the fragment with the most bonds. These might not be the same
# fragment.
#
# I sort the input structures by the number of bonds in the largest
# fragment, with ties broken first on the number of atoms, and then on
# the input order. The smallest such structure is the query structure,
# and the remaining are the targets.
#
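# A minimal sketch of that ordering (the "frag_mols" list and its
# largest-fragment bond/atom counts are hypothetical names, not the module's
# real attributes):
#
#   order = sorted(range(len(frag_mols)),
#                  key=lambda i: (frag_mols[i].largest_frag_num_bonds,
#                                 frag_mols[i].largest_frag_num_atoms,
#                                 i))
#   query_idx, target_idxs = order[0], order[1:]
#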
# == Use a breadth-first search and a priority queue to ==
# == enumerate the fragment subgraphs ==
#
# I extract each of the fragments from the FragmentedTypedMolecule into
# a TypedFragment, which I use to make an EnumerationMolecule. An
# enumeration molecule contains a pair of directed edges for each atom,
# which simplifies the enumeration algorithm.
#
# The enumeration algorithm is based around growing a seed. A seed
# contains the current subgraph atoms and bonds as well as an exclusion
# set of bonds which cannot be used for future growth. The initial seed
# is the first bond in the fragment, which may potentially grow to use
# the entire fragment. The second seed is the second bond in the
# fragment, which is excluded from using the first bond in future
# growth. The third seed starts from the third bond, which may not use
# the first or second bonds during growth, and so on.
#
#
# A seed can grow along bonds connected to an atom in the seed but which
# aren't already in the seed and aren't in the set of excluded bonds for
# the seed. If there are no such bonds then subgraph enumeration ends
# for this fragment. Given N bonds there are 2**N-1 possible ways to
# grow, which is just the powerset of the available bonds, excluding the
# no-growth case.
#
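# A minimal sketch of enumerating those 2**N-1 growth choices with the
# standard library (the "bonds" argument is a hypothetical list of the
# currently available bonds):
#
#   from itertools import combinations
#   def growth_choices(bonds):
#       for k in range(1, len(bonds) + 1):   # k == 0 is the no-growth case
#           for subset in combinations(bonds, k):
#               yield subset
#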
# This breadth-first growth takes into account all possibilities of using
# the available N bonds so all of those bonds are added to the exclusion
# set of the newly expanded subgraphs.
#
# For performance reasons, the bonds used for growth are separated into
# 'internal' bonds, which connect two atoms already in the subgraph, and
# 'external' bonds, which lead outwards to an atom not already in the
# subgraph.
#
# Each seed growth can add from 0 to N new atoms and bonds. The goal is
# to maximize the subgraph size so the seeds are stored in a priority
# queue, ranked so the seed with the most bonds is processed first. This
# turns the enumeration into something more like a depth-first search.
#
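# A minimal sketch of such a queue with heapq (the seed objects, their bond
# counts and the tie-breaking counter are hypothetical; the key is negated
# because Python's heapq is a min-heap):
#
#   import heapq
#   heap = []
#   heapq.heappush(heap, (-seed.num_bonds, tie_breaker, seed))
#   _, _, biggest_seed = heapq.heappop(heap)   # the seed with the most bonds
#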
#
# == Prune seeds which aren't found in all of the structures ==
#
# At each stage of seed growth I check that the new seed exists in all
# of the original structures. (Well, all except the one which I
# enumerate over in the first place; by definition that one will match.)
# If it doesn't match then there's no reason to include this seed or any
# larger seeds made from it.
#
# The check is easy; I turn the subgraph into its corresponding SMARTS
# string and use RDKit's normal SMARTS matcher to test for a match.
#
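# A minimal sketch of that check (the "smarts" string and "targets" list of
# molecules are hypothetical; MolFromSmarts/HasSubstructMatch are the usual
# RDKit calls):
#
#   from rdkit import Chem
#   pattern = Chem.MolFromSmarts(smarts)
#   still_common = all(t.HasSubstructMatch(pattern) for t in targets)
#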
# There are three ways to generate a SMARTS string: 1) arbitrary, 2)
# canonical, 3) hybrid.
#
# I have not tested #1. During most of the development I assumed that
# SMARTS matches across a few hundred structures would be slow, so that
# the best solution is to generate a *canonical* SMARTS and cache the
# match information.
#
# Well, it turns out that my canonical SMARTS match code takes up most
# of the MCS run-time. If I drop the canonicalization step then the
# code averages about 5-10% faster. This isn't the same as #1 - I still
# do the initial atom assignment based on its neighborhood, which is
# like a circular fingerprint of size 2 and *usually* gives a consistent
# SMARTS pattern, which I can then cache.
#
# However, there are times when the non-canonical SMARTS code is slower.
# Obviously one is if there are a lot of structures, and another is if
# there is a lot of symmetry. I'm still working on characterizing this.
#
#
# == Maximize atoms? or bonds? ==
#
# The above algorithm enumerates all subgraphs of the query and
# identifies those subgraphs which are common to all input structures.
#
# It's trivial then to keep track of the current "best" subgraph, which
# can be defined as having the subgraph with the most atoms, or the most
# bonds. Both of those options are implemented.
#
# It would not be hard to keep track of all other subgraphs which are
# the same size.
#
# == complete_ring_only implementation ==
#
# The "complete ring only" option is implemented by first enabling the
# "ring-matches-ring-only" option, as otherwise it doesn't make sense.
#
# Second, in order to be a "best" subgraph, all bonds in the subgraph
# which are ring bonds in the original molecule must also be in a ring
# in the subgraph. This is handled as a post-processing step.
#
# (Note: some possible optimizations, like removing ring bonds from
# structure fragments which are not in a ring, are not yet implemented.)
#
#
# == Prune seeds which have no potential for growing large enough ==
#
# Given a seed, its set of edges available for growth, and the set of
# excluded bonds, figure out the maximum possible growth for the seed.
# If this maximum possible is less than the current best subgraph then
# prune.
#
# This requires a graph search, currently done in Python, which is a bit
# expensive. To speed things up, I precompute some edge information.
# That is, if I know that a given bond is a chain bond (not in a ring)
# then I can calculate the maximum number of atoms and bonds for seed
# growth along that bond, in either direction. However, precomputation
# doesn't take into account the excluded bonds, so after a while the
# predicted value is too high.
#
# Again, I'm still working on characterizing this, and an implementation
# in C++ would have different tradeoffs.
__all__ = ["FindMCS"]
########## Main driver for the MCS code
class MCSResult(object):
def __init__(self, obj):
self.numAtoms = obj.num_atoms
self.numBonds = obj.num_bonds
self.smarts = obj.smarts
self.completed = obj.completed
def __nonzero__(self):
return self.smarts is not None
def __repr__(self):
return "MCSResult(numAtoms=%d, numBonds=%d, smarts=%r, completed=%d)" % (
self.numAtoms, self.numBonds, self.smarts, self.completed)
def __str__(self):
msg = "MCS %r has %d atoms and %d bonds" % (self.smarts, self.numAtoms, self.numBonds)
if not self.completed:
msg += " (timed out)"
return msg
def FindMCS(mols,
minNumAtoms=2,
maximize=Default.maximize,
atomCompare=Default.atomCompare,
bondCompare=Default.bondCompare,
matchValences=Default.matchValences,
ringMatchesRingOnly=False,
completeRingsOnly=False,
timeout=Default.timeout,
threshold=None, ):
"""Find the maximum common substructure of a set of molecules
In the simplest case, pass in a list of molecules and get back
an MCSResult object which describes the MCS:
>>> from rdkit import Chem
>>> mols = [Chem.MolFromSmiles("C#CCP"), Chem.MolFromSmiles("C=CCO")]
>>> from rdkit.Chem import MCS
>>> MCS.FindMCS(mols)
MCSResult(numAtoms=2, numBonds=1, smarts='[#6]-[#6]', completed=1)
The SMARTS '[#6]-[#6]' matches the largest common substructure of
the input structures. It has 2 atoms and 1 bond. If there is no
MCS which is at least `minNumAtoms` in size then the result will set
numAtoms and numBonds to -1 and set smarts to None.
By default, two atoms match if they are the same element and two
bonds match if they have the same bond type. Specify `atomCompare`
and `bondCompare` to use different comparison functions, as in:
>>> MCS.FindMCS(mols, atomCompare="any")
MCSResult(numAtoms=3, numBonds=2, smarts='[*]-[*]-[*]', completed=1)
>>> MCS.FindMCS(mols, bondCompare="any")
MCSResult(numAtoms=3, numBonds=2, smarts='[#6]~[#6]~[#6]', completed=1)
An atomCompare of "any" says that any atom matches any other atom,
"elements" compares by element type, and "isotopes" matches based on
the isotope label. Isotope labels can be used to implement user-defined
atom types. A bondCompare of "any" says that any bond matches any
other bond, and "bondtypes" says bonds are equivalent if and only if
they have the same bond type.
A substructure has both atoms and bonds. The default `maximize`
setting of "atoms" finds a common substructure with the most number
of atoms. Use maximize="bonds" to maximize the number of bonds.
Maximizing the number of bonds tends to maximize the number of rings,
although two small rings may have fewer bonds than one large ring.
You might not want a 3-valent nitrogen to match one which is 5-valent.
The default `matchValences` value of False ignores valence information.
When True, the atomCompare setting is modified to also require that
the two atoms have the same valency.
>>> MCS.FindMCS(mols, matchValences=True)
MCSResult(numAtoms=2, numBonds=1, smarts='[#6v4]-[#6v4]', completed=1)
It can be strange to see a linear carbon chain match a carbon ring,
which is what the `ringMatchesRingOnly` default of False does. If
you set it to True then ring bonds will only match ring bonds.
>>> mols = [Chem.MolFromSmiles("C1CCC1CCC"), Chem.MolFromSmiles("C1CCCCCC1")]
>>> MCS.FindMCS(mols)
MCSResult(numAtoms=7, numBonds=6, smarts='[#6]-[#6]-[#6]-[#6]-[#6]-[#6]-[#6]', completed=1)
>>> MCS.FindMCS(mols, ringMatchesRingOnly=True)
MCSResult(numAtoms=4, numBonds=3, smarts='[#6](-@[#6])-@[#6]-@[#6]', completed=1)
You can further restrict things and require that partial rings
(as in this case) are not allowed. That is, if an atom is part of
the MCS and the atom is in a ring of the entire molecule then
that atom is also in a ring of the MCS. Set `completeRingsOnly`
  to True to enforce this requirement; doing so also sets ringMatchesRingOnly
to True.
>>> mols = [Chem.MolFromSmiles("CCC1CC2C1CN2"), Chem.MolFromSmiles("C1CC2C1CC2")]
>>> MCS.FindMCS(mols)
MCSResult(numAtoms=6, numBonds=6, smarts='[#6]-1-[#6]-[#6](-[#6])-[#6]-1-[#6]', completed=1)
>>> MCS.FindMCS(mols, ringMatchesRingOnly=True)
MCSResult(numAtoms=5, numBonds=5, smarts='[#6]-@1-@[#6]-@[#6](-@[#6])-@[#6]-@1', completed=1)
>>> MCS.FindMCS(mols, completeRingsOnly=True)
MCSResult(numAtoms=4, numBonds=4, smarts='[#6]-@1-@[#6]-@[#6]-@[#6]-@1', completed=1)
The MCS algorithm will exhaustively search for a maximum common substructure.
Typically this takes a fraction of a second, but for some comparisons this
can take minutes or longer. Use the `timeout` parameter to stop the search
after the given number of seconds (wall-clock seconds, not CPU seconds) and
return the best match found in that time. If timeout is reached then the
`completed` property of the MCSResult will be 0 instead of 1.
>>> mols = [Chem.MolFromSmiles("Nc1ccccc1"*100), Chem.MolFromSmiles("Nc1ccccccccc1"*100)]
>>> MCS.FindMCS(mols, timeout=0.1)
MCSResult(..., completed=0)
(The MCS after 50 seconds contained 511 atoms.)
"""
warnings.warn("The rdkit.Chem.MCS module is deprecated; please use rdkit.Chem.rdFMCS instead.",
DeprecationWarning,stacklevel=2)
ores = fmcs.fmcs(mols,
minNumAtoms=minNumAtoms,
maximize=maximize,
atomCompare=atomCompare,
bondCompare=bondCompare,
threshold=threshold,
matchValences=matchValences,
ringMatchesRingOnly=ringMatchesRingOnly,
completeRingsOnly=completeRingsOnly,
timeout=timeout, )
return MCSResult(ores)
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"],
optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE)
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
|
bsd-3-clause
| -4,654,737,956,135,602,000 | 43.713178 | 97 | 0.718793 | false |
Stefan-Korner/SpacePyLibrary
|
UTIL/BCH.py
|
1
|
4660
|
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Utilities - BCH Encoding *
# *
# The CLTU encoding is based on a (63,56) modified *
# Bose-Chaudhuri-Hocquenghem (BCH) code. *
# The implementation of the BCH encoding is performed with a constant *
# galois field to ensure good performance. *
#******************************************************************************
#############
# constants #
#############
# the BCH encoding divides the data into code blocks,
# each with 7 netto bytes and 1 check byte
#
# +---+---+---+---+---+---+---+---+
# |BY1|BY2|BY3|BY4|BY5|BY6|BY7|CHK|
# +---+---+---+---+---+---+---+---+
#
# size of the BCH code block
CODE_BLOCK_SIZE = 8
# galois field with all possible shift register states/transitions
# this static table speeds up the BCH encoding / checking
# it is a 128 x 256 bytes field:
# the 1st index [0]...[127] defines the possible states of the shift register
# the 2nd index [0]...[255] defines the possible input values
# the values [0]...[127] defines the next state of the shift register
#
# | 0 | 1 | 2 | ... | 255
# ----+------+------+------+-----+-----
# 0 | 0x00 | 0x45 | 0x45 | ... | 0x59
# ----+------+------+------+-----+-----
# 1 | 0x4F | 0x0A | 0x0A | ... | 0x16
# ----+------+------+------+-----+-----
# 2 | 0x5B | 0x1E | 0x1E | ... | 0x02
# ----+------+------+------+-----+-----
# : | : | : | : | | :
# ----+------+------+------+-----+-----
# 127 | 0x1C | 0x59 | 0x59 | ... | 0x45
#
s_shiftRegisterStateTransitions = []
#############
# functions #
#############
# -----------------------------------------------------------------------------
def generateShiftRegisterValues():
"""generates the values of the galois field"""
global s_shiftRegisterStateTransitions
for sregState in range(0, 128):
transitionField = []
for value in range(0, 256):
sreg = sregState # handle the next shift register state
mask = 0x80
while mask != 0:
sreg <<= 1 # shift 7 bits in shift register left
overflow = (sreg & 0x80) > 0 # check if overflow
if (value & mask) > 0: # add the value with the mask
overflow = not overflow # join with overflow
if overflow: # depending on overflow
sreg ^= 0x45 # add bits 0, 2, 6
        mask >>= 1 # move the mask to the next lower input bit
sreg &= 0x7F # keep 7 bits
transitionField.append(sreg)
s_shiftRegisterStateTransitions.append(transitionField)
# -----------------------------------------------------------------------------
def encodeStart():
"""starts the BCH encoding with the initial shift register state"""
return 0
# -----------------------------------------------------------------------------
def encodeStep(sreg, value):
  """performs an incremental step in the BCH encoding: 1,...,7 """
global s_shiftRegisterStateTransitions
return s_shiftRegisterStateTransitions[sreg][value]
# -----------------------------------------------------------------------------
def encodeStop(sreg):
"""final step: returns the BCH code from the shift register state"""
sreg ^= 0xFF # invert the shift register state
sreg <<= 1 # make it the 7 most sign. bits
return (sreg & 0xFE) # filter the 7 most sign bits
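# -----------------------------------------------------------------------------
# Minimal usage sketch (the 7 byte code block below is made up, not part of
# this module): encode one code block and append its BCH check byte.
#
#   block = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07]
#   sreg = encodeStart()
#   for byte in block:
#     sreg = encodeStep(sreg, byte)
#   block.append(encodeStop(sreg))   # len(block) == CODE_BLOCK_SIZE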
###########################
# Initialisation sequence #
###########################
# initialise the galois field
generateShiftRegisterValues()
|
mit
| 9,777,268,574,965,220 | 47.041237 | 79 | 0.441845 | false |
UoA-eResearch/saga-gis
|
saga-gis/src/scripting/python/examples/06_xyz2shp.py
|
1
|
2459
|
#! /usr/bin/env python
import saga_api, sys, os
##########################################
def xyz2shp(fTable):
table = saga_api.SG_Get_Data_Manager().Add_Table()
if table.Create(saga_api.CSG_String(fTable)) == 0:
table.Add_Field(saga_api.CSG_String('X'), saga_api.SG_DATATYPE_Double)
table.Add_Field(saga_api.CSG_String('Y'), saga_api.SG_DATATYPE_Double)
table.Add_Field(saga_api.CSG_String('Z'), saga_api.SG_DATATYPE_Double)
rec = table.Add_Record()
rec.Set_Value(0,0)
rec.Set_Value(1,0)
rec.Set_Value(2,2)
rec = table.Add_Record()
rec.Set_Value(0,0)
rec.Set_Value(1,1)
rec.Set_Value(2,2)
rec = table.Add_Record()
rec.Set_Value(0,1)
rec.Set_Value(1,1)
rec.Set_Value(2,1)
rec = table.Add_Record()
rec.Set_Value(0,1)
rec.Set_Value(1,0)
rec.Set_Value(2,1)
points = saga_api.SG_Get_Data_Manager().Add_Shapes(saga_api.SHAPE_TYPE_Point)
# ------------------------------------
if os.name == 'nt': # Windows
saga_api.SG_Get_Module_Library_Manager().Add_Library(os.environ['SAGA_32' ] + '/modules/shapes_points.dll')
else: # Linux
saga_api.SG_Get_Module_Library_Manager().Add_Library(os.environ['SAGA_MLB'] + '/libshapes_points.so')
m = saga_api.SG_Get_Module_Library_Manager().Get_Module(saga_api.CSG_String('shapes_points'), 0) # 'Convert Table to Points'
p = m.Get_Parameters()
p(saga_api.CSG_String('TABLE' )).Set_Value(table)
p(saga_api.CSG_String('POINTS')).Set_Value(points)
p(saga_api.CSG_String('X' )).Set_Value(0)
p(saga_api.CSG_String('Y' )).Set_Value(1)
if m.Execute() == 0:
print 'ERROR: executing module [' + m.Get_Name().c_str() + ']'
return 0
# ------------------------------------
points.Save(saga_api.CSG_String(fTable))
print 'success'
return 1
##########################################
if __name__ == '__main__':
print 'Python - Version ' + sys.version
print saga_api.SAGA_API_Get_Version()
print
if len( sys.argv ) != 2:
print 'Usage: xyz2shp.py <in: x/y/z-data as text or dbase table>'
print '... trying to run with test_data'
fTable = './test_pts_xyz.xyz'
else:
fTable = sys.argv[ 1 ]
if os.path.split(fTable)[0] == '':
fTable = './' + fTable
xyz2shp(fTable)
|
gpl-3.0
| -6,035,837,658,102,959,000 | 34.637681 | 133 | 0.537617 | false |
akrherz/iem
|
htdocs/plotting/auto/scripts100/p153.py
|
1
|
6880
|
"""Highest hourly values"""
from collections import OrderedDict
import datetime
import pandas as pd
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.exceptions import NoDataFound
PDICT = OrderedDict(
[
("max_dwpf", "Highest Dew Point Temperature"),
("min_dwpf", "Lowest Dew Point Temperature"),
("max_tmpf", "Highest Air Temperature"),
("min_tmpf", "Lowest Air Temperature"),
("max_feel", "Highest Feels Like Temperature"),
("min_feel", "Lowest Feels Like Temperature"),
("max_mslp", "Maximum Sea Level Pressure"),
("min_mslp", "Minimum Sea Level Pressure"),
("max_alti", "Maximum Pressure Altimeter"),
("min_alti", "Minimum Pressure Altimeter"),
]
)
UNITS = {
"max_dwpf": "F",
"max_tmpf": "F",
"min_dwpf": "F",
"min_tmpf": "F",
"min_feel": "F",
"max_feel": "F",
"max_mslp": "mb",
"min_mslp": "mb",
"max_alti": "in",
"min_alti": "in",
}
MDICT = OrderedDict(
[
("all", "No Month Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("gs", "1 May to 30 Sep"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This table presents the extreme hourly value of
some variable of your choice based on available observations maintained
by the IEM. Sadly, this app will likely point out some bad data points
as such points tend to be obvious at extremes. If you contact us to
point out troubles, we'll certainly attempt to fix the archive to
    remove the bad data points. Observations are arbitrarily bumped 10
    minutes into the future to place the near-top-of-the-hour obs on
that hour. For example, a 9:53 AM observation becomes the ob for 10 AM.
"""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="AMW",
network="IA_ASOS",
label="Select Station:",
),
dict(
type="select",
name="month",
default="all",
options=MDICT,
label="Select Month/Season/All",
),
dict(
type="select",
name="var",
options=PDICT,
default="max_dwpf",
label="Which Variable to Plot",
),
]
return desc
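# A minimal sketch of the 10 minute bump described above (the timestamp is a
# made-up example, not data from the archive):
#
#   import datetime
#   ob = datetime.datetime(2020, 1, 1, 9, 53)
#   hourly = (ob + datetime.timedelta(minutes=10)).replace(minute=0)  # 10 AM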
def plotter(fdict):
""" Go """
font0 = FontProperties()
font0.set_family("monospace")
font0.set_size(16)
font1 = FontProperties()
font1.set_size(16)
pgconn = get_dbconn("asos")
ctx = get_autoplot_context(fdict, get_description())
varname = ctx["var"]
varname2 = varname.split("_")[1]
if varname2 in ["dwpf", "tmpf", "feel"]:
varname2 = "i" + varname2
month = ctx["month"]
station = ctx["zstation"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
elif month == "gs":
months = [5, 6, 7, 8, 9]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
        months = [ts.month, 999]
df = read_sql(
f"""
WITH obs as (
SELECT (valid + '10 minutes'::interval) at time zone %s as ts,
tmpf::int as itmpf, dwpf::int as idwpf,
feel::int as ifeel, mslp, alti from alldata
where station = %s and
extract(month from valid at time zone %s) in %s),
agg1 as (
SELECT extract(hour from ts) as hr,
max(idwpf) as max_dwpf,
max(itmpf) as max_tmpf,
min(idwpf) as min_dwpf,
min(itmpf) as min_tmpf,
min(ifeel) as min_feel,
max(ifeel) as max_feel,
max(alti) as max_alti,
min(alti) as min_alti,
max(mslp) as max_mslp,
min(mslp) as min_mslp
from obs GROUP by hr)
SELECT o.ts, a.hr::int as hr,
a.{varname} from agg1 a JOIN obs o on
(a.hr = extract(hour from o.ts)
and a.{varname} = o.{varname2})
ORDER by a.hr ASC, o.ts DESC
""",
pgconn,
params=(
ctx["_nt"].sts[station]["tzname"],
station,
ctx["_nt"].sts[station]["tzname"],
tuple(months),
),
index_col=None,
)
if df.empty:
raise NoDataFound("No Data was found.")
y0 = 0.1
yheight = 0.8
dy = yheight / 24.0
(fig, ax) = plt.subplots(1, 1, figsize=(8, 8))
ax.set_position([0.12, y0, 0.57, yheight])
ax.barh(df["hr"], df[varname], align="center")
ax.set_ylim(-0.5, 23.5)
ax.set_yticks([0, 4, 8, 12, 16, 20])
ax.set_yticklabels(["Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM"])
ax.grid(True)
ax.set_xlim([df[varname].min() - 5, df[varname].max() + 5])
ax.set_ylabel(
"Local Time %s" % (ctx["_nt"].sts[station]["tzname"],),
fontproperties=font1,
)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata")
fig.text(
0.5,
0.93,
("%s [%s] %s-%s\n" "%s [%s]")
% (
ctx["_nt"].sts[station]["name"],
station,
ab.year,
datetime.date.today().year,
PDICT[varname],
MDICT[month],
),
ha="center",
fontproperties=font1,
)
ypos = y0 + (dy / 2.0)
for hr in range(24):
sdf = df[df["hr"] == hr]
if sdf.empty:
continue
row = sdf.iloc[0]
fig.text(
0.7,
ypos,
"%3.0f: %s%s"
% (
row[varname],
pd.Timestamp(row["ts"]).strftime("%d %b %Y"),
("*" if len(sdf.index) > 1 else ""),
),
fontproperties=font0,
va="center",
)
ypos += dy
ax.set_xlabel(
"%s %s, * denotes ties" % (PDICT[varname], UNITS[varname]),
fontproperties=font1,
)
return plt.gcf(), df
if __name__ == "__main__":
plotter(dict())
|
mit
| -6,262,800,647,354,188,000 | 28.029536 | 76 | 0.5125 | false |
rahulsekar/iplytics
|
app/detl.py
|
1
|
4587
|
import requests
import json
import os.path
import csv
def master_csv():
return 'data/master.csv'
def download_scorecard(year, game, redo=False):
fname = 'data/extracts/%d-%02d.json' % (year, game)
if os.path.isfile(fname) and not redo:
return fname
url = 'http://datacdn.iplt20.com/dynamic/data/core/cricket/2012/ipl%d/ipl%d-%02d/scoring.js' % (year, year, game)
r = requests.get(url)
if r.text.startswith('onScoring(') and r.text.endswith(');'):
content = r.text[10:-2]
f = open(fname, 'w')
f.write(content)
f.close()
return fname
return ''
def extract_stats(filename):
content = json.load(open(filename))
matchInfo = content['matchInfo']
common = {
'year' : int(content['matchId']['tournamentId']['name'][3:]),
'game' : int(content['matchId']['name'][-2:]),
'venueID' : matchInfo['venue']['id'],
'venueName' : matchInfo['venue']['fullName'],
}
if matchInfo['matchStatus']['text'] == 'Match abandoned without a ball bowled':
return []
players = extract_players(matchInfo['teams'][0],
matchInfo['battingOrder'][0])
players.update(extract_players(matchInfo['teams'][1],
matchInfo['battingOrder'][1]))
inn1 = extract_innings(content['innings'][0])
inn2 = extract_innings(content['innings'][1])
rows = []
for playerId in players:
row = dict(players[playerId], **common)
row = dict(row, **inn1.get(playerId, {}))
row = dict(row, **inn2.get(playerId, {}))
rows.append(row)
schema = dict.fromkeys(reduce(list.__add__,
[row.keys() for row in rows]))
ret = [dict(schema, **row) for row in rows]
return ret
def extract_players(team, battingOrder):
ret = {}
common = {
'teamID' : team['team']['id'],
'teamName' : team['team']['abbreviation'],
'bat_innings' : battingOrder + 1
}
for player in team['players']:
ret[player['id']] = {
'playerID' : player['id'],
'playerName' : player['shortName'],
}
ret[player['id']].update(common)
return ret
def extract_innings(innings):
ret = {}
for bt in innings['scorecard']['battingStats']:
out = '*'
if 'mod' in bt:
out = bt['mod']['dismissedMethod']
data = {
'bat_r':bt['r'], 'bat_b':bt['b'],
'bat_sr':bt.get('sr', None),
'bat_4s':bt['4s'], 'bat_6s':bt['6s'], 'bat_out':out,
}
ret[bt['playerId']] = data
for bl in innings['scorecard']['bowlingStats']:
data = {
'bowl_r':bl['r'], 'bowl_w':bl['w'], 'bowl_ov':bl['ov'],
'bowl_e':bl['e'], 'bowl_nb':bl['nb'], 'bowl_d':bl['d'],
'bowl_md':bl['maid'], 'bowl_wd':bl['wd']
}
ret[bl['playerId']] = data
return ret
def make_csv(rows):
mastercsv = master_csv()
writeHeader = False
if not os.path.isfile(mastercsv):
writeHeader = True
fh = open(mastercsv, 'a')
csvwriter = csv.writer(fh)
keys = rows[0].keys()
keys.sort()
if writeHeader:
csvwriter.writerow(keys)
for row in rows:
row_list = [row[key] for key in keys]
csvwriter.writerow(row_list)
fh.close()
def data_exists(year, game):
mastercsv = master_csv()
if not os.path.isfile(mastercsv):
return False
fh = open(mastercsv)
csvreader = csv.DictReader(fh)
count = sum([1 for row in csvreader
if row['year'] == str(year)
and row['game'] == str(game)])
return count == 22
def detl():
games = {
2014 : 60,
2013 : 76,
2012 : 76
}
for year in games:
for game in range(1, games[year]+1):
print 'Processing %d %02d' % (year, game)
if data_exists(year, game):
print('\tData is already loaded\n')
continue
print '\tDownloading...',
f = download_scorecard(year, game)
print 'done'
print '\tExtracting...',
rows = extract_stats(f)
print 'done'
print '\tTransforming & loading...',
if len(rows):
make_csv(rows)
print 'done'
else:
print 'nothing to load, match probably abandoned.'
if __name__ == '__main__':
detl()
|
mit
| -1,113,001,072,501,218,300 | 26.303571 | 117 | 0.514279 | false |
technige/py2neo
|
test/integration/test_node.py
|
1
|
4417
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2021, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import Node, Relationship
from py2neo.compat import long
def test_single_node_creation(graph):
a = Node("Person", name="Alice")
assert a.labels == {"Person"}
assert a["name"] == "Alice"
graph.create(a)
assert isinstance(a.identity, int)
assert graph.exists(a)
def test_can_create_local_node(graph):
a = Node("Person", name="Alice", age=33)
assert set(a.labels) == {"Person"}
assert dict(a) == {"name": "Alice", "age": 33}
def test_can_create_remote_node(graph):
a = Node("Person", name="Alice", age=33)
graph.create(a)
assert set(a.labels) == {"Person"}
assert dict(a) == {"name": "Alice", "age": 33}
def test_bound_node_equals_unbound_node_with_same_properties(graph):
alice_1 = Node(name="Alice")
alice_1.graph = graph
alice_1.identity = 999
alice_2 = Node(name="Alice")
assert set(alice_1.labels) == set(alice_2.labels)
assert dict(alice_1) == dict(alice_2)
def test_bound_node_equality(graph):
alice_1 = Node(name="Alice")
alice_1.graph = graph
alice_1.identity = 999
alice_2 = Node(name="Alice")
alice_2.graph = alice_1.graph
alice_2.identity = alice_1.identity
assert alice_1 == alice_2
def test_unbound_node_equality(graph):
alice_1 = Node("Person", name="Alice")
alice_2 = Node("Person", name="Alice")
assert set(alice_1.labels) == set(alice_2.labels)
assert dict(alice_1) == dict(alice_2)
def test_can_merge_unsaved_changes_when_querying_node(graph):
a = Node("Person", name="Alice")
b = Node()
graph.create(a | b | Relationship(a, "KNOWS", b))
assert dict(a) == {"name": "Alice"}
a["age"] = 33
assert dict(a) == {"name": "Alice", "age": 33}
_ = list(graph.match((a, None), "KNOWS"))
assert dict(a) == {"name": "Alice", "age": 33}
def test_pull_node_labels_if_stale(graph):
a = Node("Thing")
graph.create(a)
a.remove_label("Thing")
a._stale.add("labels")
labels = a.labels
assert set(labels) == {"Thing"}
def test_pull_node_property_if_stale(graph):
a = Node(foo="bar")
graph.create(a)
a["foo"] = None
a._stale.add("properties")
assert a["foo"] == "bar"
def test_can_create_concrete_node(graph):
alice = Node(name="Alice", age=34)
graph.create(alice)
assert isinstance(alice, Node)
assert alice["name"] == "Alice"
assert alice["age"] == 34
def test_all_property_types(graph):
data = {
"nun": None,
"yes": True,
"no": False,
"int": 42,
"float": 3.141592653589,
"long": long("9223372036854775807"),
"str": "hello, world",
"unicode": u"hello, world",
"boolean_list": [True, False, True, True, False],
"int_list": [1, 1, 2, 3, 5, 8, 13, 21, 35],
"str_list": ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
}
foo = Node(**data)
graph.create(foo)
for key, value in data.items():
assert foo[key] == value
def test_node_hashes(graph):
node_1 = Node("Person", name="Alice")
node_1.graph = graph
node_1.identity = 999
node_2 = Node("Person", name="Alice")
node_2.graph = node_1.graph
node_2.identity = node_1.identity
assert node_1 is not node_2
assert hash(node_1) == hash(node_2)
def test_cannot_delete_uncreated_node(graph):
a = Node()
graph.delete(a)
def test_node_exists(graph):
a = Node()
graph.create(a)
assert graph.exists(a)
def test_node_does_not_exist(graph):
a = Node()
assert not graph.exists(a)
def test_can_name_using_name_property(graph):
a = Node(name="Alice")
assert a.__name__ == "Alice"
def test_can_name_using_magic_name_property(graph):
a = Node(__name__="Alice")
assert a.__name__ == "Alice"
|
apache-2.0
| 7,801,800,200,833,103,000 | 26.434783 | 84 | 0.618746 | false |
NilsJPWerner/Sublet-Uchicago
|
accounts/forms.py
|
1
|
3140
|
from django import forms
from .models import ExtendedUser
from allauth.account.forms import UserForm, PasswordField, SetPasswordField
from allauth.account import app_settings
from allauth.account.adapter import get_adapter
from allauth.account.models import EmailAddress
from allauth.account.utils import filter_users_by_email
class ExtendedUserForm(forms.ModelForm):
class Meta:
model = ExtendedUser
fields = ('first_name', 'last_name', 'profile_picture', 'phone_number', 'uni_division', 'home_town', 'description',)
widgets = {
"profile_picture": forms.FileInput(attrs={'style': 'display: none;'}),
"uni_division": forms.Select(attrs={'class': 'ui dropdown'}),
}
# Subclassed to get rid of placeholder data
class ChangePasswordFormModified(UserForm):
oldpassword = PasswordField(label=(""))
password1 = SetPasswordField(label=(""))
password2 = PasswordField(label=(""))
def clean_oldpassword(self):
if not self.user.check_password(self.cleaned_data.get("oldpassword")):
raise forms.ValidationError(("Please type your current"
" password."))
return self.cleaned_data["oldpassword"]
def clean_password2(self):
if ("password1" in self.cleaned_data and "password2" in self.cleaned_data):
if (self.cleaned_data["password1"] != self.cleaned_data["password2"]):
raise forms.ValidationError(("You must type the same password"
" each time."))
return self.cleaned_data["password2"]
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class AddEmailFormCombined(UserForm):
add_email = forms.EmailField(label=("E-mail"),
required=True,
widget=forms.TextInput(attrs={"type": "email",
"size": "30"}))
def clean_email(self):
value = self.cleaned_data["add_email"]
value = get_adapter().clean_email(value)
errors = {
"this_account": ("This e-mail address is already associated"
" with this account."),
"different_account": ("This e-mail address is already associated"
" with another account."),
}
users = filter_users_by_email(value)
on_this_account = [u for u in users if u.pk == self.user.pk]
on_diff_account = [u for u in users if u.pk != self.user.pk]
if on_this_account:
raise forms.ValidationError(errors["this_account"])
if on_diff_account and app_settings.UNIQUE_EMAIL:
raise forms.ValidationError(errors["different_account"])
return value
def save(self, request):
return EmailAddress.objects.add_email(request,
self.user,
self.cleaned_data["add_email"],
confirm=True)
|
gpl-2.0
| 1,834,477,134,348,089,900 | 40.88 | 124 | 0.577707 | false |
karllark/dust_extinction
|
dust_extinction/helpers.py
|
1
|
1863
|
from __future__ import absolute_import, print_function, division
import warnings
import numpy as np
import astropy.units as u
__all__ = ["_get_x_in_wavenumbers", "_test_valid_x_range"]
def _get_x_in_wavenumbers(in_x):
"""
    Convert input x to wavenumbers given x has units.
    Otherwise, assume x is in wavenumbers and issue a warning to this effect.
Parameters
----------
in_x : astropy.quantity or simple floats
x values
Returns
-------
x : floats
input x values in wavenumbers w/o units
"""
    # handles the case where x is a scalar
in_x = np.atleast_1d(in_x)
# check if in_x is an astropy quantity, if not issue a warning
if not isinstance(in_x, u.Quantity):
warnings.warn(
"x has no units, assuming x units are inverse microns", UserWarning
)
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(in_x, 1.0 / u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
    # polynomial coefficients
return x_quant.value
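# A minimal usage sketch (the wavelength values are made up): both calls
# below yield x in inverse microns; the second one first emits the
# "no units" warning.
#
#   _get_x_in_wavenumbers([0.55, 1.0] * u.micron)  # ~[1.82, 1.0] 1/micron
#   _get_x_in_wavenumbers([1.818, 1.0])            # assumed 1/micron already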
def _test_valid_x_range(x, x_range, outname):
"""
Test if any of the x values are outside of the valid range
Parameters
----------
x : float array
wavenumbers in inverse microns
x_range: 2 floats
allowed min/max of x
outname: str
name of curve for error message
"""
if np.logical_or(np.any(x < x_range[0]), np.any(x > x_range[1])):
raise ValueError(
"Input x outside of range defined for "
+ outname
+ " ["
+ str(x_range[0])
+ " <= x <= "
+ str(x_range[1])
+ ", x has units 1/micron]"
)
|
bsd-3-clause
| -973,880,790,089,943,900 | 26 | 79 | 0.595276 | false |
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/security/_help.py
|
2
|
39766
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
helps['security'] = """
type: group
short-summary: Manage your security posture with Azure Security Center.
"""
helps['security alert'] = """
type: group
short-summary: View security alerts.
"""
helps['security alert list'] = """
type: command
short-summary: List security alerts.
examples:
- name: Get security alerts on a subscription scope.
text: >
az security alert list
- name: Get security alerts on a resource group scope.
text: >
az security alert list -g "myRg"
"""
helps['security alert show'] = """
type: command
short-summary: Shows a security alert.
examples:
- name: Get a security alert on a subscription scope.
text: >
az security alert show --location "centralus" -n "alertName"
- name: Get a security alert on a resource group scope.
text: >
az security alert show -g "myRg" --location "centralus" -n "alertName"
"""
helps['security alert update'] = """
type: command
short-summary: Updates a security alert status.
examples:
- name: Dismiss a security alert on a subscription scope.
text: >
az security alert update --location "centralus" -n "alertName" --status "dismiss"
- name: Dismiss a security alert on a resource group scope.
text: >
az security alert update -g "myRg" --location "centralus" -n "alertName" --status "dismiss"
  - name: Activate a security alert on a subscription scope.
text: >
az security alert update --location "centralus" -n "alertName" --status "activate"
- name: Activate a security alert on a resource group scope.
text: >
az security alert update -g "myRg" --location "centralus" -n "alertName" --status "activate"
"""
helps['security atp'] = """
type: group
short-summary: View and manage Advanced Threat Protection settings.
"""
helps['security atp storage'] = """
type: group
short-summary: View and manage Advanced Threat Protection settings for storage accounts.
"""
helps['security atp storage show'] = """
type: command
short-summary: Display Advanced Threat Protection settings for a storage account.
examples:
- name: Retrieve Advanced Threat Protection settings for a storage account on a subscription scope.
text: >
az security atp storage show --resource-group MyResourceGroup --storage-account MyStorageAccount
"""
helps['security atp storage update'] = """
type: command
short-summary: Toggle status of Advanced Threat Protection for a storage account.
examples:
- name: Enable Advanced Threat Protection for a storage account on a subscription scope.
text: >
az security atp storage update --resource-group MyResourceGroup --storage-account MyStorageAccount --is-enabled true
- name: Disable Advanced Threat Protection for a storage account on a subscription scope.
text: >
az security atp storage update --resource-group MyResourceGroup --storage-account MyStorageAccount --is-enabled false
"""
helps['security va sql'] = """
type: group
short-summary: View Sql Vulnerability Assessment scan results and manage baseline.
"""
helps['security va sql scans'] = """
type: group
short-summary: View Sql Vulnerability Assessment scan summaries.
"""
helps['security va sql scans show'] = """
type: command
short-summary: View Sql Vulnerability Assessment scan summaries.
examples:
- name: View Sql Vulnerability Assessment scan summary on an Azure virtual machine.
text: >
az security va sql scans show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --scan-id MyScanId
- name: View Sql Vulnerability Assessment scan summary on an On-Premise machine.
text: >
az security va sql scans show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --scan-id MyScanId
"""
helps['security va sql scans list'] = """
type: command
short-summary: List all Sql Vulnerability Assessment scan summaries.
examples:
- name: List all Sql Vulnerability Assessment scan summaries on an Azure virtual machine.
text: >
az security va sql scans list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName
- name: List all Sql Vulnerability Assessment scan summaries on an On-Premise machine.
text: >
az security va sql scans list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID
"""
helps['security va sql results'] = """
type: group
short-summary: View Sql Vulnerability Assessment scan results.
"""
helps['security va sql results show'] = """
type: command
short-summary: View Sql Vulnerability Assessment scan results.
examples:
- name: View Sql Vulnerability Assessment scan results on an Azure virtual machine.
text: >
az security va sql results show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --scan-id MyScanId --rule-id VA9999
- name: View Sql Vulnerability Assessment scan results on an On-Premise machine.
text: >
az security va sql results show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --scan-id MyScanId --rule-id VA9999
"""
helps['security va sql results list'] = """
type: command
short-summary: View all Sql Vulnerability Assessment scan results.
examples:
- name: View all Sql Vulnerability Assessment scan results on an Azure virtual machine.
text: >
az security va sql results list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --scan-id MyScanId
- name: View all Sql Vulnerability Assessment scan results on an On-Premise machine.
text: >
az security va sql results list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --scan-id MyScanId
"""
helps['security va sql baseline'] = """
type: group
short-summary: View and manage Sql Vulnerability Assessment baseline.
"""
helps['security va sql baseline show'] = """
type: command
short-summary: View Sql Vulnerability Assessment rule baseline.
examples:
- name: View Sql Vulnerability Assessment rule baseline on an Azure virtual machine.
text: >
az security va sql baseline show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --rule-id VA9999
- name: View Sql Vulnerability Assessment rule baseline on an On-Premise machine.
text: >
az security va sql baseline show --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --rule-id VA9999
"""
helps['security va sql baseline list'] = """
type: command
short-summary: View Sql Vulnerability Assessment baseline for all rules.
examples:
- name: View Sql Vulnerability Assessment baseline for all rules on an Azure virtual machine.
text: >
az security va sql baseline list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName
- name: View Sql Vulnerability Assessment baseline for all rules on an On-Premise machine.
text: >
az security va sql baseline list --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID
"""
helps['security va sql baseline delete'] = """
type: command
short-summary: Delete Sql Vulnerability Assessment rule baseline.
examples:
- name: Delete Sql Vulnerability Assessment rule baseline on an Azure virtual machine.
text: >
az security va sql baseline delete --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --rule-id VA9999
- name: Delete Sql Vulnerability Assessment rule baseline on an On-Premise machine.
text: >
az security va sql baseline delete --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --rule-id VA9999
"""
helps['security va sql baseline update'] = """
type: command
short-summary: Update Sql Vulnerability Assessment rule baseline. Replaces the current rule baseline.
examples:
- name: Update Sql Vulnerability Assessment rule baseline on an Azure virtual machine. Replaces the current rule baseline with latest scan results.
text: >
az security va sql baseline update --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --rule-id VA9999 --latest
- name: Update Sql Vulnerability Assessment rule baseline on an Azure virtual machine. Replaces the current rule baseline with provided results.
text: >
az security va sql baseline update --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --rule-id VA9999 --baseline Line1_Col1 Line1_Col2 --baseline Line2_Col1 Line2_Col2
- name: Update Sql Vulnerability Assessment rule baseline on an On-Premise machine. Replaces the current rule baseline with latest scan results.
text: >
az security va sql baseline update --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --rule-id VA9999 --latest
- name: Update Sql Vulnerability Assessment rule baseline on an On-Premise machine. Replaces the current rule baseline with provided results.
text: >
az security va sql baseline update --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --rule-id VA9999 --baseline Line1_Col1 Line1_Col2 --baseline Line2_Col1 Line2_Col2
"""
helps['security va sql baseline set'] = """
type: command
short-summary: Sets Sql Vulnerability Assessment baseline. Replaces the current baseline.
examples:
- name: Sets Sql Vulnerability Assessment baseline on an Azure virtual machine. Replaces the current baseline with latest scan results.
text: >
az security va sql baseline set --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --latest
- name: Sets Sql Vulnerability Assessment baseline on an Azure virtual machine. Replaces the current baseline with provided results.
text: >
az security va sql baseline set --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.Compute/VirtualMachines/MyVmName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --baseline rule=VA9999 Line1_col1 Line1_col2 Line1_col3 --baseline rule=VA8888 Line1_col1 Line1_col2 --baseline rule=VA9999 Line2_col1 Line2_col2 Line2_col3
- name: Sets Sql Vulnerability Assessment baseline on an On-Premise machine. Replaces the current baseline with latest scan results.
text: >
az security va sql baseline set --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --latest
- name: Sets Sql Vulnerability Assessment baseline on an On-Premise machine. Replaces the current baseline with provided results.
text: >
az security va sql baseline set --vm-resource-id subscriptions/MySubscription/ResourceGroups/MyResourceGroup/Providers/Microsoft.OperationalInsights/Workspaces/MyWorkspaceName --workspace-id 00000000-0000-0000-0000-000000000000 --server-name MyServerName --database-name MyDbName --vm-name MyVmName --agent-id MyAgentId --vm-uuid MyVmUUID --baseline rule=VA9999 Line1_col1 Line1_col2 Line1_col3 --baseline rule=VA8888 Line1_col1 Line1_col2 --baseline rule=VA9999 Line2_col1 Line2_col2 Line2_col3
"""
helps['security auto-provisioning-setting'] = """
type: group
short-summary: View your auto provisioning settings.
"""
helps['security auto-provisioning-setting list'] = """
type: command
short-summary: List the auto provisioning settings.
examples:
- name: Get auto provisioning settings.
text: >
az security auto-provisioning-setting list
"""
helps['security auto-provisioning-setting show'] = """
type: command
short-summary: Shows an auto provisioning setting.
examples:
- name: Get an auto provisioning setting.
text: >
az security auto-provisioning-setting show -n "default"
"""
helps['security auto-provisioning-setting update'] = """
type: command
short-summary: Updates your automatic provisioning settings on the subscription.
examples:
- name: Turns on automatic provisioning on the subscription.
text: >
az security auto-provisioning-setting update -n "default" --auto-provision "On"
- name: Turns off automatic provisioning on the subscription.
text: >
az security auto-provisioning-setting update -n "default" --auto-provision "Off"
- name: Updates your automatic provisioning settings on the subscription. (autogenerated)
text: |
az security auto-provisioning-setting update --auto-provision "Off" --name "default" --subscription MySubscription
crafted: true
"""
helps['security contact'] = """
type: group
short-summary: View your security contacts.
"""
helps['security contact create'] = """
type: command
short-summary: Creates a security contact.
examples:
- name: Creates a security contact.
text: >
az security contact create -n "default1" --email 'john@contoso.com' --phone '(214)275-4038' --alert-notifications 'on' --alerts-admins 'on'
"""
helps['security contact delete'] = """
type: command
short-summary: Deletes a security contact.
examples:
- name: Deletes a security contact.
text: >
az security contact delete -n "default1"
"""
helps['security contact list'] = """
type: command
short-summary: List security contacts.
examples:
- name: Get security contacts.
text: >
az security contact list
"""
helps['security contact show'] = """
type: command
short-summary: Shows a security contact.
examples:
- name: Get a security contact.
text: >
az security contact show -n "default1"
"""
helps['security discovered-security-solution'] = """
type: group
short-summary: View your discovered security solutions
"""
helps['security discovered-security-solution list'] = """
type: command
short-summary: List the discovered security solutions.
examples:
- name: Get discovered security solutions.
text: >
az security discovered-security-solution list
"""
helps['security discovered-security-solution show'] = """
type: command
short-summary: Shows a discovered security solution.
examples:
- name: Get a discovered security solution.
text: >
az security discovered-security-solution show -n ContosoWAF2 -g myService1
"""
helps['security external-security-solution'] = """
type: group
short-summary: View your external security solutions
"""
helps['security external-security-solution list'] = """
type: command
short-summary: List the external security solutions.
examples:
- name: Get external security solutions.
text: >
az security external-security-solution list
"""
helps['security external-security-solution show'] = """
type: command
short-summary: Shows an external security solution.
examples:
- name: Get an external security solution.
text: >
az security external-security-solution show -n aad_defaultworkspace-20ff7fc3-e762-44dd-bd96-b71116dcdc23-eus -g defaultresourcegroup-eus
"""
helps['security jit-policy'] = """
type: group
short-summary: Manage your Just in Time network access policies
"""
helps['security jit-policy list'] = """
type: command
short-summary: List your Just in Time network access policies.
examples:
- name: Get all the Just in Time network access policies.
text: >
az security jit-policy list
"""
helps['security jit-policy show'] = """
type: command
short-summary: Shows a Just in Time network access policy.
examples:
- name: Get a Just in Time network access policy.
text: >
az security jit-policy show -l northeurope -n default -g myService1
"""
helps['security location'] = """
type: group
short-summary: Shows the Azure Security Center Home region location.
"""
helps['security location list'] = """
type: command
short-summary: Shows the Azure Security Center Home region location.
examples:
- name: Shows the Azure Security Center Home region location.
text: >
az security location list
"""
helps['security location show'] = """
type: command
short-summary: Shows the Azure Security Center Home region location.
examples:
- name: Shows the Azure Security Center Home region location.
text: >
az security location show -n centralus
"""
helps['security pricing'] = """
type: group
short-summary: Enables managing the Azure Defender plan for the subscription
"""
helps['security pricing create'] = """
type: command
    short-summary: Updates the Azure Defender plan for the subscription.
    examples:
    - name: Updates the Azure Defender plan for the subscription.
text: >
az security pricing create -n VirtualMachines --tier 'standard'
- name: Updates the Azure defender plan for the subscription. (autogenerated)
text: az security pricing create -n VirtualMachines --tier 'standard'
crafted: true
"""
helps['security pricing list'] = """
type: command
short-summary: Shows the Azure Defender plans for the subscription.
examples:
- name: Shows the Azure Defender plans for the subscription.
text: >
az security pricing list
"""
helps['security pricing show'] = """
type: command
short-summary: Shows the Azure Defender plan for the subscription
examples:
- name: Shows the Azure Defender plan for the subscription
text: >
az security pricing show -n VirtualMachines
"""
helps['security setting'] = """
type: group
short-summary: View your security settings.
"""
helps['security setting list'] = """
type: command
short-summary: List security settings.
examples:
- name: Get security settings.
text: >
az security setting list
"""
helps['security setting show'] = """
type: command
short-summary: Shows a security setting.
examples:
- name: Get a security setting.
text: >
az security setting show -n "MCAS"
"""
helps['security task'] = """
type: group
short-summary: View security tasks (recommendations).
"""
helps['security task list'] = """
type: command
short-summary: List security tasks (recommendations).
examples:
- name: Get security tasks (recommendations) on a subscription scope.
text: >
az security task list
- name: Get security tasks (recommendations) on a resource group scope.
text: >
az security task list -g "myRg"
"""
helps['security task show'] = """
type: command
    short-summary: Shows a security task (recommendation).
examples:
- name: Get a security task (recommendation) on a subscription scope.
text: >
az security task show -n "taskName"
- name: Get a security task (recommendation) on a resource group scope.
text: >
az security task show -g "myRg" -n "taskName"
"""
helps['security topology'] = """
type: group
short-summary: Shows the network topology in your subscription.
"""
helps['security topology list'] = """
type: command
short-summary: Shows the network topology in your subscription.
examples:
- name: Shows the network topology in your subscription.
text: >
az security topology list
"""
helps['security topology show'] = """
type: command
short-summary: Shows the network topology in your subscription.
examples:
- name: Shows the network topology in your subscription.
text: >
az security topology show -n default -g myService1
"""
helps['security workspace-setting'] = """
type: group
short-summary: Shows the workspace settings in your subscription - these settings let you control which workspace will hold your security data
"""
helps['security workspace-setting create'] = """
type: command
short-summary: Creates a workspace settings in your subscription - these settings let you control which workspace will hold your security data
examples:
- name: Creates a workspace settings in your subscription - these settings let you control which workspace will hold your security data
text: >
az security workspace-setting create -n default --target-workspace '/subscriptions/20ff7fc3-e762-44dd-bd96-b71116dcdc23/resourceGroups/myRg/providers/Microsoft.OperationalInsights/workspaces/myWorkspace'
"""
helps['security workspace-setting delete'] = """
type: command
short-summary: Deletes the workspace settings in your subscription - this will make the security events on the subscription be reported to the default workspace
examples:
- name: Deletes the workspace settings in your subscription - this will make the security events on the subscription be reported to the default workspace
text: >
az security workspace-setting delete -n default
"""
helps['security workspace-setting list'] = """
type: command
short-summary: Shows the workspace settings in your subscription - these settings let you control which workspace will hold your security data
examples:
- name: Shows the workspace settings in your subscription - these settings let you control which workspace will hold your security data
text: >
az security workspace-setting list
"""
helps['security workspace-setting show'] = """
type: command
short-summary: Shows the workspace settings in your subscription - these settings let you control which workspace will hold your security data
examples:
- name: Shows the workspace settings in your subscription - these settings let you control which workspace will hold your security data
text: >
az security workspace-setting show -n default
"""
helps['security assessment'] = """
type: group
short-summary: View your security assessment results.
"""
helps['security assessment create'] = """
type: command
short-summary: Creates a customer managed security assessment.
examples:
- name: Creates a security assessment.
text: >
az security assessment create -n '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d' --status-code 'unhealthy'
"""
helps['security assessment delete'] = """
type: command
short-summary: Deletes a security assessment.
examples:
- name: Deletes a security assessment.
text: >
az security assessment delete -n '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d'
"""
helps['security assessment list'] = """
type: command
short-summary: List all security assessment results.
examples:
- name: Get security assessments.
text: >
az security assessment list
"""
helps['security assessment show'] = """
type: command
short-summary: Shows a security assessment.
examples:
- name: Get a security assessment.
text: >
az security assessment show -n '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d'
"""
helps['security assessment-metadata'] = """
type: group
short-summary: View your security assessment metadata.
"""
helps['security assessment-metadata create'] = """
type: command
short-summary: Creates a customer managed security assessment type.
examples:
- name: Creates a security assessment type.
text: >
az security assessment-metadata create -n "4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d" --display-name "Resource should be secured" --severity "high" --description "The resource should be secured according to my company security policy"
"""
helps['security assessment-metadata delete'] = """
type: command
    short-summary: Deletes a security assessment type and all its assessment results.
examples:
- name: Deletes a security assessment type.
text: >
az security assessment-metadata delete -n '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d'
"""
helps['security assessment-metadata list'] = """
type: command
    short-summary: List all security assessment metadata.
examples:
- name: Get security assessment metadata.
text: >
az security assessment-metadata list
"""
helps['security assessment-metadata show'] = """
type: command
    short-summary: Shows a security assessment metadata item.
examples:
- name: Get a security assessment metadata.
text: >
az security assessment-metadata show -n '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d'
"""
helps['security sub-assessment'] = """
type: group
short-summary: View your security sub assessments.
"""
helps['security sub-assessment list'] = """
type: command
short-summary: List all security sub assessment results.
examples:
- name: Get security sub assessments.
text: >
az security sub-assessment list
"""
helps['security sub-assessment show'] = """
type: command
short-summary: Shows a security sub assessment.
examples:
- name: Get a security sub assessment.
text: >
az security sub-assessment show --assessed-resource-id '/subscriptions/f8b197db-3b2b-4404-a3a3-0dfec293d0d0/resourceGroups/rg1/providers/Microsoft.Compute/virtualMachines/vm1' --assessment-name '4fb6c0a0-1137-42c7-a1c7-4bd37c91de8d' -n 'd7c4d9ec-227c-4fb3-acf9-25fdd97c1bf1'
"""
helps['security adaptive-application-controls'] = """
type: group
    short-summary: Enable control over which applications can run on your Azure and non-Azure machines (Windows and Linux)
"""
helps['security adaptive-application-controls list'] = """
type: command
short-summary: Adaptive Application Controls - List
examples:
- name: list all application control VM/server groups.
text: >
az security adaptive-application-controls list
"""
helps['security adaptive-application-controls show'] = """
type: command
short-summary: Adaptive Application Controls - Get
examples:
- name: Get a single application control VM/server group.
text: >
az security adaptive-application-controls show --group-name GROUP1
"""
helps['security allowed_connections'] = """
type: group
short-summary: View all possible traffic between resources for the subscription and location, based on connection type.
"""
helps['security allowed_connections list'] = """
type: command
    short-summary: List all possible traffic between resources for the subscription.
examples:
- name: Get possible traffic between resources at the subscription level.
text: >
az security allowed_connections list
"""
helps['security allowed_connections show'] = """
type: command
    short-summary: Shows all possible traffic between resources for the subscription and location, based on connection type.
examples:
- name: Get all possible traffic between resources for the subscription and location, based on connection type.
text: >
az security allowed_connections show --name Internal --resource-group mygroup
"""
helps['security adaptive_network_hardenings'] = """
type: group
short-summary: View all Adaptive Network Hardening resources
"""
helps['security adaptive_network_hardenings list'] = """
type: command
short-summary: Gets a list of Adaptive Network Hardenings resources in scope of an extended resource.
examples:
- name: Adaptive Network Hardenings - List By Extended Resource
text: >
az security adaptive_network_hardenings list --resource-group 'RG1' --resource-type 'virtualMachines' --resource-namespace 'Microsoft.Compute' --resource-name 'VM1'
"""
helps['security adaptive_network_hardenings show'] = """
type: command
short-summary: Gets a single Adaptive Network Hardening resource.
examples:
- name: Adaptive Network Hardenings - Get.
text: >
az security adaptive_network_hardenings show --resource-group 'RG1' --resource-type 'virtualMachines' --resource-namespace 'Microsoft.Compute' --resource-name 'VM1' --adaptive-network-hardenings-resource-name 'default'
"""
helps['security iot-solution'] = """
type: group
short-summary: Manage your IoT Security solution.
"""
helps['security iot-solution create'] = """
type: command
short-summary: Create your IoT Security solution.
examples:
- name: create an IoT Security solution on existing IoT Hub.
text: >
az security iot-solution create --solution-name 'IoT-Hub1' --resource-group 'rg1' --iot-hubs /subscriptions/subscriptionId/resourcegroups/rg1/providers/Microsoft.Devices/IotHubs/IoT-Hub1 --display-name "Solution Default" --location "eastus"
"""
helps['security iot-solution update'] = """
type: command
short-summary: Update your IoT Security solution.
examples:
- name: Update your IoT Security solution.
text: >
az security iot-solution update --solution-name 'IoT-Hub1' --resource-group 'rg1' --iot-hubs /subscriptions/subscriptionId/resourcegroups/rg1/providers/Microsoft.Devices/IotHubs/IoT-Hub1 --display-name "Solution Default"
"""
helps['security iot-solution delete'] = """
type: command
short-summary: Delete your IoT Security solution.
examples:
    - name: Delete an IoT Security solution.
text: >
az security iot-solution delete --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security iot-solution show'] = """
type: command
    short-summary: Shows an IoT Security solution.
    examples:
    - name: Get an IoT Security solution.
text: >
az security iot-solution show --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security iot-solution list'] = """
type: command
short-summary: List all IoT Security solutions.
examples:
    - name: Get a list of all IoT Security solutions in the subscription.
text: >
az security iot-solution list
"""
helps['security iot-analytics'] = """
type: group
short-summary: View IoT Security Analytics metrics.
"""
helps['security iot-analytics show'] = """
type: command
short-summary: Shows IoT Security Analytics metrics.
examples:
    - name: Get IoT Security Analytics metrics.
text: >
az security iot-analytics show --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security iot-analytics list'] = """
type: command
    short-summary: List all IoT Security Analytics metrics.
    examples:
    - name: Get a list of all IoT Security Analytics metrics.
text: >
az security iot-analytics list --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security iot-alerts'] = """
type: group
short-summary: View IoT Security aggregated alerts.
"""
helps['security iot-alerts delete'] = """
type: command
short-summary: Dismiss an aggregated IoT Security Alert.
examples:
- name: Dismiss an aggregated IoT Security Alert.
text: >
az security iot-alerts delete --solution-name 'IoT-Hub1' --resource-group 'rg1' --name 'IoT_CryptoMiner/2020-06-24'
"""
helps['security iot-alerts show'] = """
type: command
    short-summary: Shows a single aggregated alert of your IoT Security solution.
examples:
- name: Get an IoT Security solution aggregated alert.
text: >
az security iot-alerts show --solution-name 'IoT-Hub1' --resource-group 'rg1' --name 'IoT_CryptoMiner/2020-06-24'
"""
helps['security iot-alerts list'] = """
type: command
    short-summary: List all your IoT Security solution aggregated alerts.
    examples:
    - name: Get a list of all IoT Security solution aggregated alerts.
text: >
az security iot-alerts list --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security iot-recommendations'] = """
type: group
short-summary: View IoT Security aggregated recommendations.
"""
helps['security iot-recommendations show'] = """
type: command
    short-summary: Shows a single aggregated recommendation of your IoT Security solution.
examples:
- name: Get an IoT Security solution aggregated recommendation.
text: >
az security iot-recommendations show --solution-name 'IoT-Hub1' --resource-group 'rg1' --name 'IoT_PermissiveFirewallPolicy'
"""
helps['security iot-recommendations list'] = """
type: command
    short-summary: List all your IoT Security solution aggregated recommendations.
    examples:
    - name: Get a list of all IoT Security solution aggregated recommendations.
text: >
az security iot-recommendations list --solution-name 'IoT-Hub1' --resource-group 'rg1'
"""
helps['security regulatory-compliance-standards'] = """
type: group
    short-summary: View regulatory compliance standards.
"""
helps['security regulatory-compliance-standards list'] = """
type: command
    short-summary: List supported regulatory compliance standards, their details, and state results.
examples:
- name: Get regulatory compliance standards list.
text: >
az security regulatory-compliance-standards list
"""
helps['security regulatory-compliance-standards show'] = """
type: command
    short-summary: Shows regulatory compliance details and state for the selected standard.
examples:
- name: Get regulatory compliance standard details.
text: >
az security regulatory-compliance-standards show -n 'Azure-CIS-1.1.0'
"""
helps['security regulatory-compliance-controls'] = """
type: group
    short-summary: View regulatory compliance controls.
"""
helps['security regulatory-compliance-controls list'] = """
type: command
    short-summary: List supported regulatory compliance controls, their details, and state for the selected standard.
examples:
- name: Get regulatory compliance controls list.
text: >
az security regulatory-compliance-controls list --standard-name 'Azure-CIS-1.1.0'
"""
helps['security regulatory-compliance-controls show'] = """
type: command
    short-summary: Shows regulatory compliance control details and state for the selected standard.
examples:
- name: Get selected regulatory compliance control details and state.
text: >
az security regulatory-compliance-controls show --standard-name 'Azure-CIS-1.1.0' -n '1.1'
"""
helps['security regulatory-compliance-assessments'] = """
type: group
    short-summary: View regulatory compliance assessments.
"""
helps['security regulatory-compliance-assessments list'] = """
type: command
short-summary: Get details and state of assessments mapped to selected regulatory compliance control.
examples:
- name: Get state of mapped assessments.
text: >
az security regulatory-compliance-assessments list --standard-name 'Azure-CIS-1.1.0' --control-name '1.1'
"""
helps['security regulatory-compliance-assessments show'] = """
type: command
    short-summary: Shows supported regulatory compliance details and state for the selected assessment.
    examples:
    - name: Get selected regulatory compliance assessment details and state.
text: >
az security regulatory-compliance-assessments show --standard-name 'Azure-CIS-1.1.0' --control-name '1.1' -n '94290b00-4d0c-d7b4-7cea-064a9554e681'
"""
helps['security secure-scores'] = """
type: group
    short-summary: View secure scores.
"""
helps['security secure-scores list'] = """
type: command
    short-summary: List secure score details and state results.
examples:
- name: Get secure scores list.
text: >
az security secure-scores list
"""
helps['security secure-scores show'] = """
type: command
    short-summary: Shows secure score details for the selected initiative.
examples:
- name: Get secure score details.
text: >
az security secure-scores show -n 'ascScore'
"""
helps['security secure-score-controls'] = """
type: group
    short-summary: View secure score controls.
"""
helps['security secure-score-controls list'] = """
type: command
    short-summary: List supported secure score controls, their details, and state for the scope.
examples:
- name: Get secure score controls list.
text: >
az security secure-score-controls list
"""
helps['security secure-score-controls list_by_score'] = """
type: command
    short-summary: List supported secure score controls, their details, and state for the selected score.
examples:
- name: Get secure score controls list.
text: >
az security secure-score-controls list_by_score -n 'ascScore'
"""
helps['security secure-score-control-definitions'] = """
type: group
    short-summary: View secure score control definitions.
"""
helps['security secure-score-control-definitions list'] = """
type: command
short-summary: Get details of secure score control definitions.
examples:
- name: Get secure score control definitions.
text: >
az security secure-score-control-definitions list
"""
|
mit
| -4,440,880,175,984,025,600 | 39.371574 | 503 | 0.744832 | false |
ComputoCienciasUniandes/MetodosComputacionalesLaboratorio
|
2017-1/lab8_EJ3/lab8SOL_eJ3/spring_mass.py
|
1
|
1084
|
import numpy as np
import matplotlib.pyplot as plt
N = 5000 #number of steps to take
xo = 0.2 #initial position in m
vo = 0.0 #initial velocity
tau = 4.0 #total time for the simulation in s
dt = tau/float(N) # time step
k = 42.0 #spring constant in N/m
m = 0.25 #mass in kg
g = 9.8 #in m/s^2
mu = 0.15 #friction coefficient
y = np.zeros([N,2])
#y is the vector of positions and velocities.
y[0,0] = xo #initial position
y[0,1] = vo #initial velocity
#This function defines the derivatives of the system.
def SpringMass(state, time):
g0=state[1]
if g0 > 0 :
g1=-k/m*state[0]-g*mu
else:
g1=-k/m*state[0]+g*mu
return np.array([g0,g1])
#This is the basic step in the Euler Method for solving ODEs.
def euler(y, time, dt, derivs):
k0 = dt*derivs(y,time)
ynext = y + k0
return ynext
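#Hedged illustration (not part of the original script): one Euler step applies
#y_next = y + dt * derivs(y, t). For example, starting from the initial state
#[xo, vo], the commented call below would advance position and velocity by a
#single time step:
#   y1 = euler(np.array([xo, vo]), 0.0, dt, SpringMass)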
for j in range (N-1):
y[j+1] = euler(y[j],0,dt,SpringMass)
#Just to plot
time = np.linspace(0,tau,N)
plt.plot(time, y[:,0],'b',label="position")
plt.xlabel( "time" )
plt.ylabel( "position" )
plt.savefig('spring_mass.png')
|
mit
| -9,078,993,478,934,890,000 | 20.27451 | 61 | 0.631919 | false |
brabsmit/home-control
|
homecontrol/docs/conf.py
|
1
|
9338
|
# -*- coding: utf-8 -*-
#
# django-homecontrol documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 13 17:28:03 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
from django.conf import settings
settings.configure()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-homecontrol'
copyright = u'2015, Bryan Smith'
author = u'Bryan Smith'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-homecontroldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-homecontrol.tex', u'django-homecontrol Documentation',
u'Bryan Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-homecontrol', u'django-homecontrol Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-homecontrol', u'django-homecontrol Documentation',
author, 'django-homecontrol', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| -3,928,595,788,817,941,500 | 31.423611 | 79 | 0.709681 | false |
leonevo/euao
|
BaseAdapter/BaseAdapter.py
|
1
|
39358
|
#-*- coding: utf-8 -*-
from optparse import OptionParser
import sys
import os
root_dir=os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(root_dir)
import CommonDefinition
import subprocess, os, time, signal
import telnetlib
import paramiko
from os import linesep
import re
from time import sleep
import datetime
import platform
import commands
#log setting
import logging
myloger=logging.getLogger(CommonDefinition.loggerName)
import EUAOSSHClient
import traceback
import sys
def trace_back():
try:
return traceback.format_exc()
except:
return 'trace back error.'
def Add(a,b,c):
"""just for test multithread
return a+b
sleep c
"""
sleep(c)
return a+b
def ExecuteCMDviaTelnet(host,user,passwd,command,port=23,**kwargs):
"""
Run command via telnet, return command status and command output
Required:
host: target server hostname
user: target server username
passwd: target server password
command: command that need to execute
Optional:
port: target server telnet port
connect_timeout: second
command_timeout: second
login_prompt: the word before typing username
passwd_prompt: the word before typing passwd
cmd_prompt: the char/word before typing command
return:
exitCode:
0: success
1: connection error
2: command error
commandOutput: output
"""
if CommonDefinition.simulation:
# 2013.1.9 vioserver2 down, skip this server
#if CommonDefinition.simulation or host=='182.247.251.219':
myloger.info('simulation: telnet to %s to exec cmd: %s' % (host,command))
exit_code=0
output='ok'
sleep(CommonDefinition.simulation_sleep_sec)
return exit_code,output
#initiate the args
login_prompt=kwargs.get('login_prompt','login:')
passwd_prompt=kwargs.get('passwd_prompt','assword:')
cmd_prompt=kwargs.get('cmd_prompt','#')
connect_timeout=kwargs.get('connect_timeout',60)
command_timeout=kwargs.get('command_timeout',60)
log=kwargs.get('log',True)
get_exit_code=kwargs.get('get_exit_code',True)
if log:
myloger.debug(linesep
+"IP:"+host+" User:"+user+" Password:"+passwd+" Port:"+str(port)+" Connection timeout:"+str(connect_timeout)
+linesep
+"login prompt:"+login_prompt+" password prompt:"+passwd_prompt+" Command:"+command
+linesep)
try:
tn=telnetlib.Telnet(host,port,connect_timeout)
except EOFError,e:
if log:
myloger.error("Telnet cannot open "+host+":"+str(port))
commandExitCode=3
commandOutput='Error'
#tn.close()
return commandExitCode,commandOutput
except Exception,e:
if log:
myloger.error("Telnet cannot open %s : %s, %s" % (host,str(type(e)),+e.args[0]))
commandExitCode=3
commandOutput='Error'
#tn.close()
return commandExitCode,commandOutput
else:
if log:
myloger.debug('HOST: %s connected, need to login in.' % host)
try:
#tn.read_until(login_prompt,connect_timeout)
index,match,data=tn.expect([GetLastPrompt(login_prompt)],connect_timeout)
if index!=0:
if log:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (login_prompt,connect_timeout,data))
commandExitCode=3
commandOutput="Error"
tn.close()
return commandExitCode,commandOutput
myloger.debug('User: %s' % user)
tn.write(user+'\r\n')
index,match,data=tn.expect([GetLastPrompt(passwd_prompt)],connect_timeout)
if index==-1:
if log:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (passwd_prompt,connect_timeout,data))
commandExitCode=3
commandOutput="Error"
tn.close()
return commandExitCode,commandOutput
#tn.read_until(passwd_prompt,connect_timeout)
if log:
myloger.debug('%s OK, need password.' % user)
tn.write(passwd+'\r\n')
if log:
myloger.debug('password sent.')
index,match,data=tn.expect([GetRegObjLastPrompt(cmd_prompt)],connect_timeout)
if index==-1:
if log:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (cmd_prompt,connect_timeout,data))
commandExitCode=3
commandOutput="Error"
tn.close()
return commandExitCode,commandOutput
if log:
myloger.debug('Password OK, ready to execute command')
tn.write(command+'\r\n')
if log:
myloger.debug('Command: [%s] sent.' % command)
index,match,data=tn.expect([GetRegObjLastPrompt(cmd_prompt)],command_timeout)
if index==-1:
if log:
myloger.error('can not wait for %s in %d seconds, the output until now is: [%s]' % (cmd_prompt,connect_timeout,data))
commandExitCode=3
commandOutput="Error"
tn.close()
return commandExitCode,commandOutput
#commandOutput=data.split('\r\n')[1]
commandOutput=GetCommandOutput(data)
if get_exit_code:
#get command exit code
tn.write("echo $?\r\n")
#commandExitCode=tn.read_until(cmd_prompt,command_timeout)
index,match,data=tn.expect([GetRegObjLastPrompt(cmd_prompt)],command_timeout)
if index==-1:
if log:
myloger.error("Error in getting command: %s exit code. Return data: %s." % (command,data))
commandExitCode=3
else:
commandExitCode=int(data.split('\r\n')[1])
if log:
myloger.debug("ExitCode: %s. Command output: %s." % (commandExitCode,commandOutput))
else:
commandExitCode=0
except EOFError,e:
if log:
myloger.error("Can't read data")
commandExitCode=3
commandOutput='Error'
except Exception,e:
commandExitCode=3
commandOutput='Error'
if log:
myloger.error("Error: "+str(type(e))+","+e.args[0])
tn.close()
return commandExitCode,commandOutput
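#Hedged usage sketch (illustrative only; the address, credentials and prompt
#below are assumptions, not values taken from this project). The function
#returns an (exit_code, output) pair, with exit_code 0 on success:
#
#   exit_code, output = ExecuteCMDviaTelnet('192.0.2.10', 'padmin', 'padmin',
#                                           'ls /tmp', cmd_prompt='$',
#                                           connect_timeout=30, command_timeout=30)
#   if exit_code == 0:
#       myloger.info('remote command output: %s' % output)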
def ExecuteSimpleCMDviaSSH2(host,user,passwd,command,port=22,connect_timeout=60,command_timeout=60,return_list=False):
"""
For Simple command with single line ouput.
Run command via SSH2, return command status and command output
Required:
host: target server hostname
user: target server username
passwd: target server password
command: command that need to execute
Optional:
connect_timeout:
port: target server telnet port
command_timeout: second
return:
exitCode:
0: success
1: connection error
2: command error
commandOutput: output
"""
if CommonDefinition.simulation:
myloger.info('ssh to %s to exec cmd: %s' % (host,command))
exitCode=0
if not return_list:
cmdOutput='ok'
else:
cmdOutput=['ok']
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
myloger.debug("IP: %s, user: %s, password: %s, command: %s, connect_timeout: %d." % (host,user,passwd,command,connect_timeout))
try:
#ssh_client=paramiko.SSHClient()
ssh_client=EUAOSSHClient.EUAOSSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host,port=port,username=user,password=passwd,timeout=connect_timeout)
#ssh_client.exec_command(command,timeout=20.0)
i,o,e=ssh_client.exec_command(command,timeout=command_timeout)
if i and o and e:
error_readlines=e.readlines()
if error_readlines==[]:
# commandOutput=''.join(o.readlines()).lstrip().replace('\n','')
output_readlines=o.readlines()
if output_readlines==[]:
commandOutput=''
else:
output_readlines=remove_output_list_return(output_readlines)
if not return_list:
commandOutput=GetSimpleSSHCommandOutput(output_readlines)
else:
commandOutput=output_readlines
myloger.debug('Command executed successfully. Response is: %s' % commandOutput)
commandExitCode=0
else:
myloger.debug('Error in executing command %s: %s.' % (command,error_readlines))
commandOutput=error_readlines
commandExitCode=3
else:
myloger.debug('Error in connection while executing command: %s.' % command)
if not return_list:
commandOutput=''
else: commandOutput=[]
commandExitCode=3
return commandExitCode,commandOutput
except Exception,e:
myloger.error("SSH Adapter cannot connect to "+host+" : "+str(type(e)))
myloger.debug(trace_back())
commandExitCode=3
if not return_list:
commandOutput='Error'
else:
commandOutput=['Error']
ssh_client.close()
return commandExitCode,commandOutput
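#Hedged usage sketch (illustrative; host and credentials are assumptions).
#With return_list=True the output comes back as a list of lines with the
#trailing newlines stripped, which is convenient for parsing:
#
#   code, lines = ExecuteSimpleCMDviaSSH2('192.0.2.20', 'root', 'secret',
#                                         'df -h', return_list=True)
#   if code == 0:
#       for line in lines:
#           myloger.debug(line)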
def ExecuteCMDviaSSH2(host,user,passwd,command,port=22,**kwargs):
"""
Run command via SSH2, return command status and command output
Required:
host: target server hostname
user: target server username
passwd: target server password
command: command that need to execute
Optional:
port: target server telnet port
connect_timeout: second
command_timeout: second
cmd_prompt: the char/word before typing command
return:
exitCode:
0: success
1: connection error
2: command error
commandOutput: output
"""
if CommonDefinition.simulation:
myloger.info('ssh to %s to exec cmd: %s' % (host,command))
exitCode=0
cmdOutput='ok'
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
#initiate the args
# login_prompt=kwargs.get('login_prompt','login:')
# passwd_prompt=kwargs.get('passwd_prompt','assword:')
cmd_prompt=kwargs.get('cmd_prompt','#')
connect_timeout=kwargs.get('connect_timeout',60)
command_timeout=kwargs.get('command_timeout',60)
myloger.debug(linesep
+"IP:"+host+" User:"+user+" Password:"+passwd+" Port:"+str(port)+" Connection timeout:"+str(connect_timeout)
+linesep
+" Command:"+command
+linesep)
try:
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host,port=port,username=user,password=passwd,timeout=connect_timeout)
except Exception,e:
myloger.error("SSH Adapter cannot connect to "+host+" : "+str(type(e))+","+e.args[0])
commandExitCode=3
commandOutput='Error'
ssh_client.close()
return commandExitCode,commandOutput
else:
myloger.debug('HOST: %s connected, User %s login' % (host,user))
try:
ssh_channel=ssh_client.get_transport().open_session()
ssh_channel.setblocking(0)
ssh_channel.get_pty()
ssh_channel.invoke_shell()
ssh_channel.settimeout(command_timeout)
#read the login info before send command
login_info=""
starttime=datetime.datetime.now()
while ssh_channel.recv_ready() or login_info=="":
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
login_info+=tmpRecv
#print "login_info: %s" % login_info
if GetRegObjLastPrompt(cmd_prompt).match(login_info.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>connect_timeout:
ssh_client.close()
ssh_channel.close()
myloger.error('Connection timeout.')
commandExitCode=1
commandOutput='Error'
return commandExitCode,commandOutput
sleep(0.1)
login_info=ReplaceTermControlChar(login_info)
myloger.debug("Login Info: %s" % login_info)
if ssh_channel.send_ready():
# ready to send command
ssh_channel.send(command+'\r')
myloger.debug("Send command %s." % command)
command_response=""
starttime=datetime.datetime.now()
while not ssh_channel.closed and ssh_channel.recv_ready():
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
command_response+=tmpRecv
if GetRegObjLastPrompt(cmd_prompt).match(command_response.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>command_timeout:
myloger.error('Command timeout. Maybe the connection error or the command_timeout is not long enough for this command to be executed.')
ssh_client.close()
ssh_channel.close()
commandExitCode=3
commandOutput='Error'
return commandExitCode,commandOutput
sleep(0.1)
#get command exit code
ssh_channel.send('echo $?\r')
command_exit_code=""
starttime=datetime.datetime.now()
while not ssh_channel.closed and ssh_channel.recv_ready():
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
command_exit_code+=tmpRecv
if GetRegObjLastPrompt(cmd_prompt).match(command_exit_code.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>command_timeout:
myloger.error('Command timeout. Maybe the connection error.')
ssh_client.close()
ssh_channel.close()
commandExitCode=3
commandOutput='Error'
return commandExitCode,commandOutput
sleep(0.1)
commandOutput=ReplaceTermControlChar(GetCommandOutput(command_response))
commandExitCode=int(command_exit_code.splitlines()[1])
myloger.debug("Command exit code: %s." % commandExitCode)
myloger.debug("Command response: %s." % commandOutput)
ssh_channel.close()
ssh_client.close()
return commandExitCode,commandOutput
else:
myloger.error('SSH Transport Channel is not ready to send command.')
ssh_client.close()
ssh_channel.close()
commandExitCode=3
commandOutput='Error'
return commandExitCode,commandOutput
except Exception,e:
myloger.error("SSH Adapter error in executing command: %s." % command)
myloger.error("Error Message: "+str(type(e))+","+e.args[0])
ssh_client.close()
ssh_channel.close()
commandExitCode=3
commandOutput='Error'
return commandExitCode,commandOutput
def ExecuteMultiCMDsviaSSH2(host,user,passwd,commands,port=22,**kwargs):
"""
Run command via SSH2, return command status and command output
Required:
host: target server hostname
user: target server username
passwd: target server password
commands: commands,prompts,timeout that need to execute
Optional:
port: target server telnet port
connect_timeout: second
command_timeout: second
cmd_prompt: the char/word before typing command
return:
exitCode:
0: success
1: connection error
2: command error
commandOutput: output
"""
if CommonDefinition.simulation:
myloger.info('ssh to %s to exec cmd: %s' % (host,commands))
exitCode=0
cmdOutput='ok'
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
#initiate the args
# login_prompt=kwargs.get('login_prompt','login:')
# passwd_prompt=kwargs.get('passwd_prompt','assword:')
cmd_prompt=kwargs.get('cmd_prompt','#')
connect_timeout=kwargs.get('connect_timeout',60)
command_timeout=kwargs.get('command_timeout',60)
CommandOutputlist=[]
CommandExitCodelist=[]
myloger.debug("IP: %s, User: %s, Passsword: %s, Port: %d, Connection timeout: %d, Commands: %s" % (host,user,passwd,port,connect_timeout,commands))
try:
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(host,port=port,username=user,password=passwd,timeout=connect_timeout)
except Exception,e:
myloger.error("SSH Adapter cannot connect to "+host+" : "+str(type(e))+","+e.args[0])
commandExitCode=3
commandOutput='Error'
ssh_client.close()
return commandExitCode,commandOutput
else:
myloger.debug('HOST: %s connected, User %s login' % (host,user))
try:
ssh_channel=ssh_client.get_transport().open_session()
ssh_channel.setblocking(0)
ssh_channel.get_pty()
ssh_channel.invoke_shell()
ssh_channel.settimeout(command_timeout)
#read the login info before send command
login_info=""
starttime=datetime.datetime.now()
while ssh_channel.recv_ready() or login_info=="":
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
login_info+=tmpRecv
print "login_info: %s" % login_info
if GetRegObjLastPrompt(cmd_prompt).match(login_info.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>connect_timeout:
ssh_client.close()
ssh_channel.close()
myloger.error('Connection timeout.')
commandExitCode=1
commandOutput='Error'
return commandExitCode,commandOutput
sleep(0.1)
login_info=ReplaceTermControlChar(login_info)
myloger.debug("Login Info: %s" % login_info)
if ssh_channel.send_ready():
for command in commands:
# ready to send command
if not ssh_channel.closed:
myloger.debug('Command, prompt and timeout: %s' % command)
subcommand_count=len(command)
if subcommand_count%3!=0:
                        myloger.error('Commands Error, command, prompt and command timeout do not appear together')
#ErrorExit
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
cmd_output=[]
for i in range(0,subcommand_count,3):
cmd=command[i]
prt=command[i+1]
cmd_time_out=command[i+2]
ssh_channel.send(cmd+'\r')
myloger.debug("Send command %s." % cmd)
command_response=""
starttime=datetime.datetime.now()
while not ssh_channel.closed and ssh_channel.recv_ready():
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
command_response+=tmpRecv
if GetRegObjLastPrompt(prt).match(command_response.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>cmd_time_out:
#command time out
myloger.error('Command timeout. Maybe the connection error or the command time out is too short to wait for the command response.')
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
sleep(0.1)
#get command exit code
ssh_channel.send('echo $?\r')
command_exit_code=""
starttime=datetime.datetime.now()
while not ssh_channel.closed and ssh_channel.recv_ready():
tmpRecv=ssh_channel.recv(-1)
if tmpRecv!='':
command_exit_code+=tmpRecv
if GetRegObjLastPrompt(prt).match(command_exit_code.splitlines()[-1]):
break
time_delta=datetime.datetime.now()-starttime
if time_delta.total_seconds()>cmd_time_out:
#command time out
myloger.error('Command timeout. Maybe the connection error.')
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
sleep(0.1)
commandOutput=ReplaceTermControlChar(GetCommandOutput(command_response))
commandExitCode=int(command_exit_code.splitlines()[1])
CommandOutputlist.append(commandOutput)
CommandExitCodelist.append(commandExitCode)
myloger.debug("Command exit code: %s." % commandExitCode)
myloger.debug("Command response: %s." % commandOutput)
else:
myloger.error('SSH Channel closed by foreign reason')
return CommandExitCodelist,CommandOutputlist
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
else:
myloger.error('SSH Transport Channel is not ready to send command.')
ssh_client.close()
#ErrorExit
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
except Exception,e:
myloger.error("SSH Adapter error in executing command: %s." % command)
myloger.error("Error Message: "+str(type(e))+","+e.args[0])
#ErrorExit
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
ssh_channel.close()
ssh_client.close()
return CommandExitCodelist,CommandOutputlist
def ExecuteMultiCMDsviaTelnet(host,user,passwd,commands,port=23,**kwargs):
"""
Run command via telnet, return command status and command output
Required:
host: target server hostname
user: target server username
passwd: target server password
commands: commands and prompts pair,
#subcommand format:[[command,prompt,timeout],[str,str,int]]
#example: [['ls','$',5],['pwd','$',5],['passwd','password:',5,'123','password:',5,'123','$',5]]
each item in commands is a list, each list contains pairs of command or subcommand and their prompts and timeout
after command/subcommand was successfully executed.
Optional:
port: target server telnet port
connect_timeout: second
command_timeout: second
login_prompt: the word before typing username
passwd_prompt: the word before typing passwd
cmd_prompt: the char/word before typing command
return:
exitCode:
0: success
1: connection error
2: command error
commandOutput: output
"""
if CommonDefinition.simulation:
myloger.info('telnet to %s to exec cmd: %s' % (host,commands))
exitCode=[0,0,0]
cmdOutput=[['cmd 1 ok\r\n$ '], ['cmd 2 ok\r\n$ '], ['cmd 3 ok\r\n$ ']]
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
#initiate the args
login_prompt=kwargs.get('login_prompt','login:')
passwd_prompt=kwargs.get('passwd_prompt','assword:')
cmd_prompt=kwargs.get('cmd_prompt','#')
connect_timeout=kwargs.get('connect_timeout',60)
command_timeout=kwargs.get('command_timeout',60)
CommandOutputlist=[]
CommandExitCodelist=[]
myloger.debug("IP: %s, User: %s, Passsword: %s, Port: %d, Connection timeout: %d, login prompt: %s, password prompt: %s, Commands: %s" % (host,user,passwd,port,connect_timeout,login_prompt,passwd_prompt,commands))
try:
tn=telnetlib.Telnet(host,port,connect_timeout)
except EOFError,e:
myloger.error("Telnet cannot open "+host+":"+str(port))
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
except Exception,e:
myloger.error("Telnet cannot open "+host+" : "+str(type(e))+","+e.args[0])
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
else:
myloger.debug('HOST: %s connected, need to login in.' % host)
try:
#tn.read_until(login_prompt,connect_timeout)
index,match,data=tn.expect([GetLastPrompt(login_prompt)],connect_timeout)
if index!=0:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (login_prompt,connect_timeout,data))
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
myloger.debug('User: %s' % user)
tn.write(user+'\r\n')
index,match,data=tn.expect([GetLastPrompt(passwd_prompt)],connect_timeout)
if index==-1:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (passwd_prompt,connect_timeout,data))
CommandExitCodelist.append(3)
CommandOutputlist.append(["Error"])
tn.close()
return CommandExitCodelist,CommandOutputlist
#tn.read_until(passwd_prompt,connect_timeout)
myloger.debug('%s OK, need password.' % user)
tn.write(passwd+'\r\n')
myloger.debug('password sent.')
index,match,data=tn.expect([GetRegObjLastPrompt(cmd_prompt)],connect_timeout)
if index==-1:
myloger.error("can not wait for %s in %d seconds, the output until now is: [%s]" % (cmd_prompt,connect_timeout,data))
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
myloger.debug('Password OK, ready to execute command')
#commands
for command in commands:
myloger.debug('Command and prompt: %s' % command)
#subcommand format:[[command,prompt,timeout],[str,str,int]]
#example: [['ls','$',5],['pwd','$',5],['passwd','password:',5,'123','password:',5,'123','$',5]]
subcommand_count=len(command)
if subcommand_count%3!=0:
                    myloger.error('Commands Error, command, prompt and command timeout do not appear in groups of three')
#ErrorExit
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
cmd_output=[]
for i in range(0,subcommand_count,3):
cmd=command[i]
prt=command[i+1]
cmd_time_out=command[i+2]
tn.write(cmd+'\r\n')
myloger.debug('Commands: %s sent.' % cmd)
index,match,data=tn.expect([GetRegObjLastPrompt(prt)],cmd_time_out)
if index==-1:
myloger.error('can not wait for %s in %d seconds, the output until now is: [%s]' % (cmd_prompt,connect_timeout,data))
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error'])
tn.close()
return CommandExitCodelist,CommandOutputlist
myloger.debug("%s output: %s" % (cmd,data))
cmd_output.append(data)
CommandOutputlist.append(cmd_output)
#get command exit code
tn.write("echo $?\r\n")
index,match,data=tn.expect([GetRegObjLastPrompt(cmd_prompt)],command_timeout)
if index==-1:
ErrStr="Error in getting command: %s exit code. Return data: %s." % (command,data)
myloger.error(ErrStr)
CommandOutputlist.append([ErrStr])
CommandExitCodelist.append(3)
return CommandExitCodelist,CommandOutputlist
else:
commandExitCode=int(data.split('\r\n')[1])
CommandExitCodelist.append(commandExitCode)
myloger.debug("ExitCode: %s, Command output: %s." % (commandExitCode,cmd_output))
except EOFError,e:
myloger.error("Can't read data")
CommandExitCodelist.append(3)
CommandOutputlist.append(['Error: Can not read data.'])
except Exception,e:
commandExitCode=3
CommandOutputlist.append(['Error'])
myloger.error("Error: "+str(type(e))+","+e.args[0])
tn.close()
return CommandExitCodelist,CommandOutputlist
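#Hedged usage sketch (illustrative; host and credentials are assumptions).
#Each inner list holds (command, prompt, timeout) triples; interactive
#commands such as passwd chain several triples inside one list:
#
#   cmds = [['ls', '$', 5],
#           ['pwd', '$', 5]]
#   exit_codes, outputs = ExecuteMultiCMDsviaTelnet('192.0.2.30', 'padmin',
#                                                   'padmin', cmds, cmd_prompt='$')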
def GetLastPrompt(prompt):
"""
    form a regular expression string: '.*'+prompt+'\ $'
"""
return '.*'+prompt+'\ $'
def GetRegObjLastPrompt(prompt):
"""
Use RegExp mutiline mode, for some first login and some command with multiline output
"""
if prompt.find('$')>=0:
prompt=prompt.replace('$','\$')
return re.compile('^.*'+prompt+'\ ?$',re.MULTILINE)
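#Illustrative behaviour (assumed examples, not from the original code): the
#multiline pattern matches when the last received line ends with the prompt.
#
#   GetRegObjLastPrompt('$').match('user@host:~ $ ')    # -> match object
#   GetRegObjLastPrompt('#').match('plain output line') # -> None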
def remove_output_list_return(data):
return_list=[]
for item in data:
return_list.append(item.replace('\n',''))
return return_list
def GetSimpleSSHCommandOutput(data):
"""
here data is a list.
"""
data_item_count=len(data)
if data_item_count>1:
str_linesep=CommonDefinition.line_sep
data_str=''
for i in range(data_item_count-1):
data_str+=data[i]
data_str+=str_linesep
data_str+=data[data_item_count-1]
return data_str
elif data_item_count==1:
return data[0]
else:
return ''
def GetCommandOutput(data):
"""
    remove the first and last line, return the raw output
    data: str
    return data_output
    *** Notice ***
    This function is inefficient and should be replaced in the future.
"""
str_linesep='\r\n'
data_list=str(data).split(str_linesep)
if len(data_list)>=3:
data_list=data_list[1:-1]
return_str=data_list[0]
for item in data_list[1:]:
return_str=return_str+str_linesep
return_str=return_str+item
return return_str
elif len(data_list)==2:
return ''
else:
return data
#return ''.join(str(data).split('\r\n')[1:-1])
def ReplaceTermControlChar(data):
"""
    remove the Terminal Control Characters
"""
# for linux
strinfo=re.compile('\\x1b.{1,6}?m')
return strinfo.sub('',data)
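#Illustrative example (assumed, not from the original code): strips simple
#ANSI colour sequences emitted by Linux terminals, e.g.
#
#   ReplaceTermControlChar('\x1b[0;32mOK\x1b[0m')  # -> 'OK'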
def ExecuteSimpleCMDLocal(cmd):
if CommonDefinition.simulation:
        myloger.info('local exec cmd: %s' % cmd)
exitCode=0
cmdOutput='ok'
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
myloger.debug('Local command: %s' % cmd)
platform_system=platform.system()
if platform_system=='Windows':
return os.system(cmd),''
elif platform_system=='Linux':
return commands.getstatusoutput(cmd)
def ExecuteCMDLocal(cmd,timeout=5):
"""
timeout: seconds
"""
if CommonDefinition.simulation:
        myloger.info('local exec cmd: %s' % cmd)
exitCode=0
cmdOutput='ok'
sleep(CommonDefinition.simulation_sleep_sec)
return exitCode,cmdOutput
myloger.debug('Local command: %s' % cmd)
platform_system=platform.system()
if platform_system=='Windows':
myloger.debug('Execute command in local cmd: %s' % cmd)
try:
output=os.popen(cmd)
CommandOutput=output.read()
myloger.debug('Command Output: %s' % CommandOutput)
o=os.popen('echo %errorlevel%')
ExitCode=o.read()
            print ExitCode,CommandOutput
            return ExitCode,CommandOutput
except Exception,e:
myloger.error('Error while executing command \'%s\' in local CMD. %s, %s' % (cmd,str(type(e)),e.args[0]))
ExitCode=3
return ExitCode,''
elif platform_system=='Linux':
cmds = cmd.split(" ")
start = datetime.datetime.now()
try:
process = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while process.poll() is None:
time.sleep(0.2)
now = datetime.datetime.now()
if (now - start).seconds> timeout:
os.kill(process.pid, signal.SIGILL)
"""
waitpid(pid, options) -> (pid, status << 8)
Wait for completion of a given process. options is ignored on Windows.
"""
os.waitpid(-1,0)
ExitCode=3
                    Output='Execute command \'%s\' timed out. Timeout is %d seconds' % (cmd,timeout)
return ExitCode,Output
Output=process.stdout.readlines()
except Exception,e:
myloger.error('Error while executing command \'%s\' in local CMD. %s, %s' % (cmd,str(type(e)),e.args[0]))
ExitCode=3
return ExitCode,''
        #the subprocess has already finished, so use its return code directly
        ExitCode=process.returncode
print ExitCode,Output
return ExitCode,Output
else:
errorMsg='EUAO Base Adapter ExecuteCMDLocal Function does no support Platform: %s' % platform_system
myloger.error(errorMsg)
ExitCode=3
return ExitCode,errorMsg
if __name__ == '__main__':
#ExecuteCMDviaTelnet('182.247.251.215','padmin','padmin','pwd',cmd_prompt='$')
"""
ExecuteCMDLocal('vol',timeout=4)
ExecuteCMDLocal('java -jar',timeout=4)
"""
"""
username='padmin'
passwd='padmin'
cmd_list=[['cp .sh_history a','$',5],['cp .sh_history b','$',5],['cp .sh_history c','$',5]]
exit_code,output=ExecuteMultiCMDsviaTelnet('182.247.251.219',username,passwd,cmd_list,port=23,cmd_prompt='$')
print exit_code
print output
"""
exit_code,result=ExecuteSimpleCMDviaSSH2('182.247.251.247','hscroot','abc1234','pwd')
print exit_code
print result
|
apache-2.0
| 7,493,101,361,177,667,000 | 41.631929 | 217 | 0.530618 | false |
ximepa/docp
|
planning/migrations/0001_initial.py
|
1
|
2739
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PlanningConnections'
db.create_table(u'planning_planningconnections', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('vyl', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['options.Dom'])),
('kv', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('start', self.gf('django.db.models.fields.DateTimeField')()),
('end', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'planning', ['PlanningConnections'])
def backwards(self, orm):
# Deleting model 'PlanningConnections'
db.delete_table(u'planning_planningconnections')
models = {
u'options.dom': {
'Meta': {'ordering': "('sorting',)", 'unique_together': "(('vyl', 'house'),)", 'object_name': 'Dom'},
'house': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['options.House']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sorting': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
'vyl': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['options.Vyl']"})
},
u'options.house': {
'Meta': {'ordering': "('num',)", 'object_name': 'House'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'options.vyl': {
'Meta': {'ordering': "('name',)", 'object_name': 'Vyl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'planning.planningconnections': {
'Meta': {'object_name': 'PlanningConnections'},
'end': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kv': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'vyl': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['options.Dom']"})
}
}
complete_apps = ['planning']
|
gpl-3.0
| 8,479,388,373,723,343,000 | 48.818182 | 123 | 0.560058 | false |
jansel/gridea
|
network.py
|
1
|
7182
|
# encoding: utf-8
# Broadcast protocol used to build a distributed cluster of solver processes.
#
# This code uses the twisted event driven networking framework:
# http://twistedmatrix.com/documents/13.0.0/core/howto/servers.html
#
import json
import logging
import threading
import time
import twisted
from twisted.internet.endpoints import clientFromString
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import LineReceiver
__author__ = 'Jason Ansel'
log = logging.getLogger(__name__)
# Simple shared secret to identify our peers
PROTO_PASSWORD = 'ooLeel9aiJ4iW1nei1sa8Haichaig2ch'
class GlobalBest(object):
"""
Singleton class used to store the global best. Lock is required in
the worker processes as networking code runs in a different thread than
the solver.
"""
puzzle_id = None
score = None
solution = None
timestamp = time.time()
lock = threading.Lock()
@classmethod
def reset(cls, puzzle_id=None):
"""
Called when starting a new puzzle
:param puzzle_id: string identifying the puzzle being solved
"""
with cls.lock:
cls.puzzle_id = puzzle_id
cls.score = None
cls.solution = None
cls.timestamp = time.time()
@classmethod
def update(cls, puzzle_id, score, solution):
"""
Replace the current global best if score is lower than GlobalBest.score
:param puzzle_id: string identifying the puzzle being solved
:param score: number of squares required by solution
:param solution: packed permutation representation
:return: True if a new global best was established
"""
with cls.lock:
if puzzle_id != cls.puzzle_id:
log.warning('discarding results for wrong puzzle %s != %s',
puzzle_id, cls.puzzle_id)
elif cls.score is None or score < cls.score:
cls.score = score
cls.solution = solution
cls.timestamp = time.time()
return True
return False
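# Hedged usage sketch, not part of the original module: how a solver thread is
# expected to interact with GlobalBest. The puzzle id, score and solution below
# are placeholders; update() only accepts strictly better (lower) scores.
def _example_global_best_usage():
    GlobalBest.reset('example-puzzle-id')
    improved = GlobalBest.update('example-puzzle-id', 42, [0, 1, 2])
    return improved  # True, since 42 beats the initial score of None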
class GrideaProtocol(LineReceiver):
"""
Network protocol used to communicate problem instances and solutions
    to other processes.  All messages are broadcast to the entire network and
consist of a JSON string on a single line. There exist two message types:
1) Problem instances, cause workers to start solving:
{'id': string, 'puzzle': [[...], ...], ...}
2) New global best solutions, sent by workers:
{'puzzle_id': string, 'score': int, 'solution': [...]}
"""
peers = set() # GrideaProtocol() instances
broadcast_lock = threading.Lock()
def __init__(self, worker=None, on_connect=None):
"""
:param worker: optional instance of gridea.GrideaWorker()
:param on_connect: optional callback for after connection
"""
self.worker = worker
self.on_connect = on_connect
self.logged_in = False
def connectionMade(self):
"""
Called by twisted framework on connect.
"""
self.transport.setTcpKeepAlive(True)
self.transport.setTcpNoDelay(True)
if isinstance(self.transport, twisted.internet.tcp.Client):
self.sendLine(PROTO_PASSWORD)
self.logged_in = True
GrideaProtocol.peers.add(self)
log.info('connect (%d peers)', len(GrideaProtocol.peers))
if self.on_connect:
self.on_connect()
def connectionLost(self, reason=None):
"""
Called by twisted framework on disconnect.
"""
GrideaProtocol.peers.discard(self)
log.info('disconnect (%d peers)', len(GrideaProtocol.peers))
if (isinstance(self.transport, twisted.internet.tcp.Client) and
reactor.running):
log.info('shutting down')
reactor.stop()
def lineReceived(self, line):
"""
Called by twisted framework from incoming network messages.
:param line: the line received from the network
"""
if not self.logged_in:
return self.login(line)
msg = json.loads(line)
if 'puzzle' in msg:
# Start solving a new puzzle instance
GlobalBest.reset(msg['id'])
if self.worker:
reactor.callInThread(self.worker.solve, msg)
self.broadcast(line)
log.debug('got new puzzle %s', msg['id'])
elif 'score' in msg:
# A new global best was found by other process
self.best(msg['puzzle_id'], msg['score'], msg['solution'])
def login(self, password):
"""
Called for any message sent by a client not logged in. We use a
simple shared secret auth to make sure we are talking to others who
speak the same protocol.
:param password: the message from the client
"""
if password == PROTO_PASSWORD:
self.logged_in = True
GrideaProtocol.peers.add(self)
log.info('login ok (%d peers)', len(GrideaProtocol.peers))
else:
self.transport.loseConnection()
log.info('login failed (%d peers)', len(GrideaProtocol.peers))
def broadcast(self, line):
"""
Broadcast line to all connected peers. Broadcast lock is only required
in worker processes as the solver will send from another thread.
:param line: the line to broadcast
"""
with GrideaProtocol.broadcast_lock:
for peer in GrideaProtocol.peers:
if peer is not self:
peer.sendLine(line)
def best(self, puzzle_id, score, solution):
"""
Record a new solution to the puzzle, and broadcast it to other
processes if it is a new global best.
:param puzzle_id: string identifying the puzzle being solved
:param score: number of squares required by solution
:param solution: packed permutation representation
"""
if GlobalBest.update(puzzle_id, score, solution):
self.broadcast(json.dumps({'puzzle_id': puzzle_id, 'score': score,
'solution': solution}))
def listen(port):
"""
Start a server using GrideaProtocol
:param port: port to listen on
"""
class ServerFactory(Factory):
protocol = GrideaProtocol
reactor.listenTCP(port, ServerFactory())
def connect(hostname, worker=None, on_connect=None):
"""
Connect to server using GrideaProtocol, automatically retry if it is
not yet running.
:param hostname: `hostname:port` to connect to
:param worker: optional gridea.GrideaWorker() to make this process a worker
:param on_connect: optional callback after connection
"""
class ClientFactory(ReconnectingClientFactory):
def buildProtocol(self, addr):
return GrideaProtocol(worker, on_connect)
clientFromString(reactor, 'tcp:' + hostname).connect(ClientFactory())
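# Minimal wiring sketch, not part of the original file: a coordinator process would
# typically call listen() and each worker would call connect() before starting the
# twisted reactor. The port and hostname values below are assumptions.
def _example_start_coordinator(port=9999):
    listen(port)
    reactor.run()


def _example_start_worker(hostname='localhost:9999'):
    connect(hostname, worker=None, on_connect=lambda: log.info('connected'))
    reactor.run()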
|
mit
| 5,373,871,410,311,807,000 | 33.528846 | 79 | 0.627123 | false |
helix84/activae
|
src/CTK_trunk/CTK/ToggleButton.py
|
1
|
4484
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Avenida
# Clara Campoamor, s/n. 06200 Almendralejo (Badajoz), Spain
#
# NOTE: This version of CTK is a fork re-licensed by its author. The
# mainstream version of CTK is available under a GPLv2 license
# at the Cherokee Project source code repository.
#
from Widget import Widget
from util import props_to_str
from Image import ImageStock
HTML = """
<div id="%(id)s" %(props)s>
%(on_html)s
%(off_html)s
<input id="hidden_%(id)s" name="%(id)s" type="hidden" value="%(value)s" />
</div>
"""
JS = """
/* On click
*/
$('#%(id)s').click (function() {
var self = $(this);
var hidden = self.find ('input:hidden');
var val = hidden.val();
if (val == "0") {
hidden.val("1");
self.find('#%(on_id)s').show();
self.find('#%(off_id)s').hide();
} else {
hidden.val("0");
self.find('#%(off_id)s').show();
self.find('#%(on_id)s').hide();
}
self.trigger({'type': "changed", 'value': val});
return false;
});
/* Init
*/
var self = $('#%(id)s');
if ("%(value)s" == "1") {
self.find('#%(on_id)s').show();
self.find('#%(off_id)s').hide();
} else {
self.find('#%(off_id)s').show();
self.find('#%(on_id)s').hide();
}
"""
class ToggleButtonImages (Widget):
def __init__ (self, on, off, active=True, props={}):
Widget.__init__ (self)
self.props = props.copy()
self.active = active
self.widget_on = on
self.widget_off = off
if 'class' in props:
self.props['class'] += " togglebutton"
else:
self.props['class'] = "togglebutton"
self.id = props.pop('id', "togglebutton_%d"%(self.uniq_id))
# Public interface
#
def Render (self):
id = self.id
props = props_to_str (self.props)
on_id = self.widget_on.id
off_id = self.widget_off.id
value = "01"[int(self.active)]
# Render embedded images
render_on = self.widget_on.Render()
render_off = self.widget_off.Render()
on_html = render_on.html
off_html = render_off.html
# Render
render = Widget.Render (self)
render.html += HTML %(locals())
render.js += JS %(locals())
# Merge the image renders, just in case
render_on.html = ''
render_off.html = ''
render += render_on
render += render_off
return render
class ToggleButtonOnOff (ToggleButtonImages):
def __init__ (self, active=True, props={}):
ToggleButtonImages.__init__ (self,
ImageStock('on', {'title': _("Disable")}),
ImageStock('off', {'title': _("Enable")}),
active, props.copy())
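# Hedged usage sketch, not part of the original widget module: instantiating a
# ToggleButtonOnOff and reading the rendered HTML/JS. The surrounding CTK page
# machinery (and the gettext `_` setup it provides) is assumed to be in place.
def _example_render_toggle():
    toggle = ToggleButtonOnOff(active=False, props={'class': 'my-toggle'})
    render = toggle.Render()
    return render.html, render.js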
|
bsd-3-clause
| -6,415,709,346,895,044,000 | 31.258993 | 80 | 0.617306 | false |
pocin/kbc-mailchimp-writer
|
tests/test_cleaning_tags.py
|
1
|
2758
|
import pytest
from mcwriter.exceptions import MissingFieldError, CleaningError
from mcwriter.cleaning import clean_and_validate_tags_data, _clean_tags_options
def test_cleaning_tags_minimal_example():
data = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
}
expected = data
cleaned = clean_and_validate_tags_data(data)
assert cleaned == expected
def test_cleaning_tags_missing_required_field():
data = {
'name': 'My custom tag',
}
expected = data
with pytest.raises(MissingFieldError):
clean_and_validate_tags_data(data)
def test_cleaning_tags_invalid_type():
data = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'qux',
}
with pytest.raises(CleaningError):
clean_and_validate_tags_data(data)
def test_cleaning_tags_optional_bool_fields():
data = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'required': 'true'
}
expected = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'required': True
}
cleaned = clean_and_validate_tags_data(data)
assert cleaned == expected
def test_cleaning_tags_optional_str_fields():
data = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'tag': 'MYTAG'
}
expected = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'tag': 'MYTAG'
}
cleaned = clean_and_validate_tags_data(data)
assert cleaned == expected
def test_cleaning_tags_integer_fields():
data = {
'name': 'My custom tag',
'type': 'text',
'list_id': 'abc01234',
'display_order': '2'
}
expected = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'display_order': 2
}
cleaned = clean_and_validate_tags_data(data)
assert cleaned == expected
def test_cleaning_tags_integer_fields_fails_on_wrong_dtype():
data = {
'name': 'My custom tag',
'list_id': 'abc01234',
'type': 'text',
'display_order': 'foo'
}
with pytest.raises(CleaningError):
clean_and_validate_tags_data(data)
def test_cleaning_tags_options_cleaning_int():
data = {
'options__default_country':'3',
}
expected = {
'options__default_country': 3
}
cleaned = _clean_tags_options(data)
assert cleaned == expected
def test_cleaning_tags_options_cleaning_int_wrong_type():
data = {
'options__default_country': 'xxx',
}
with pytest.raises(CleaningError):
_clean_tags_options(data)
|
mit
| 103,546,474,157,596,140 | 22.982609 | 79 | 0.565265 | false |
common-workflow-language/cwltool
|
tests/test_udocker.py
|
1
|
3095
|
"""Test optional udocker feature."""
import copy
import os
import subprocess
import sys
from pathlib import Path
import pytest
from _pytest.tmpdir import TempPathFactory
from .util import get_data, get_main_output, working_directory
LINUX = sys.platform in ("linux", "linux2")
@pytest.fixture(scope="session")
def udocker(tmp_path_factory: TempPathFactory) -> str:
"""Udocker fixture, returns the path to the udocker script."""
test_cwd = os.getcwd()
test_environ = copy.copy(os.environ)
docker_install_dir = str(tmp_path_factory.mktemp("udocker"))
with working_directory(docker_install_dir):
url = "https://raw.githubusercontent.com/jorge-lip/udocker-builds/master/tarballs/udocker-1.1.4.tar.gz"
install_cmds = [
["curl", url, "-o", "./udocker-tarball.tgz"],
["tar", "xzvf", "udocker-tarball.tgz", "udocker"],
[
"bash",
"-c",
"UDOCKER_TARBALL={}/udocker-tarball.tgz ./udocker install".format(
docker_install_dir
),
],
]
test_environ["UDOCKER_DIR"] = os.path.join(docker_install_dir, ".udocker")
test_environ["HOME"] = docker_install_dir
results = []
for _ in range(3):
results = [subprocess.call(cmds, env=test_environ) for cmds in install_cmds]
if sum(results) == 0:
break
subprocess.call(["rm", "./udocker"])
assert sum(results) == 0
udocker_path = os.path.join(docker_install_dir, "udocker")
return udocker_path
@pytest.mark.skipif(not LINUX, reason="LINUX only")
def test_udocker_usage_should_not_write_cid_file(udocker: str, tmp_path: Path) -> None:
"""Confirm that no cidfile is made when udocker is used."""
with working_directory(tmp_path):
test_file = "tests/wf/wc-tool.cwl"
job_file = "tests/wf/wc-job.json"
error_code, stdout, stderr = get_main_output(
[
"--debug",
"--default-container",
"debian",
"--user-space-docker-cmd=" + udocker,
get_data(test_file),
get_data(job_file),
]
)
cidfiles_count = sum(1 for _ in tmp_path.glob("*.cid"))
assert "completed success" in stderr, stderr
assert cidfiles_count == 0
@pytest.mark.skipif(
not LINUX or "GITHUB" in os.environ,
reason="Linux only",
)
def test_udocker_should_display_memory_usage(udocker: str, tmp_path: Path) -> None:
"""Confirm that memory ussage is logged even with udocker."""
with working_directory(tmp_path):
error_code, stdout, stderr = get_main_output(
[
"--enable-ext",
"--default-container=debian",
"--user-space-docker-cmd=" + udocker,
get_data("tests/wf/timelimit.cwl"),
"--sleep_time",
"10",
]
)
assert "completed success" in stderr, stderr
assert "Max memory" in stderr, stderr
|
apache-2.0
| 5,282,559,012,136,588,000 | 30.907216 | 111 | 0.572213 | false |
tobias-d-oe/plugin.program.tvhighlights
|
default.py
|
1
|
21271
|
#!/usr/bin/python
###########################################################################
#
# FILE: plugin.program.tvhighlights/default.py
#
# AUTHOR: Tobias D. Oestreicher
#
# LICENSE: GPLv3 <http://www.gnu.org/licenses/gpl.txt>
# VERSION: 0.1.5
# CREATED: 05.02.2016
#
###########################################################################
import urllib
import urllib2
import os
import re
import sys
import xbmc
import xbmcgui
import xbmcaddon
import time
import datetime
import json
from resources.lib.tvhighlights import TVDScraper
__addon__ = xbmcaddon.Addon()
__addonID__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
__path__ = __addon__.getAddonInfo('path')
__LS__ = __addon__.getLocalizedString
__icon__ = xbmc.translatePath(os.path.join(__path__, 'icon.png'))
__showOutdated__ = True if __addon__.getSetting('showOutdated').upper() == 'TRUE' else False
__maxHLCat__ = int(re.match('\d+', __addon__.getSetting('max_hl_cat')).group())
__prefer_hd__ = True if __addon__.getSetting('prefer_hd').upper() == 'TRUE' else False
WINDOW = xbmcgui.Window(10000)
OSD = xbmcgui.Dialog()
TVDURL = 'http://www.tvdigital.de/tv-tipps/heute/'
# Helpers
def notifyOSD(header, message, icon=xbmcgui.NOTIFICATION_INFO, disp=4000, enabled=True):
if enabled:
OSD.notification(header.encode('utf-8'), message.encode('utf-8'), icon, disp)
def writeLog(message, level=xbmc.LOGNOTICE):
try:
xbmc.log('[%s %s]: %s' % (__addonID__, __version__, message.encode('utf-8')), level)
except Exception:
xbmc.log('[%s %s]: %s' % (__addonID__, __version__, 'Fatal: Message could not displayed'), xbmc.LOGERROR)
# End Helpers
ChannelTranslateFile = xbmc.translatePath(os.path.join(__path__, 'ChannelTranslate.json'))
with open(ChannelTranslateFile, 'r') as transfile:
ChannelTranslate=transfile.read().rstrip('\n')
TVDWatchtypes = ['spielfilm', 'serie', 'sport', 'unterhaltung', 'doku-und-info', 'kinder']
TVDTranslations = {'spielfilm': __LS__(30120), 'serie': __LS__(30121), 'sport': __LS__(30122), 'unterhaltung': __LS__(30123), 'doku-und-info': __LS__(30124), 'kinder':__LS__(30125)}
properties = ['ID', 'Title', 'Thumb', 'Time', 'Channel', 'PVRID', 'Logo', 'Genre', 'Comment', 'Duration', 'Extrainfos', 'WatchType']
infoprops = ['Title', 'Picture', 'Subtitle', 'Description', 'Broadcastdetails', 'Channel', 'ChannelID', 'Logo', 'Date', 'StartTime', 'RunTime', 'EndTime', 'Keywords']
# create category list from selection in settings
def categories():
cats = []
for category in TVDWatchtypes:
if __addon__.getSetting(category).upper() == 'TRUE': cats.append(category)
return cats
# get remote URL, replace '\' and optional split into css containers
def getUnicodePage(url, container=None):
try:
req = urllib2.urlopen(url.encode('utf-8'))
except UnicodeDecodeError:
req = urllib2.urlopen(url)
except ValueError:
return False
encoding = 'utf-8'
if "content-type" in req.headers and "charset=" in req.headers['content-type']:
encoding=req.headers['content-type'].split('charset=')[-1]
content = unicode(req.read(), encoding).replace("\\", "")
if container is None: return content
return content.split(container)
# get parameter hash, convert into parameter/value pairs, return dictionary
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
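# Illustrative sketch, not part of the original add-on: how the plugin parameter
# string is expected to be parsed. The query string below is an assumption; values
# stay URL-encoded until urllib.unquote_plus() is applied in the main section below.
def _example_parse_params():
    params = parameters_string_to_dict('?methode=infopopup&detailurl=http%3A%2F%2Fexample')
    # -> {'methode': 'infopopup', 'detailurl': 'http%3A%2F%2Fexample'}
    return params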
# get used dateformat of kodi
def getDateFormat():
df = xbmc.getRegion('dateshort')
tf = xbmc.getRegion('time').split(':')
try:
# time format is 12h with am/pm
return df + ' ' + tf[0][0:2] + ':' + tf[1] + ' ' + tf[2].split()[1]
except IndexError:
# time format is 24h with or w/o leading zero
return df + ' ' + tf[0][0:2] + ':' + tf[1]
# convert datetime string to timestamp with workaround python bug (http://bugs.python.org/issue7980) - Thanks to BJ1
def date2timeStamp(date, format):
try:
dtime = datetime.datetime.strptime(date, format)
except TypeError:
try:
dtime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(date, format)))
except ValueError:
return False
except Exception:
return False
return int(time.mktime(dtime.timetuple()))
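# Hedged example, not part of the original file: combining a broadcast date string
# with date2timeStamp(). The literal date below is made up; the format string
# '%d.%m.%Y %H:%M' matches the one used for broadcast times further down.
def _example_broadcast_timestamp():
    ts = date2timeStamp('05.02.2016 20:15', '%d.%m.%Y %H:%M')
    return ts  # integer epoch seconds, or False if parsing failed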
# get pvr channelname, translate from TVHighlights to pvr channelname if necessary
def channelName2channelId(channelname):
query = {
"jsonrpc": "2.0",
"method": "PVR.GetChannels",
"params": {"channelgroupid": "alltv"},
"id": 1
}
res = json.loads(xbmc.executeJSONRPC(json.dumps(query, encoding='utf-8')))
# translate via json if necessary
trans = json.loads(str(ChannelTranslate))
for tr in trans:
if channelname == tr['name']:
writeLog("Translating %s to %s" % (channelname,tr['pvrname']), level=xbmc.LOGDEBUG)
channelname = tr['pvrname']
if 'result' in res and 'channels' in res['result']:
res = res['result'].get('channels')
for channels in res:
# prefer HD Channel if available
if __prefer_hd__ and (channelname + " HD").lower() in channels['label'].lower():
writeLog("TVHighlights found HD priorized channel %s" % (channels['label']), level=xbmc.LOGDEBUG)
return channels['channelid']
#if channelname.lower() in channels['label'].lower():
if channelname.lower() == channels['label'].lower():
writeLog("TVHighlights found channel %s" % (channels['label']), level=xbmc.LOGDEBUG)
return channels['channelid']
return False
# get pvr channelname by id
def pvrchannelid2channelname(channelid):
query = {
"jsonrpc": "2.0",
"method": "PVR.GetChannels",
"params": {"channelgroupid": "alltv"},
"id": 1
}
res = json.loads(xbmc.executeJSONRPC(json.dumps(query, encoding='utf-8')))
if 'result' in res and 'channels' in res['result']:
res = res['result'].get('channels')
for channels in res:
if channels['channelid'] == channelid:
writeLog("TVHighlights found id for channel %s" % (channels['label']), level=xbmc.LOGDEBUG)
return channels['label']
return False
# get pvr channel logo url
def pvrchannelid2logo(channelid):
query = {
"jsonrpc": "2.0",
"method": "PVR.GetChannelDetails",
"params": {"channelid": channelid, "properties": ["thumbnail"]},
"id": 1
}
res = json.loads(xbmc.executeJSONRPC(json.dumps(query, encoding='utf-8')))
if 'result' in res and 'channeldetails' in res['result'] and 'thumbnail' in res['result']['channeldetails']:
return res['result']['channeldetails']['thumbnail']
else:
return False
def switchToChannel(pvrid):
writeLog('Switch to channel id %s' % (pvrid), level=xbmc.LOGDEBUG)
query = {
"jsonrpc": "2.0",
"id": 1,
"method": "Player.Open",
"params": {"item": {"channelid": int(pvrid)}}
}
res = json.loads(xbmc.executeJSONRPC(json.dumps(query, encoding='utf-8')))
if 'result' in res and res['result'] == 'OK':
return True
else:
writeLog('Couldn\'t switch to channel id %s' % (pvrid), level=xbmc.LOGDEBUG)
return False
# clear all info properties (info window) in Home Window
def clearInfoProperties():
writeLog('clear all info properties (used in info popup)', level=xbmc.LOGDEBUG)
for property in infoprops:
WINDOW.clearProperty('TVHighlightsToday.Info.%s' % (property))
for i in range(1, 6, 1):
WINDOW.clearProperty('TVHighlightsToday.Info.RatingType.%s' % (i))
WINDOW.clearProperty('TVHighlightsToday.Info.Rating.%s' % (i))
# clear content of widgets in Home Window
def clearWidgets(start_from=1):
writeLog('Clear widgets from #%s and up' % (start_from), level=xbmc.LOGDEBUG)
for i in range(start_from, 17, 1):
for property in properties:
WINDOW.clearProperty('TVHighlightsToday.%s.%s' % (i, property))
def refreshWidget(category, offset=0, limit=True):
if not __showOutdated__:
writeLog("TVHighlights: Show only upcoming events", level=xbmc.LOGDEBUG)
blobs = WINDOW.getProperty('TVD.%s.blobs' % category)
if blobs == '': return 0
widget = 1
for i in range(1, int(blobs) + 1, 1):
if (limit and widget > __maxHLCat__) or offset + widget > 16:
writeLog('Max. Limit of widgets reached, abort processing', level=xbmc.LOGDEBUG)
break
writeLog('Processing blob TVD.%s.%s for widget #%s' % (category, i, offset + widget), level=xbmc.LOGDEBUG)
blob = eval(WINDOW.getProperty('TVD.%s.%s' % (category, i)))
if not __showOutdated__:
_now = datetime.datetime.now()
try:
_st = '%s.%s.%s %s' % (_now.day, _now.month, _now.year, blob['time'])
if date2timeStamp(_st, '%d.%m.%Y %H:%M') + 60 * int(blob['runtime']) < int(time.time()):
writeLog('TVHighlights: discard blob TVD.%s.%s, broadcast @%s has already finished' % (category, i, _st), level=xbmc.LOGDEBUG)
continue
except ValueError:
writeLog('Could not determine any date value, discard blob TVD.%s.%s' % (category, i), level=xbmc.LOGERROR)
continue
WINDOW.setProperty('TVHighlightsToday.%s.ID' % (offset + widget), blob['id'])
WINDOW.setProperty('TVHighlightsToday.%s.Title' % (offset + widget), blob['title'])
WINDOW.setProperty('TVHighlightsToday.%s.Thumb' % (offset + widget), blob['thumb'])
WINDOW.setProperty('TVHighlightsToday.%s.Time' % (offset + widget), blob['time'])
WINDOW.setProperty('TVHighlightsToday.%s.Channel' % (offset + widget), blob['pvrchannel'])
WINDOW.setProperty('TVHighlightsToday.%s.PVRID' % (offset + widget), blob['pvrid'])
WINDOW.setProperty('TVHighlightsToday.%s.Logo' % (offset + widget), blob['logo'])
WINDOW.setProperty('TVHighlightsToday.%s.Genre' % (offset + widget), blob['genre'])
WINDOW.setProperty('TVHighlightsToday.%s.Comment' % (offset + widget), blob['outline'])
WINDOW.setProperty('TVHighlightsToday.%s.Extrainfos' % (offset + widget), blob['extrainfos'])
WINDOW.setProperty('TVHighlightsToday.%s.Popup' % (offset + widget), blob['popup'])
WINDOW.setProperty('TVHighlightsToday.%s.WatchType' % (offset + widget), TVDTranslations[blob['category']])
widget += 1
return widget - 1
def refreshHighlights():
offset = 0
numcats = len(categories())
if numcats == 1:
limit = False
else:
limit = True
for category in categories():
offset += refreshWidget(category, offset, limit=limit)
clearWidgets(offset + 1)
WINDOW.setProperty('numCategories', str(numcats))
def searchBlob(item, value):
for category in TVDWatchtypes:
blobs = WINDOW.getProperty('TVD.%s.blobs' % category)
if blobs == '':
writeLog('No blobs for cat %s' % (category), level=xbmc.LOGDEBUG)
continue
for idx in range(1, int(blobs) + 1, 1):
blob = eval(WINDOW.getProperty('TVD.%s.%s' % (category, idx)))
if blob[item] == value.decode('utf-8'):
writeLog('Found value \'%s\' in item \'%s\' of blob \'TVD.%s.%s\'' % (value.decode('utf-8'), item, category, idx), level=xbmc.LOGDEBUG)
return blob
return False
def scrapeTVDPage(category):
url = '%s%s/' % (TVDURL, category)
writeLog('Start scraping category %s from %s' % (category, url), level=xbmc.LOGDEBUG)
content = getUnicodePage(url, container='class="highlight-container"')
i = 1
content.pop(0)
blobs = WINDOW.getProperty('TVD.%s.blobs' % (category))
if blobs != '':
for idx in range(1, int(blobs) + 1, 1):
WINDOW.clearProperty('TVD.%s.%s' % (category, idx))
for container in content:
data = TVDScraper()
data.scrapeHighlights(container)
pvrchannelID = channelName2channelId(data.channel)
if not pvrchannelID:
writeLog("TVHighlights: Channel %s is not in PVR, discard entry" % (data.channel), level=xbmc.LOGDEBUG)
continue
logoURL = pvrchannelid2logo(pvrchannelID)
channel = pvrchannelid2channelname(pvrchannelID)
writeLog('', level=xbmc.LOGDEBUG)
writeLog('ID: TVD.%s.%s' %(category, i), level=xbmc.LOGDEBUG)
writeLog('Title: %s' % (data.title), level=xbmc.LOGDEBUG)
writeLog('Thumb: %s' % (data.thumb), level=xbmc.LOGDEBUG)
writeLog('Start time: %s' % (data.starttime), level=xbmc.LOGDEBUG)
writeLog('Running Time: %s' % (data.runtime), level=xbmc.LOGDEBUG)
writeLog('Channel (TVD): %s' % (data.channel), level=xbmc.LOGDEBUG)
writeLog('Channel (PVR): %s' % (channel), level=xbmc.LOGDEBUG)
writeLog('ChannelID (PVR): %s' % (pvrchannelID), level=xbmc.LOGDEBUG)
writeLog('Channel logo: %s' % (logoURL), level=xbmc.LOGDEBUG)
writeLog('Genre: %s' % (data.genre), level=xbmc.LOGDEBUG)
writeLog('Outline: %s' % (data.outline), level=xbmc.LOGDEBUG)
writeLog('Extrainfos: %s' % (data.extrainfos), level=xbmc.LOGDEBUG)
writeLog('Popup: %s' % (data.detailURL), level=xbmc.LOGDEBUG)
writeLog('Watchtype: %s' % (category), level=xbmc.LOGDEBUG)
writeLog('', level=xbmc.LOGDEBUG)
blob = {
'id': unicode('TVD.%s.%s' % (i, category)),
'title': unicode(data.title),
'thumb': unicode(data.thumb),
'time': unicode(data.starttime),
'runtime': unicode(data.runtime),
'endtime': unicode(data.endtime),
'channel': unicode(data.channel),
'pvrchannel': unicode(channel),
'pvrid': unicode(pvrchannelID),
'logo': unicode(logoURL),
'genre': unicode(data.genre),
'outline': unicode(unicode(data.outline)),
'extrainfos': unicode(data.extrainfos),
'popup': unicode(data.detailURL),
'category': unicode(category),
}
WINDOW.setProperty('TVD.%s.%s' % (category, i), str(blob))
i += 1
WINDOW.setProperty('TVD.%s.blobs' % (category), str(i - 1))
# Set details to Window (INFO Labels)
def showInfoWindow(detailurl, showWindow=True):
writeLog('Set details to home/info screen', level=xbmc.LOGDEBUG)
data = TVDScraper()
writeLog('DETURL: %s' % (detailurl), level=xbmc.LOGDEBUG)
details = getUnicodePage(detailurl)
if details:
writeLog('DET: %s' % (details), level=xbmc.LOGDEBUG)
#data.scrapeDetailPage(details, 'div id="main-content" class="clearfix"')
data.scrapeDetailPage(details, '<div id="remodal-content" class="detail">')
blob = searchBlob('popup', detailurl)
broadcastinfo = '%s: %s - %s' % (blob['pvrchannel'], blob['time'], data.endtime)
writeLog('', level=xbmc.LOGDEBUG)
writeLog('Title: %s' % (blob['title']), level=xbmc.LOGDEBUG)
writeLog('Thumb: %s' % (blob['thumb']), level=xbmc.LOGDEBUG)
writeLog('Channel (TVD): %s' % (blob['channel']), level=xbmc.LOGDEBUG)
writeLog('Channel (PVR): %s' % (blob['pvrchannel']), level=xbmc.LOGDEBUG)
writeLog('ChannelID: %s' % (blob['pvrid']), level=xbmc.LOGDEBUG)
writeLog('Start Time: %s' % (blob['time']), level=xbmc.LOGDEBUG)
writeLog('End Time: %s' % (data.endtime), level=xbmc.LOGDEBUG)
writeLog('Rating Value: %s' % (data.ratingValue), level=xbmc.LOGDEBUG)
writeLog('Best Rating: %s' % (data.bestRating), level=xbmc.LOGDEBUG)
writeLog('Description: %s' % (data.plot or __LS__(30140)), level=xbmc.LOGDEBUG)
writeLog('Keywords: %s' % (data.keywords), level=xbmc.LOGDEBUG)
writeLog('Rating Data: %s' % (data.ratingdata), level=xbmc.LOGDEBUG)
writeLog('Broadcast Flags: %s' % (data.broadcastflags), level=xbmc.LOGDEBUG)
writeLog('', level=xbmc.LOGDEBUG)
clearInfoProperties()
WINDOW.setProperty("TVHighlightsToday.Info.isInFuture", "")
WINDOW.setProperty("TVHighlightsToday.Info.isRunning", "")
now = datetime.datetime.now()
_st = '%s.%s.%s %s' % (now.day, now.month, now.year, blob['time'])
# try:
# _date = time.strftime(getDateFormat(), time.strptime(_st, '%d.%m.%Y %H:%M'))
#
# timestamp = date2timeStamp(_st, '%d.%m.%Y %H:%M')
#
# if timestamp >= int(time.time()):
# writeLog('Start time of title \'%s\' is @%s, enable switchtimer button' % (blob['title'], blob['time']), level=xbmc.LOGDEBUG)
# WINDOW.setProperty("TVHighlightsToday.Info.isInFuture", "yes")
# elif timestamp < int(time.time()) < timestamp + 60 * int(blob['runtime']):
# writeLog('Title \'%s\' is currently running, enable switch button' % (blob['title']), level=xbmc.LOGDEBUG)
# WINDOW.setProperty("TVHighlightsToday.Info.isRunning", "yes")
# except ImportError:
# writeLog('Could not make time conversion, strptime locked', level=xbmc.LOGERROR)
# _date = ''
#
WINDOW.setProperty("TVHighlightsToday.Info.Title", blob['title'])
WINDOW.setProperty("TVHighlightsToday.Info.Picture", blob['thumb'])
WINDOW.setProperty("TVHighlightsToday.Info.Subtitle", blob['outline'])
WINDOW.setProperty("TVHighlightsToday.Info.Description", data.plot or __LS__(30140))
WINDOW.setProperty("TVHighlightsToday.Info.Broadcastdetails", broadcastinfo)
WINDOW.setProperty("TVHighlightsToday.Info.Channel", blob['pvrchannel'])
WINDOW.setProperty("TVHighlightsToday.Info.ChannelID", blob['pvrid'])
WINDOW.setProperty("TVHighlightsToday.Info.Logo", blob['logo'])
# WINDOW.setProperty("TVHighlightsToday.Info.Date", _date)
WINDOW.setProperty("TVHighlightsToday.Info.StartTime", blob['time'])
WINDOW.setProperty("TVHighlightsToday.Info.RunTime", blob['runtime'])
WINDOW.setProperty("TVHighlightsToday.Info.EndTime", data.endtime)
WINDOW.setProperty("TVHighlightsToday.Info.Keywords", blob['genre'])
# Ratings
i = 1
for r in data.ratingdata:
WINDOW.setProperty( "TVHighlightsToday.Info.RatingType.%s" %(i), r['ratingtype'] )
WINDOW.setProperty( "TVHighlightsToday.Info.Rating.%s" %(i), r['rating'][0] )
i += 1
if showWindow:
Popup = xbmcgui.WindowXMLDialog('script-GTO-InfoWindow.xml', __path__, 'Default', '720p')
# Popup = xbmcgui.WindowXMLDialog('script-TVHighlights-DialogWindow.xml', __path__, 'Default', '720p')
Popup.doModal()
else:
notifyOSD(__LS__(30010), __LS__(30140))
# M A I N
#________
# Get starting methode & params
methode = None
detailurl = None
pvrid = None
if len(sys.argv)>1:
params = parameters_string_to_dict(sys.argv[1])
methode = urllib.unquote_plus(params.get('methode', ''))
detailurl = urllib.unquote_plus(params.get('detailurl', ''))
pvrid = urllib.unquote_plus(params.get('pvrid', ''))
writeLog("Methode called from external script: %s" % (methode), level=xbmc.LOGDEBUG)
writeLog("Detailurl provided from external script: %s" % (detailurl), level=xbmc.LOGDEBUG)
writeLog("pvrid provided from external script: %s" % (pvrid), level=xbmc.LOGDEBUG)
if methode == 'scrape_highlights':
for category in categories():
scrapeTVDPage(category)
refreshHighlights()
elif methode == 'refresh_screen':
refreshHighlights()
elif methode == 'infopopup':
showInfoWindow(detailurl)
elif methode == 'set_details_to_home':
showInfoWindow(detailurl, showWindow=False)
elif methode == 'switch_channel':
switchToChannel(pvrid)
elif methode == 'show_select_dialog':
writeLog('Methode: show select dialog', level=xbmc.LOGDEBUG)
dialog = xbmcgui.Dialog()
cats = [__LS__(30120), __LS__(30121), __LS__(30122), __LS__(30123), __LS__(30124), __LS__(30125), __LS__(30116)]
ret = dialog.select(__LS__(30011), cats)
if ret == 6:
for category in categories():
scrapeTVDPage(category)
refreshHighlights()
elif 0 <= ret <= 5:
writeLog('%s selected' % (cats[ret]), level=xbmc.LOGDEBUG)
scrapeTVDPage(TVDWatchtypes[ret])
empty_widgets = refreshWidget(TVDWatchtypes[ret], limit=False)
clearWidgets(empty_widgets + 1)
else:
pass
|
gpl-3.0
| 1,174,358,839,341,263,600 | 41.037549 | 181 | 0.608669 | false |
radish-bdd/radish
|
src/radish/hookregistry.py
|
1
|
12567
|
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import inspect
import bisect
import tagexpressions
from radish.errors import HookExecError
class HookImpl:
"""Represent a single Hook Implementation"""
__slots__ = ["what", "when", "func", "on_tags", "order", "is_formatter", "always"]
def __init__(
self, what, when, func, on_tags, order, is_formatter=False, always=False
):
self.what = what
self.when = when
self.func = func
self.on_tags = on_tags
self.order = order
self.is_formatter = is_formatter
self.always = always
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self) -> str:
return "<HookImpl @{}.{} for tags {} with order {}>".format(
self.when, self.what, self.on_tags, self.order
)
def __hash__(self):
return hash(
(
self.what,
self.when,
self.func,
self.on_tags,
self.order,
self.is_formatter,
self.always,
)
)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (
self.what == other.what
and self.when == other.when # noqa
and self.func == other.func # noqa
and self.on_tags == other.on_tags # noqa
and self.order == other.order # noqa
and self.is_formatter == other.is_formatter # noqa
and self.always == other.always # noqa
)
def __lt__(self, other):
return self.order < other.order
def __le__(self, other):
return self.order <= other.order
def __gt__(self, other):
return self.order > other.order
def __ge__(self, other):
return self.order >= other.order
class GeneratorHookImpl:
"""Specialized Hook Implementation for Generator Hooks
A Generator Hook uses a yield statement to separate
the `before` and `after` part of a Hook.
"""
def __init__(self, func):
self.func = func
self.generator = None
def __call__(self, *args, **kwargs):
if self.generator is None:
# hook is called the first time, thus we create it and
# consume the `before` part of it
self.generator = self.func(*args, **kwargs)
return next(self.generator)
else:
# the hook is called the second time, thus we
# consume the `after` part of it and expect it to be exhausted
try:
return next(self.generator)
except StopIteration:
pass # raised when the generator is exhausted
# reset the generator for the next "before" call.
# NOTE(TF): this introduces the thread-unsafety,
# which is fine for the moment, I guess ...
# A better implementation might be to use
# a factory approach which would create
            # unique hook instances for each occurrence.
self.generator = None
def __name__(self):
        return self.func.__name__
class HookRegistry:
"""The ``HookRegistry`` keeps track of all declared ``HookImpl``s"""
DEFAULT_HOOK_ORDER = 100
GENERATOR_HOOK_NAMES = {
"for_all",
"each_feature",
"each_rule",
"each_scenario",
"each_step",
}
def __init__(self):
#: Holds a set of all possible Hook combinations
self._hooks = {
"before": {
"all": [],
"each_feature": [],
"each_rule": [],
"each_scenario": [],
"each_step": [],
},
"after": {
"all": [],
"each_feature": [],
"each_rule": [],
"each_scenario": [],
"each_step": [],
},
}
def register(
self, what, when, func, on_tags, order, is_formatter=False, always=False
):
"""Register the given Hook for later execution"""
if inspect.isgeneratorfunction(func):
# the registered function is a generator hook
generator_hook = GeneratorHookImpl(func)
before_hook_impl = HookImpl(
what, "before", generator_hook, on_tags, order, is_formatter, always
)
after_hook_impl = HookImpl(
what, "after", generator_hook, on_tags, order * -1, is_formatter, always
)
if (
before_hook_impl in self._hooks["before"][what]
and after_hook_impl in self._hooks["after"][what]
):
# NOTE: allow a Hook Implementation to be registered multiple times.
                # This can happen when one hook module imports another in the same
# RADISH_BASEDIR.
return
# insert the HookImpl in the order given by ``order``.
bisect.insort_right(self._hooks["before"][what], before_hook_impl)
bisect.insort_right(self._hooks["after"][what], after_hook_impl)
else:
# we have regular hook
hook_impl = HookImpl(what, when, func, on_tags, order, is_formatter, always)
if hook_impl in self._hooks[when][what]:
# NOTE: allow a Hook Implementation to be registered multiple times.
                # This can happen when one hook module imports another in the same
# RADISH_BASEDIR.
return
# insert the HookImpl in the order given by ``order``.
bisect.insort_right(self._hooks[when][what], hook_impl)
def create_hook_decorators(self, context=None):
"""Create Hook decorators for all hook combinations
The created Hook decorators are injected into the given ``dict``-like ``context`` object.
If the given ``context`` is ``None`` the Hooks will be injected into ``globals()``.
"""
if context is None:
context = globals()
created_decorator_names = []
for when, whats in self._hooks.items():
class HookProvider:
"""whuat"""
def __init__(self, when):
self.when = when
when_object = HookProvider(when)
for what in whats.keys():
def __create_decorator(what, when):
def __decorator(
on_tags=None,
order=self.DEFAULT_HOOK_ORDER,
is_formatter=False,
always=False,
):
if on_tags is None:
on_tags = []
def __wrapper(func):
self.register(
what, when, func, on_tags, order, is_formatter, always
)
return func
return __wrapper
__decorator.__doc__ = """Decorator to register a hook function
A hook function registered with this decorator will be run {when} {what}.
Args:
on_tags (list): a list of :class:`Tag` names for which this hook will be run
order (int): a number which is used to order the registered hooks when running them
is_formatter (bool): flag to indicate that the hook is a formatter.
Formatter hooks are run even if ``on_tags`` do not match
always (bool): flag to indicate that the hook should always be run.
Only enable this ``True`` if the hook doesn't depend on the Feature File
you will be running.
""".format( # noqa
when=when, what=what
)
return __decorator
setattr(when_object, what, __create_decorator(what, when))
context[when] = when_object
created_decorator_names.append(when)
return created_decorator_names
def create_generator_hook_decorators(self, context=None):
"""Create Generator Hook decorators for models
The created Hook decorators are injected into the given ``dict``-like ``context`` object.
If the given ``context`` is ``None`` the Hooks will be injected into ``globals()``.
"""
if context is None:
context = globals()
created_decorator_names = []
for what in self.GENERATOR_HOOK_NAMES:
def __create_decorator(what):
def __decorator(
on_tags=None,
order=self.DEFAULT_HOOK_ORDER,
is_formatter=False,
always=False,
):
if on_tags is None:
on_tags = []
def __wrapper(func):
self.register(
what, None, func, on_tags, order, is_formatter, always
)
return func
return __wrapper
__decorator.__doc__ = """Decorator to register a generator hook function
A generator hook function registered with this decorator will be run
twice for {what}, once for the `before` part before the yield statement and once
after the yield statement for the `after` part.
.. code-block:: python
@{what}
def do_{what}(model):
# do some setup
setup()
yield
# do some teardown
teardown()
Args:
on_tags (list): a list of :class:`Tag` names for which this hook will be run
order (int): a number which is used to order the registered hooks when running them
is_formatter (bool): flag to indicate that the hook is a formatter.
Formatter hooks are run even if ``on_tags`` do not match
always (bool): flag to indicate that the hook should always be run.
Only enable this ``True`` if the hook doesn't depend on the Feature File
you will be running.
""".format( # noqa
what=what
)
return __decorator
context[what] = __create_decorator(what)
created_decorator_names.append(what)
return created_decorator_names
def call(self, what, when, only_formatters, tagged_model, *args, **kwargs):
"""Calls a registered Hook"""
hooks = self._hooks[when][what]
for hook_impl in hooks:
if not hook_impl.always:
if only_formatters and not hook_impl.is_formatter:
continue
            #: holds a flag indicating whether or not the Hook actually has to be called,
            # which is dependent on the `on_tags` setting of the HookImpl.
call_hook = True
if hook_impl.on_tags:
tag_expression = tagexpressions.parse(" or ".join(hook_impl.on_tags))
# get the Tags for models which actually have Tags
tags = (
tagged_model.get_all_tags()
if hasattr(tagged_model, "get_all_tags")
else []
)
call_hook = tag_expression.evaluate([t.name for t in tags])
if not call_hook:
continue
try:
hook_impl(tagged_model, *args, **kwargs)
except Exception as exc:
raise HookExecError(hook_impl, exc) from exc
#: Holds a global instance of the HookRegistry which shall be used
# by all modules implementing Hooks.
registry = HookRegistry()
__all__ = (
registry.create_hook_decorators() + registry.create_generator_hook_decorators()
)
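# Hedged usage sketch, not part of the original module: what a hook module built on
# top of this registry is expected to look like. The decorators `before`, `after`
# and the generator hooks such as `each_step` are injected into this module's
# globals by the create_*_decorators() calls above; the hook bodies are placeholders.
def _example_hooks():
    @before.each_scenario(on_tags=["slow"], order=10)
    def setup_slow_scenario(scenario):
        pass

    @each_step()
    def around_step(step):
        # runs before the step ...
        yield
        # ... and again after the step

    return setup_slow_scenario, around_step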
|
mit
| 3,404,934,900,386,662,000 | 34.4 | 111 | 0.505928 | false |