repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
benpicco/mate-deskbar-applet | deskbar/ui/cuemiac/CuemiacEntry.py | 1 | 5439 |
import gobject
import gtk
import deskbar.ui.iconentry
# Make epydoc document signal
__extra_epydoc_fields__ = [('signal', 'Signals')]
class CuemiacEntry (deskbar.ui.iconentry.IconEntry):
"""
For all outside purposes this widget should appear to be a gtk.Entry
with an icon inside it. Use it as such - if you find odd behavior
don't work around it, but fix the behavior in this class instead.
@signal icon-clicked: (C{gtk.gdk.Event})
"""
__gsignals__ = {
"icon-clicked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, [gobject.TYPE_PYOBJECT]),
"changed" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
"activate" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, []),
"go-next" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_BOOLEAN, []),
"go-previous" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_BOOLEAN, []),
}
def __init__(self, default_pixbuf):
deskbar.ui.iconentry.IconEntry.__init__ (self)
self.entry = self.get_entry ()
self.entry_icon = gtk.Image ()
self.icon_event_box = gtk.EventBox ()
self._default_pixbuf = default_pixbuf
# Set up the event box for the entry icon
self.icon_event_box.set_property('visible-window', False)
self.icon_event_box.add(self.entry_icon)
self.pack_widget (self.icon_event_box, True)
# Set up icon
self.entry_icon.set_property('pixbuf', self._default_pixbuf)
self.icon_event_box.connect ("button-press-event", self._on_icon_button_press)
# Set up "inheritance" of the gtk.Entry
# methods
self.get_text = self.entry.get_text
self.set_text = self.entry.set_text
self.select_region = self.entry.select_region
self.set_width_chars = self.entry.set_width_chars
self.get_width_chars = self.entry.get_width_chars
self.get_position = self.entry.get_position
self.set_position = self.entry.set_position
# When applications want to forward events to
# this widget, they are 99% likely to want to forward
# to the underlying gtk.Entry widget, so:
self.event = self.entry.event
# Forward commonly used entry signals
self.handler_changed_id = self.entry.connect ("changed", lambda entry: self.emit("changed"))
self.entry.connect ("activate", lambda entry: self.emit("activate"))
self.entry.connect ("key-press-event", self.__on_key_press_event )
self.entry.connect ("button-press-event", lambda entry, event: self.emit("button-press-event", event))
self.entry.connect ("focus-out-event", lambda entry, event: self.emit("focus-out-event", event))
def __on_key_press_event(self, entry, event):
if event.keyval == gtk.keysyms.Down:
ret = self.emit("go-next")
if ret:
return True
elif event.keyval == gtk.keysyms.Up:
ret = self.emit("go-previous")
if ret:
return True
return self.emit("key-press-event", event)
def grab_focus (self):
"""
Focus the entry, ready for text input.
"""
self.entry.grab_focus ()
def set_sensitive (self, active):
"""
Set sensitivity of the entry including the icon.
"""
self.set_property ("sensitive", active)
self.entry_icon.set_sensitive (active)
self.icon_event_box.set_sensitive (active)
def get_image (self):
"""
@return: The C{gtk.Image} packed into this entry.
"""
return self.entry_icon
def set_icon (self, pixbuf):
"""
Set the icon in the entry to the given pixbuf.
@param pixbuf: A C{gtk.gdk.Pixbuf}.
"""
self.entry_icon.set_property('pixbuf', pixbuf)
self.entry_icon.set_size_request(deskbar.ICON_WIDTH, deskbar.ICON_HEIGHT)
def set_icon_tooltip (self, tooltip):
"""
@param tooltip: A string describing the action associated to clicking the entry icon.
"""
self.icon_event_box.set_tooltip_markup(tooltip)
def set_entry_tooltip (self, tooltip):
"""
@param tooltip: A string describing basic usage of the entry.
"""
self.entry.set_tooltip_markup(tooltip)
def show (self):
"""
Show the entry - including the icon.
"""
self.show_all () # We need to show the icon
def set_history_item(self, item):
if item == None:
self.set_icon( self._default_pixbuf )
self.entry.set_text("")
else:
text, match = item
self.entry.handler_block( self.handler_changed_id )
self.entry.set_text(text)
icon = match.get_icon()
if icon == None:
icon = self._default_pixbuf
if isinstance(icon, gtk.gdk.Pixbuf) :
pixbuf = icon
else:
pixbuf = deskbar.core.Utils.load_icon(icon)
self.set_icon ( pixbuf )
self.entry.select_region(0, -1)
self.entry.handler_unblock( self.handler_changed_id )
def _on_icon_button_press (self, widget, event):
if not self.icon_event_box.get_property ('sensitive'):
return False
self.emit ("icon-clicked", event)
return False
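# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how a caller might embed the entry, assuming a running gtk main loop;
# the icon file name and the clear-on-click behaviour are assumptions only.
#
#   pixbuf = gtk.gdk.pixbuf_new_from_file("search-icon.png")
#   entry = CuemiacEntry(pixbuf)
#   entry.set_entry_tooltip("Type to search")
#   entry.connect("icon-clicked", lambda widget, event: widget.set_text(""))
#   window = gtk.Window()
#   window.add(entry)
#   window.show_all()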
| gpl-2.0 | 1,979,936,256,918,578,700 | 36.253425 | 110 | 0.58963 | false |
Uli1/mapnik | scons/scons-local-2.4.0/SCons/Tool/packaging/tarbz2.py | 1 | 1780 |
"""SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/tarbz2.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.bz2')
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
target, source = stripinstallbuilder(target, source, env)
return bld(env, target, source, TARFLAGS='-jc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
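# Hedged usage sketch (not part of the original file): this module is normally
# reached through the generic Package builder in an SConstruct rather than being
# called directly; the project name and files below are illustrative assumptions.
#
#   env = Environment(tools=['default', 'packaging'])
#   env.Package(NAME='foo', VERSION='1.0', PACKAGETYPE='src_tarbz2',
#               source=env.Install('/usr/bin', 'foo'))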
| lgpl-2.1 | -4,742,350,656,658,062,000 | 39.454545 | 115 | 0.754494 | false |
GitYiheng/reinforcement_learning_test | test00_previous_files/mountaincar_q_learning.py | 1 | 4304 |
import gym
import os
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDRegressor
class FeatureTransformer:
def __init__(self, env, n_components=500):
observation_examples = np.array([env.observation_space.sample() for x in range(1000)])
scaler = StandardScaler()
scaler.fit(observation_examples)
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=5.0, n_components=n_components)),
("rbf2", RBFSampler(gamma=4.0, n_components=n_components)),
("rbf3", RBFSampler(gamma=3.0, n_components=n_components)),
("rbf4", RBFSampler(gamma=2.0, n_components=n_components)),
("rbf5", RBFSampler(gamma=1.0, n_components=n_components)),
("rbf6", RBFSampler(gamma=0.5, n_components=n_components)),
])
example_features = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = example_features.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observation):
scaled = self.scaler.transform(observation)
return self.featurizer.transform(scaled)
class Model:
def __init__(self, env, feature_transformer, learning_rate):
self.env = env
self.models = []
self.feature_transformer = feature_transformer
for i in range(env.action_space.n):
model = SGDRegressor(learning_rate=learning_rate)
model.partial_fit(feature_transformer.transform([env.reset()]), [0])
self.models.append(model)
def predict(self, s):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
return np.array([m.predict(X)[0] for m in self.models])
def update(self, s, a, G):
X = self.feature_transformer.transform([s])
assert(len(X.shape) == 2)
self.models[a].partial_fit(X, [G])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
return np.argmax(self.predict(s))
def play_one(model, eps, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 1000:
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
# Update the model
G = reward + gamma*np.max(model.predict(observation))
model.update(prev_observation, action, G)
totalreward += reward
iters += 1
return totalreward
def plot_cost_to_go(env, estimator, num_tiles=20):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Cost-To-Go == -V(s)')
ax.set_title("Cost-To-Go Function")
fig.colorbar(surf)
plt.show()
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
def main():
env = gym.make('MountainCar-v0')
ft = FeatureTransformer(env)
model = Model(env, ft, "constant")
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 300
totalrewards = np.empty(N)
for n in range(N):
eps = 0.1*(0.97**n)
totalreward = play_one(model, eps, gamma)
totalrewards[n] = totalreward
print("episode:", n, "total reward:", totalreward)
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", -totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
plot_cost_to_go(env, model)
if __name__ == '__main__':
main()
| mit | 8,215,012,395,460,675,000 | 29.316901 | 104 | 0.702602 | false |
TOTVS/mdmpublic | couchbase-cli/lib/python/pysnappy2/snappy_cffi.py | 1 | 7457 |
import sys
from cffi import FFI
if sys.hexversion > 0x03000000:
unicode = str
ffi = FFI()
ffi.cdef('''
typedef enum {
SNAPPY_OK = 0,
SNAPPY_INVALID_INPUT = 1,
SNAPPY_BUFFER_TOO_SMALL = 2
} snappy_status;
typedef uint32_t crc_t;
int snappy_compress(const char* input,
size_t input_length,
char* compressed,
size_t* compressed_length);
int snappy_uncompress(const char* compressed,
size_t compressed_length,
char* uncompressed,
size_t* uncompressed_length);
size_t snappy_max_compressed_length(size_t source_length);
int snappy_uncompressed_length(const char* compressed,
size_t compressed_length,
size_t* result);
int snappy_validate_compressed_buffer(const char* compressed,
size_t compressed_length);
crc_t crc_init(void);
crc_t crc_finalize(crc_t crc);
crc_t crc_reflect(crc_t data, size_t data_len);
crc_t crc_update(crc_t crc, const unsigned char *data, size_t data_len);
crc_t _crc32c(const char *input, int input_size);
''')
C = ffi.verify('''
#include <stdint.h>
#include <stdlib.h>
#include "snappy-c.h"
/*
* COPY of crc32c
* This is allowed since all crc code is self contained
*/
typedef uint32_t crc_t;
uint32_t crc_table[256] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4,
0xc79a971f, 0x35f1141c, 0x26a1e7e8, 0xd4ca64eb,
0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24,
0x105ec76f, 0xe235446c, 0xf165b798, 0x030e349b,
0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54,
0x5d1d08bf, 0xaf768bbc, 0xbc267848, 0x4e4dfb4b,
0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35,
0xaa64d611, 0x580f5512, 0x4b5fa6e6, 0xb93425e5,
0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45,
0xf779deae, 0x05125dad, 0x1642ae59, 0xe4292d5a,
0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595,
0x417b1dbc, 0xb3109ebf, 0xa0406d4b, 0x522bee48,
0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687,
0x0c38d26c, 0xfe53516f, 0xed03a29b, 0x1f682198,
0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38,
0xdbfc821c, 0x2997011f, 0x3ac7f2eb, 0xc8ac71e8,
0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096,
0xa65c047d, 0x5437877e, 0x4767748a, 0xb50cf789,
0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46,
0x7198540d, 0x83f3d70e, 0x90a324fa, 0x62c8a7f9,
0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36,
0x3cdb9bdd, 0xceb018de, 0xdde0eb2a, 0x2f8b6829,
0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93,
0x082f63b7, 0xfa44e0b4, 0xe9141340, 0x1b7f9043,
0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3,
0x55326b08, 0xa759e80b, 0xb4091bff, 0x466298fc,
0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033,
0xa24bb5a6, 0x502036a5, 0x4370c551, 0xb11b4652,
0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d,
0xef087a76, 0x1d63f975, 0x0e330a81, 0xfc588982,
0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622,
0x38cc2a06, 0xcaa7a905, 0xd9f75af1, 0x2b9cd9f2,
0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530,
0x0417b1db, 0xf67c32d8, 0xe52cc12c, 0x1747422f,
0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0,
0xd3d3e1ab, 0x21b862a8, 0x32e8915c, 0xc083125f,
0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90,
0x9e902e7b, 0x6cfbad78, 0x7fab5e8c, 0x8dc0dd8f,
0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1,
0x69e9f0d5, 0x9b8273d6, 0x88d28022, 0x7ab90321,
0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81,
0x34f4f86a, 0xc69f7b69, 0xd5cf889d, 0x27a40b9e,
0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351
};
crc_t crc_init(void)
{
return 0xffffffff;
}
crc_t crc_finalize(crc_t crc)
{
return crc ^ 0xffffffff;
}
crc_t crc_reflect(crc_t data, size_t data_len)
{
unsigned int i;
crc_t ret;
ret = data & 0x01;
for (i = 1; i < data_len; i++) {
data >>= 1;
ret = (ret << 1) | (data & 0x01);
}
return ret;
}
crc_t crc_update(crc_t crc, const unsigned char *data, size_t data_len)
{
unsigned int tbl_idx;
while (data_len--) {
tbl_idx = (crc ^ *data) & 0xff;
crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;
data++;
}
return crc & 0xffffffff;
}
uint32_t _crc32c(const char *input, int input_size) {
return crc_finalize(crc_update(crc_init(), input, input_size));
}
''', libraries=["snappy"])
class UncompressError(Exception):
pass
class SnappyBufferSmallError(Exception):
pass
def prepare(data):
_out_data = None
_out_size = None
_out_data = ffi.new('char[]', data)
_out_size = ffi.cast('size_t', len(data))
return (_out_data, _out_size)
def compress(data):
if isinstance(data, unicode):
data = data.encode('utf-8')
_input_data, _input_size = prepare(data)
max_compressed = C.snappy_max_compressed_length(_input_size)
_out_data = ffi.new('char[]', max_compressed)
_out_size = ffi.new('size_t*', max_compressed)
rc = C.snappy_compress(_input_data, _input_size, _out_data, _out_size)
if rc != C.SNAPPY_OK:
raise SnappyBufferSmallError()
value = ffi.buffer(ffi.cast('char*', _out_data), _out_size[0])
return value[:]
def uncompress(data):
_out_data, _out_size = prepare(data)
result = ffi.new('size_t*', 0)
rc = C.snappy_validate_compressed_buffer(_out_data, _out_size)
if not rc == C.SNAPPY_OK:
raise UncompressError()
rc = C.snappy_uncompressed_length(_out_data,
_out_size,
result)
if not rc == C.SNAPPY_OK:
raise UncompressError()
_uncompressed_data = ffi.new('char[]', result[0])
rc = C.snappy_uncompress(_out_data, _out_size, _uncompressed_data, result)
if rc != C.SNAPPY_OK:
raise UncompressError()
buf = ffi.buffer(ffi.cast('char*', _uncompressed_data), result[0])
return buf[:]
def isValidCompressed(data):
if isinstance(data, unicode):
data = data.encode('utf-8')
_out_data, _out_size= prepare(data)
rc = C.snappy_validate_compressed_buffer(_out_data, _out_size)
return rc == C.SNAPPY_OK
decompress = uncompress
def _crc32c(data):
c_data = ffi.new('char[]', data)
size = ffi.cast('int', len(data))
return int(C._crc32c(c_data, size))
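# Hedged usage sketch (added for illustration; not part of the original module):
# a simple round trip through the functions defined above. The sample payload is
# an arbitrary assumption.
if __name__ == '__main__':
    payload = b'hello hello hello hello'
    blob = compress(payload)
    assert isValidCompressed(blob)
    assert uncompress(blob) == payload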
| bsd-2-clause | -6,329,586,353,257,445,000 | 28.243137 | 78 | 0.670913 | false |
fracpete/wekamooc | moredataminingwithweka/class-4.2.py | 1 | 2966 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# More Data Mining with Weka - Class 4.2
# Copyright (C) 2014 Fracpete (fracpete at gmail dot com)
# Use the WEKAMOOC_DATA environment variable to set the location
# for the datasets
import os
data_dir = os.environ.get("WEKAMOOC_DATA")
if data_dir is None:
data_dir = "." + os.sep + "data"
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.classes import Random
from weka.classifiers import Classifier, Evaluation, SingleClassifierEnhancer
from weka.attribute_selection import ASEvaluation, ASSearch, AttributeSelection
jvm.start()
# load glass
fname = data_dir + os.sep + "glass.arff"
print("\nLoading dataset: " + fname + "\n")
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file(fname)
data.class_is_last()
classifiers = [
"weka.classifiers.trees.J48",
"weka.classifiers.lazy.IBk"
]
# cross-validate classifiers
for classifier in classifiers:
cls = Classifier(classname=classifier)
evl = Evaluation(data)
evl.crossvalidate_model(cls, data, 10, Random(1))
print("%s: %0.0f%%" % (classifier, evl.percent_correct))
# wrapper
for classifier in classifiers:
aseval = ASEvaluation(classname="weka.attributeSelection.WrapperSubsetEval",
options=["-B", classifier])
assearch = ASSearch(classname="weka.attributeSelection.BestFirst",
options=[])
attsel = AttributeSelection()
attsel.evaluator(aseval)
attsel.search(assearch)
attsel.select_attributes(data)
reduced = attsel.reduce_dimensionality(data)
cls = Classifier(classname=classifier)
evl = Evaluation(reduced)
evl.crossvalidate_model(cls, reduced, 10, Random(1))
print("%s (reduced): %0.0f%%" % (classifier, evl.percent_correct))
# meta-classifier
for wrappercls in classifiers:
for basecls in classifiers:
meta = SingleClassifierEnhancer(classname="weka.classifiers.meta.AttributeSelectedClassifier")
meta.options = \
["-E", "weka.attributeSelection.WrapperSubsetEval -B " + wrappercls,
"-S", "weka.attributeSelection.BestFirst",
"-W", basecls]
evl = Evaluation(data)
evl.crossvalidate_model(meta, data, 10, Random(1))
print("%s/%s: %0.0f%%" % (wrappercls, basecls, evl.percent_correct))
jvm.stop()
| gpl-3.0 | 8,800,521,282,337,979,000 | 36.075 | 102 | 0.708699 | false |
aemerick/galaxy_analysis | particle_analysis/sn_rate.py | 1 | 9054 |
#import yt.mods as yt
import yt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import glob
__all__ = ['future_snr', 'snr']
_core_collapse_labels = ["SNII", "II", "2", "SN_II", "TypeII", "Type 2",
"Type II", "type II", "typeII", 'core collapse']
_snia_labels = ["SN1a", "SNIa", "Type1a", "TypeIa", "Type Ia", "Type 1a",
"type 1a", "type Ia", "type ia", "type1a", "typeIa"]
_agb_labels = ['AGB', 'agb']
def future_snr(ds, data, times = None, sn_type = 'II'):
"""
Looks forward from current time to compute future (projected) SN
rate
"""
current_time = ds.current_time.convert_to_units('Myr').value
if times is None:
bin_spacing = 2.0* yt.units.Myr
# creation times are needed here, before the full particle load below
creation_time = data['creation_time'].convert_to_units('Myr').value
times = np.arange(np.min(creation_time) - bin_spacing*2.0, current_time, bin_spacing)*yt.units.Myr
elif np.size(times) == 1:
bin_spacing = times
if not hasattr(bin_spacing, 'value'):
bin_spacing = bin_spacing * yt.units.Myr
times = np.arange(current_time, current_time + 2000.0, bin_spacing)
times = times * yt.units.Myr
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units('Msun').value
creation_time = data['creation_time'].convert_to_units('Myr').value
lifetimes = data['dynamical_time'].convert_to_units('Myr').value
pt = data['particle_type']
if any( [sn_type in x for x in _core_collapse_labels]):
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt == 11) * (birth_mass <= collapse_threshold) *\
(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12) * (mass > 0.0)
elif any( [sn_type in x for x in _agb_labels]):
pcut = (pt == 11) * (birth_mass < agb_threshold)
explosion_times = creation_time[pcut] + lifetimes[pcut]
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
def snr(ds, data, times = None, sn_type = 'II'):
"""
Computes the supernova rate of the desired type for a given dataset
as a function of time. The way the particle types and particle lifetimes
are handled, this can be done for the entire galaxy history using a single
snapshot, rather than having to sort through each dump.
One can provide sample times using "times" argument, or leave it alone for
a 10 Myr sample spacing from t = 0 to t = current_time. If a single value
is provided, this is taken to be the sample spacing (dt), sampled over
t = 0 to t = current_time. Units are assumed to be Myr if not provided.
Accounts for direct collapse model in computing SNII rates using
parameter file.
"""
current_time = ds.current_time.convert_to_units('Myr').value
if times is None:
bin_spacing = 10.0 * yt.units.Myr
creation_time = data['creation_time'].convert_to_units('Myr').value
times = np.arange(np.min(creation_time), current_time, bin_spacing)*yt.units.Myr
elif np.size(times) == 1:
bin_spacing = times
if not hasattr(bin_spacing, 'value'):
bin_spacing = bin_spacing * yt.units.Myr
creation_time = data['creation_time'].convert_to_units('Myr').value
times = np.arange(np.min(creation_time), current_time, bin_spacing)
times = times *yt.units.Myr
# load particle properties
birth_mass = data['birth_mass'].value
mass = data['particle_mass'].convert_to_units("Msun").value
creation_time = data['creation_time'].convert_to_units('Myr').value
metallicity = data['metallicity_fraction'].value
# lifetimes = data['dynamical_time'].convert_to_units('Myr').value
lifetimes = data[('io','particle_model_lifetime')].convert_to_units('Myr').value
pt = data['particle_type'].value
# check to see if there are any SN candidates in the first place
# if not any([ == x for x in np.unique(pt)]):
# print "no supernova of type " + sn_type + " found"
# return times, np.zeros(np.size(times.value) - 1)
# looking for core collapse supernova rate
if any( [sn_type in x for x in _core_collapse_labels]):
pcut = (pt == 13)
# ignore stars that did not actually go supernova
collapse_threshold = ds.parameters['IndividualStarDirectCollapseThreshold']
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
if not any([(x <= collapse_threshold)*(x > agb_threshold) for x in birth_mass[pcut]]):
print("no core collapse supernova present, only direct collapse")
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (birth_mass <= collapse_threshold)*(birth_mass > agb_threshold)
elif any( [sn_type in x for x in _snia_labels]):
pcut = (pt == 12)
if np.size(mass[pcut]) < 1:
return times, np.zeros(np.size(times))
# SNIa are the ones that are just masless tracers, rest are WD
if not any(mass[pcut] == 0.0):
print("no Type Ia supernova, only white dwarfs")
print("N_WD = %i -- Lowest mass = %.3f Msun"%(np.size(mass[pcut]), np.min(mass[pcut])))
print("Current time = %.2E Myr - Next to explode at t = %.2E Myr"%(current_time, np.min(lifetimes[pcut] + creation_time[pcut])))
return times, np.zeros(np.size(times.value) - 1)
# slice!
pcut *= (mass == 0.0)
elif any( [sn_type in x for x in _agb_labels]):
agb_threshold = ds.parameters['IndividualStarSNIIMassCutoff']
pcut = (pt > 11) # all dead stars
pcut = pcut * (birth_mass <= agb_threshold)
# pcut = (pt == 12)
# pcut *= (mass > 0.0)
# pcut = pcut + ( (pt == 13) * (birth_mass <= agb_threshold))
else:
print("sn_type :" + sn_type + " not a valid option - check spelling")
return -1
#
# now get the explosion times for all supernova
# when stars go SN, lifetime is set to be lifetime*huge_number
# therefore, explosion time can be backed out as:
#
explosion_times = creation_time[pcut] + lifetimes[pcut]/ds.parameters['huge_number']
explosion_times = explosion_times * yt.units.Myr
times = times.convert_to_units('yr')
snr = np.zeros(np.size(times.value) - 1)
# compute SNR
for i in np.arange(np.size(times) - 1):
dt = times[i+1] - times[i]
dN = np.size( explosion_times[explosion_times <= times[i+1]]) -\
np.size( explosion_times[explosion_times <= times[i]])
snr[i] = dN / dt
return times, snr
if __name__ == '__main__':
# example usage - uses most recent data file
log = False
ds_list = np.sort( glob.glob('./DD????/DD????'))
ds = yt.load(ds_list[-1])
data = ds.all_data()
dt = 25.0
times = np.arange(0.0, ds.current_time.convert_to_units('Myr').value + dt, dt)
times = times*yt.units.Myr
times, snrII = snr(ds, data, times = times, sn_type = 'TypeII')
times, snrIa = snr(ds, data, times = times, sn_type = "TypeIa")
center = 0.5 * (times[1:] + times[:-1])
fig, ax = plt.subplots(figsize=(8,8))
snialabel = 'Type Ia x 10'
sniilabel = 'Core Collapse'
ftimes = np.arange(ds.current_time.convert_to_units('Myr').value,
ds.current_time.convert_to_units('Myr').value + 800.0 + 10, 10)
ftimes = ftimes * yt.units.Myr
ftimes, fsnrII = future_snr(ds, data, times = ftimes, sn_type = 'TypeII')
ftimes, fsnrIa = future_snr(ds, data, times = ftimes, sn_type = 'TypeIa')
if log:
ax.plot(center/1.0E6, snrII*1.0E6, color = 'black', lw = 3, ls = '-', label = sniilabel)
ax.plot(center/1.0E6, snrIa*1.0E6*10, color = 'black', lw = 3, ls = '--', label = snialabel)
ax.semilogy()
else:
ax.step(times[:-1]/1.0E6, snrII*1.0E6, color ='black', lw = 3, ls = '-', label = sniilabel)
ax.step(times[:-1]/1.0E6, snrIa*1.0E6 * 10, color ='orange', lw = 3, ls = '-', label = snialabel)
ax.step(ftimes[:-1]/1.0E6, fsnrII*1.0E6, color = 'black', lw = 3, ls = ':')
ax.step(ftimes[:-1]/1.0E6, fsnrIa*1.0E6 * 10, color = 'orange', lw = 3, ls = ':')
ax.set_xlabel('Time (Myr)')
ax.set_ylabel(r'SNR (Myr$^{-1}$)')
ax.set_ylim( np.min( [np.min(snrIa), np.min(snrII)])*1.0E6,
np.max( [np.max(snrIa), np.max(snrII)])*1.25*1.0E6)
ax.plot( [ds.current_time.convert_to_units('Myr').value]*2, ax.get_ylim(), ls = '--', lw = 3, color = 'black')
ax.legend(loc ='best')
plt.tight_layout()
ax.minorticks_on()
plt.savefig('snr.png')
| mit | -6,070,210,651,879,594,000 | 35.955102 | 140 | 0.594765 | false |
cqhtyi/ADfree-Player-Offline | onServer/ruletool/oredirectlist.py | 1 | 3038 |
[
{
"name": "youkujson",
"find": "https?:\/\/val[fcopb]\.atm\.youku\.com\/v[fcopb]",
"replace": "about:blank",
"extra": "adkillrule"
},
{
"name": "youkuloader",
"find": "https?:\/\/static\.youku\.com(\/v[\d\.]*)?\/v\/swf\/.*\/loaders?\.swf",
"exfind": "(bili|acfun)",
"replace": "hostsite/loader.swf",
"css": ".danmuoff .vpactionv5_iframe_wrap {top: auto !important;}",
"extra": "adkillrule"
},
{
"name": "youkuplayer",
"find": "https?:\/\/static\.youku\.com(\/v[\d\.]*)?\/v\/swf\/.*\/q?player.*\.swf",
"exfind": "(bili|acfun)",
"replace": "hostsite/player.swf",
"css": ".danmuoff .vpactionv5_iframe_wrap {top: auto !important;}",
"extra": "adkillrule"
},
{
"name": "ku6",
"find": "https?:\/\/player\.ku6cdn\.com\/default\/.*\/(v|player)\.swf",
"replace": "hostsite/ku6.swf",
"extra": "adkillrule"
},
{
"name": "tudou",
"find": "http:\/\/static\.youku\.com(\/v[\d\.]*)?\/v\/custom\/.*\/q?player.*\.swf",
"exfind": "narutom",
"replace": "hostsite/tudou.swf",
"css": ".player {height: inherit !important;}",
"extra": "adkillrule"
},
{
"name": "tudou_olc",
"find": "https?:\/\/js\.tudouui\.com\/.*olc[^\.]*\.swf",
"replace": "hostsite/olc_8.swf",
"extra": "adkillrule"
},
{
"name": "tudou_sp",
"find": "https?:\/\/js\.tudouui\.com\/.*SocialPlayer[^\.]*\.swf",
"replace": "hostsite/sp.swf",
"extra": "adkillrule"
},
{
"name": "letvsdk",
"find": "https?:\/\/player\.letvcdn\.com\/.*\/newplayer\/LetvPlayerSDK\.swf",
"exfind": "(bili|acfun|com\/zt|duowan)",
"replace": "hostsite/letvsdk.swf",
"extra": "adkillrule"
},
{
"name": "pplive",
"find": "https?:\/\/player\.pplive\.cn\/ikan\/.*\/player4player2\.swf",
"replace": "hostsite/player4player2.swf",
"extra": "adkillrule"
},
{
"name": "iqiyi",
"find": "https?:\/\/www\.iqiyi\.com\/(player\/\d+\/Player|common\/flashplayer\/\d+\/((Main)?Player_.*|[\d]{4}[a-z]+((?!aa).){6,7}))\.swf",
"exfind": "(baidu|61|178)\.iqiyi\.com|weibo|bilibili|acfun|(music|tieba)\.baidu",
"replace": "hostsite/iqiyi5.swf",
"extra": "adkillrule"
},
{
"name": "pps",
"find": "https?:\/\/www\.iqiyi\.com\/common\/.*\/pps[\w]+.swf",
"replace": "hostsite/iqiyi_out.swf",
"extra": "adkillrule"
},
{
"name": "sohu_live",
"find": "https?:\/\/(tv\.sohu\.com\/upload\/swf\/(p2p\/)?\d+|(\d+\.){3}\d+\/webplayer)\/Main\.swf",
"exfind": "(bili|acfun)",
"replace": "hostsite/sohu_live.swf",
"extra": "adkillrule"
},
{
"name": "duowan",
"find": "https?:\/\/untitled\.dwstatic\.com\/.*",
"replace": "about:blank",
"extra": "adkillrule"
}
]
| gpl-3.0 | -8,872,727,850,452,770,000 | 33.134831 | 146 | 0.473009 | false |
Mariusz-v7/MZCreeper | upload_all.py | 1 | 4210 |
#-*- coding: utf-8 -*-
import time
import getpass
import os
from selenium import webdriver
from selenium.webdriver.support.ui import Select
max_wait_time = 60
#page_url = "http://gamesstats.hopto.org/"
#page_url = "http://gamesstats.loc/"
def login(driver):
file_ = open("config/upload")
page_url = file_.readlines()
page_url = page_url[-1]#\n
page_url = page_url[0:-1]#\n
print "trying to login on " + page_url
if not os.path.exists("config/logings"):
login = raw_input("login: ")
else:
file_ = open("config/logings")
login = file_.read()
login = login[0:-1] #remove \n... wtf..?
#
if not os.path.exists("config/passwdgs"):
passwd = getpass.getpass()
else:
file_ = open("config/passwdgs")
passwd = file_.read()
passwd = passwd[0:-1] #remove \n... wtf..?
print "loading page " + page_url
driver.get(page_url)
print "page loaded", driver.current_url
print "waiting for content"
try:
driver.find_element_by_tag_name('head')
except Exception:
print "failed"
return False
print "trying to fill and send login form"
try:
driver.find_element_by_id('login').send_keys(login)
driver.find_element_by_id('pass').send_keys(passwd)
driver.find_element_by_tag_name('button').click()
except Exception:
print "failed"
return False
t = time.time()
logged_in = False
while not logged_in:
if time.time() - t > max_wait_time:
break
try:
test = driver.find_element_by_class_name("error0")
print "login succesfull"
logged_in = True
except:
time.sleep(1)
if not logged_in:
print "failed to login"
try:
driver.save_screenshot("errors/"+str(t)+"_logings_failed.png")
except Exception:
print "failed to save screenshot"
return False
return True
def training(driver):
file_ = open("config/upload")
page_url = file_.readlines()
page_url = page_url[-1]
page_url = page_url[0:-1]+"/upload" #\n
trainings = os.listdir("upload/training_reports")
for training in trainings:
print "trying to load page " + page_url
driver.get(page_url)
print "page loaded, waiting for content"
try:
driver.find_element_by_tag_name('head')
except Exception:
print "failed"
return False
print "tying to send report"
try:
file_ = open("upload/training_reports/"+training)
data = file_.read()
driver.find_element_by_tag_name('textarea').send_keys(data)
driver.find_element_by_tag_name('button').click()
time.sleep(5)
driver.find_element_by_tag_name('head')
os.remove("upload/training_reports/"+training)
except Exception:
print "failed"
return False
return True
def squad(driver):
file_ = open("config/upload")
page_url = file_.readlines()
page_url = page_url[-1]
page_url = page_url[0:-1]+"/upload" #\n
trainings = os.listdir("upload/squad")
for training in trainings:
print "trying to load page " + page_url
driver.get(page_url)
print "page loaded, waiting for content"
try:
driver.find_element_by_tag_name('head')
except Exception:
print "failed"
return False
print "trying to send squad"
try:
select = Select(driver.find_element_by_tag_name("select"))
select.select_by_value("mz_soccer_squad")
file_ = open("upload/squad/"+training)
data = file_.read()
data = unicode(data, errors='replace')
driver.find_element_by_tag_name('textarea').send_keys(data)
driver.find_element_by_tag_name('button').click()
time.sleep(5)
driver.find_element_by_tag_name('head')
os.remove("upload/squad/"+training)
except Exception:
print "failed"
return False
return True
| gpl-2.0 | -7,388,926,398,939,125,000 | 25.987179 | 74 | 0.569359 | false |
spacy-io/sense2vec | bin/cythonize.py | 1 | 6170 |
#!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'spacy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'spacy'
VENDOR = 'spaCy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.19'):
raise Exception('Building %s requires Cython >= 0.19' % VENDOR)
except ImportError:
pass
flags = ['--fast-fail']
if tofile.endswith('.cpp'):
flags += ['--cplus']
try:
try:
r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see gh-2397.
r = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main import '
'setuptools_main as main; sys.exit(main())'] + flags +
["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise Exception('Building %s requires Tempita: '
'pip install --user Tempita' % VENDOR)
with open(fromfile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, "w") as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
rules = {
# fromext : function
'.pyx' : process_pyx,
'.pyx.in' : process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, 'r') as f:
for line in f:
filename, inhash, outhash = line.split()
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
for key, value in sorted(hash_db.items()):
f.write("%s %s %s\n" % (key, value[0], value[1]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
return (from_hash, to_hash)
def process(path, fromfile, tofile, processor_function, hash_db):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
print('%s has not changed' % fullfrompath)
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print('Processing %s' % fullfrompath)
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
in_file = os.path.join(cur_dir, filename + ".in")
if filename.endswith('.pyx') and os.path.isfile(in_file):
continue
for fromext, function in rules.items():
if filename.endswith(fromext):
toext = ".cpp"
# with open(os.path.join(cur_dir, filename), 'rb') as f:
# data = f.read()
# m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
# if m:
# toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(cur_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| mit | -7,298,232,426,281,872,000 | 30.01005 | 107 | 0.602431 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/rules/single_space_before_token.py | 1 | 1995 |
from vsg import parser
from vsg import rule
from vsg import violation
from vsg.rules import utils as rules_utils
class single_space_before_token(rule.Rule):
'''
Checks for a single space between two tokens.
Parameters
----------
name : string
The group the rule belongs to.
identifier : string
unique identifier. Usually in the form of 00N.
lTokens : token type object list
A list of tokens to check for a single space after.
'''
def __init__(self, name, identifier, lTokens):
rule.Rule.__init__(self, name=name, identifier=identifier)
self.solution = None
self.phase = 2
self.lTokens = lTokens
def _get_tokens_of_interest(self, oFile):
return oFile.get_token_and_n_tokens_before_it(self.lTokens, 1)
def _analyze(self, lToi):
for oToi in lToi:
lTokens = oToi.get_tokens()
if isinstance(lTokens[0], parser.carriage_return):
continue
if not isinstance(lTokens[0], parser.whitespace):
sSolution = 'Ensure a single space before ' + lTokens[1].get_value()
oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
oViolation.set_action('insert')
self.add_violation(oViolation)
else:
if lTokens[0].get_value() != ' ':
sSolution = 'Ensure a single space before ' + lTokens[1].get_value()
oViolation = violation.New(oToi.get_line_number(), oToi, sSolution)
oViolation.set_action('adjust')
self.add_violation(oViolation)
def _fix_violation(self, oViolation):
lTokens = oViolation.get_tokens()
sAction = oViolation.get_action()
if sAction == 'insert':
rules_utils.insert_whitespace(lTokens, 1)
elif sAction == 'adjust':
lTokens[0].set_value(' ')
oViolation.set_tokens(lTokens)
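# Hedged usage sketch (not part of the original file): concrete rules are normally
# built by subclassing and passing the token types to check; the rule name,
# identifier and token class below are illustrative assumptions only.
#
#   class rule_500(single_space_before_token):
#       def __init__(self):
#           single_space_before_token.__init__(
#               self, name='example', identifier='500',
#               lTokens=[parser.open_paren])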
| gpl-3.0 | 6,567,652,907,927,217,000 | 32.813559 | 88 | 0.59198 | false |
Lindy21/CSE498-LRS | lrs/urls.py | 1 | 1256 |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('lrs.views',
url(r'^$', 'home'),
url(r'^statements/more/(?P<more_id>.{32})$', 'statements_more'),
url(r'^statements', 'statements'),
url(r'^activities/state', 'activity_state'),
url(r'^activities/profile', 'activity_profile'),
url(r'^activities', 'activities'),
url(r'^agents/profile', 'agent_profile'),
url(r'^agents', 'agents'),
url(r'^actexample/$', 'actexample'),
url(r'^actexample2/$', 'actexample2'),
url(r'^actexample3/$', 'actexample3'),
url(r'^actexample4/$', 'actexample4'),
url(r'^register/$', 'register'),
url(r'^regclient/$', 'reg_client'),
url(r'^OAuth/', include('oauth_provider.urls')),
# just urls for some user interface... not part of xapi
url(r'^me/statements/', 'my_statements'),
url(r'^me/groups/', 'my_groups'),
url(r'^me/apps/', 'my_app_status'),
url(r'^me/tokens/', 'delete_token'),
url(r'^me/', 'me'),
url(r'^about', 'about'),
url(r'^statementvalidator', 'stmt_validator')
)
urlpatterns += patterns('',
url(r'^accounts/login/$', 'django.contrib.auth.views.login', name="login"),
url(r'^accounts/logout/$', 'lrs.views.logout_view', name="logout"),
)
| apache-2.0 | 124,041,952,411,922,200 | 38.25 | 77 | 0.60828 | false |
joshrule/LOTlib | LOTlib/Hypotheses/Proposers/RegenerationProposal.py | 1 | 1997 |
"""Regenerate proposals - chooses a node of type X and replaces it
with a newly sampled value of type X.
"""
from LOTlib.BVRuleContextManager import BVRuleContextManager
from LOTlib.FunctionNode import FunctionNode, NodeSamplingException
from LOTlib.Hypotheses.Proposers import ProposalFailedException
from LOTlib.Miscellaneous import lambdaOne
from copy import copy
from math import log
class RegenerationProposal(object):
def propose(self, **kwargs):
ret_value, fb = None, None
while True: # keep trying to propose
try:
ret_value, fb = regeneration_proposal(self.grammar, self.value, **kwargs)
break
except ProposalFailedException:
pass
ret = self.__copy__(value=ret_value)
return ret, fb
def regeneration_proposal(grammar, t, resampleProbability=lambdaOne):
"""Propose, returning the new tree and the prob. of sampling it."""
newt = copy(t)
try:
# sample a subnode
n, lp = newt.sample_subnode(resampleProbability=resampleProbability)
except NodeSamplingException:
# If we've been given resampleProbability that can't sample
raise ProposalFailedException
assert getattr(n, "resampleProbability", 1.0) > 0.0, "*** Error in propose_tree %s ; %s" % (resampleProbability(t), t)
# In the context of the parent, resample n according to the grammar
# We recurse_up in order to add all the parent's rules
with BVRuleContextManager(grammar, n.parent, recurse_up=True):
n.setto(grammar.generate(n.returntype))
# compute the forward/backward probability (i.e. the acceptance distribution)
f = lp + grammar.log_probability(newt) # p_of_choosing_node_in_old_tree * p_of_new_tree
b = (log(1.0*resampleProbability(n)) - log(newt.sample_node_normalizer(resampleProbability=resampleProbability)))\
+ grammar.log_probability(t) # p_of_choosing_node_in_new_tree * p_of_old_tree
return [newt, f-b]
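# Hedged note (not part of the original file): the returned f-b term is the usual
# Hastings correction, so a caller's accept/reject step would look roughly like the
# sketch below; `posterior_score` and the helpers used are assumptions.
#
#   from math import exp
#   from random import random
#   new_t, fb = regeneration_proposal(grammar, old_t)
#   if random() < exp(posterior_score(new_t) - posterior_score(old_t) - fb):
#       old_t = new_t  # accept the proposed tree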
| gpl-3.0 | -8,992,234,683,923,649,000 | 37.403846 | 122 | 0.696044 | false |
AdamWill/bodhi | bodhi/server/services/packages.py | 1 | 2320 |
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import math
from cornice import Service
from sqlalchemy import func, distinct
from bodhi.server.models import Package
import bodhi.server.schemas
import bodhi.server.security
import bodhi.server.services.errors
packages = Service(name='packages', path='/packages/',
description='PkgDB packages',
cors_origins=bodhi.server.security.cors_origins_ro)
@packages.get(schema=bodhi.server.schemas.ListPackageSchema, renderer='json',
error_handler=bodhi.server.services.errors.json_handler,
validators=(
# None yet...
))
def query_packages(request):
db = request.db
data = request.validated
query = db.query(Package)
name = data.get('name')
if name is not None:
query = query.filter(Package.name == name)
like = data.get('like')
if like is not None:
query = query.filter(Package.name.like('%%%s%%' % like))
# We can't use ``query.count()`` here because it is naive with respect to
# all the joins that we're doing above.
count_query = query.with_labels().statement\
.with_only_columns([func.count(distinct(Package.name))])\
.order_by(None)
total = db.execute(count_query).scalar()
page = data.get('page')
rows_per_page = data.get('rows_per_page')
pages = int(math.ceil(total / float(rows_per_page)))
query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
return dict(
packages=query.all(),
page=page,
pages=pages,
rows_per_page=rows_per_page,
total=total,
)
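# Hedged usage note (not part of the original file): the service answers requests
# such as GET /packages/?like=kern&page=1&rows_per_page=20 with a JSON body of the
# form {"packages": [...], "page": 1, "pages": N, "rows_per_page": 20, "total": M};
# the query values shown are illustrative assumptions.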
| gpl-2.0 | -6,014,736,767,507,379,000 | 34.151515 | 81 | 0.673276 | false |
oscarforri/WebServices_Json | server.py | 1 | 3555 |
#!flask/bin/python
from flask import Flask, jsonify, abort, make_response, request
from resources import posts, todos, comments, albums, photos, users
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def bad_request(error):
return make_response(jsonify({'error': 'Bad request'}), 400)
#METHODS = 'GET'
@app.route('/posts', methods=['GET'])
def get_posts():
return jsonify({'posts': posts})
@app.route('/todos', methods=['GET'])
def get_todos():
return jsonify({'todos': todos})
@app.route('/comments', methods=['GET'])
def get_comments():
return jsonify({'comments': comments})
@app.route('/albums', methods=['GET'])
def get_albums():
return jsonify({'albums': albums})
@app.route('/photos', methods=['GET'])
def get_photos():
return jsonify({'photos': photos})
@app.route('/users', methods=['GET'])
def get_users():
return jsonify({'users': users})
#METHODS = "GET ID"
@app.route('/posts/<int:post_id>', methods=['GET'])
def get_task(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
return jsonify({'post': post[0]})
@app.route('/todos/<int:todo_id>', methods=['GET'])
def get_todo(todo_id):
todo = [todo for todo in todos if todo['id'] == todo_id]
if len(todo) == 0:
abort(404)
return jsonify({'todo': todo[0]})
@app.route('/comments/<int:comment_id>', methods=['GET'])
def get_comment(comment_id):
comment = [comment for comment in comments if comment['id'] == comment_id]
if len(comment) == 0:
abort(404)
return jsonify({'comment': comment[0]})
@app.route('/albums/<int:album_id>', methods=['GET'])
def get_album(album_id):
album = [album for album in albums if album['id'] == album_id]
if len(album) == 0:
abort(404)
return jsonify({'album': album[0]})
@app.route('/photos/<int:photo_id>', methods=['GET'])
def get_photo(photo_id):
photo = [photo for photo in photos if photo['id'] == photo_id]
if len(photo) == 0:
abort(404)
return jsonify({'photo': photo[0]})
@app.route('/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
user = [user for user in users if user['id'] == user_id]
if len(user) == 0:
abort(404)
return jsonify({'user': user[0]})
#METHODS = 'POST'
@app.route('/posts', methods=['POST'])
def create_post():
if not request.json or not 'title' in request.json or not 'userId' in request.json or not 'body' in request.json:
abort(400)
post = {
'id': posts[-1]['id'] + 1,
'userId': request.json['userId'],
'title': request.json['title'],
'body': request.json['body'],
}
posts.append(post)
return jsonify({'post': post}), 201
@app.route('/posts/<int:post_id>', methods=['PUT'])
def update_post(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
if not request.json:
abort(400)
post[0]['title'] = request.json.get('title', post[0]['title'])
post[0]['body'] = request.json.get('body', post[0]['body'])
return jsonify({'post': post[0]})
@app.route('/posts/<int:post_id>', methods=['DELETE'])
def delete_post(post_id):
post = [post for post in posts if post['id'] == post_id]
if len(post) == 0:
abort(404)
posts.remove(post[0])
return jsonify({'result': True})
if __name__ == '__main__':
app.debug = True
app.run("0.0.0.0")
| gpl-3.0 | -146,195,189,189,963,100 | 27.902439 | 117 | 0.603657 | false |
iglpdc/dmrg_helpers | scripts/append_metadata_from_xml.py | 1 | 2434 |
#!/usr/bin/env python
"""Appends metadata to estimators files reading from input xml files.
The current version of the DMRG does not include any metadata into the
estimator files. Metadata are comment lines that have information about the run
the estimator was obtained from, such as the value of the Hamiltonian
parameters. These data can be useful when calling the scripts that extract,
analyze, and plot the estimator data.
This script crawls down a directory finding all the estimator files, i.e. those
whose name is 'estimators.dat'. For each of them, it finds the corresponding
input xml file, which was generated by the DMRG code and contains the value of
the parameters of the run. It extracts the information for selected parameters,
and, finally, prepends this information in the proper metadata format to each
estimator file.
The script only works with fromScratch runs, and probably fails when you have
restarts.
The list of paramaters you want to add as metadata, `keys_to_watch`, should be
modified depending on your project.
Usage:
append_metadata_from_xml.py [--dir=DIR]
append_metadata_from_xml.py -h | --help
Options:
-h --help Shows this screen.
--dir=DIR Ouput directory [default: ./]
"""
import os
# Temporary patch to avoid installing the dmrg_helpers package.
import inspect
import sys
script_full_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
sys.path.insert(0, os.path.dirname(os.path.dirname(script_full_path)))
# patch ends
from dmrg_helpers.extract.input_file_reader import InputFileReader
from dmrg_helpers.extract.locate_estimator_files import locate_estimator_files
from itertools import izip
from docopt import docopt
def parent_dir_of_parent_dir(filename):
"""Returns the parent dir of the dir where the file lives.
"""
filename = os.path.abspath(filename)
return os.path.dirname(os.path.dirname(filename))
def main(args):
estimator_files = locate_estimator_files(args['--dir'])
input_files = [os.path.join(parent_dir_of_parent_dir(f), 'input.log')
for f in estimator_files]
keys_to_watch = ['t', 'tp', 'U', 'J1', 'J2', 'Kring', 'numberOfSites']
for pair in izip(estimator_files, input_files):
reader = InputFileReader(keys_to_watch)
reader.read(pair[1])
reader.prepend_data_to_file(pair[0])
if __name__ == '__main__':
args = docopt(__doc__, version = 0.1)
main(args)
| mit | -7,388,700,737,663,430,000 | 37.634921 | 79 | 0.732128 | false |
metamath/set.mm | scripts/iset-match.py | 1 | 1795 |
#!/usr/bin/env python3
# iset-match.py: Report where iset.mm statements differ from set.mm.
# Author: David A. Wheeler
# SPDX-License-Identifier: MIT
import os,re
# Generate list of statements in set.mm and iset.mm.
os.system("metamath 'read set.mm' 'set width 9999' 'show statement *' quit > ,set-mm-statements")
os.system("metamath 'read iset.mm' 'set width 9999' 'show statement *' quit > ,iset-mm-statements")
# The lines we want have this form:
# 70 mpd $p |- ( ph -> ch ) $= ... $.
# with a beginning number, label, $[aep], and statement.
useful = re.compile(r'[0-9]+ ([^ ]+) (\$[aep]) (.*)')
# Utility functions to clean up statements.
# https://stackoverflow.com/questions/3663450/python-remove-substring-only-at-the-end-of-string
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
def cleanup_expr(expr):
t1 = rchop(expr, ' $= ... $.')
t2 = rchop(t1, ' $.')
return t2.strip()
setmm_statements = {}
# Read set.mm statement list
with open(',set-mm-statements', 'r') as setmm:
for line in setmm:
# print(line)
res = useful.match(line)
if res:
label = res[1]
expr = cleanup_expr(res[3])
# print(label + ' ' + expr)
setmm_statements[label] = expr
# print(setmm_statements)
# Read iset.mm statement list, report ones differing from set.mm.
with open(',iset-mm-statements', 'r') as isetmm:
for line in isetmm:
# print(line)
res = useful.match(line)
if res:
label = res[1]
label_type = res[2]
expr = cleanup_expr(res[3])
# print(label + ' ' + expr)
if label in setmm_statements and setmm_statements[label] != expr:
print('{} {}: {} DIFFERENT_FROM {}'.format(
label, label_type, setmm_statements[label], expr))
| cc0-1.0 | 8,621,906,787,565,850,000 | 29.948276 | 99 | 0.640111 | false |
SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_local.py | 1 | 4791 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import sys
import time
import pydevd
from _pydevd_bundle.pydevd_comm import get_global_debugger
from ptvsd.pydevd_hooks import install
from ptvsd.runner import run as no_debug_runner
from ptvsd.socket import Address
from ptvsd._util import new_hidden_thread
PYDEVD_DEFAULTS = {
'--qt-support=auto',
}
def _set_pydevd_defaults(pydevd_args):
args_to_append = []
for arg in PYDEVD_DEFAULTS:
if arg not in pydevd_args:
args_to_append.append(arg)
return pydevd_args + args_to_append
########################
# high-level functions
def debug_main(address, name, kind, *extra, **kwargs):
if not kwargs.pop('wait', False) and address.isserver:
def unblock_debugger():
debugger = get_global_debugger()
while debugger is None:
time.sleep(0.1)
debugger = get_global_debugger()
debugger.ready_to_run = True
new_hidden_thread('ptvsd.unblock_debugger', unblock_debugger).start()
if kind == 'module':
run_module(address, name, *extra, **kwargs)
else:
run_file(address, name, *extra, **kwargs)
def run_main(address, name, kind, *extra, **kwargs):
addr = Address.from_raw(address)
sys.argv[:] = _run_main_argv(name, extra)
runner = kwargs.pop('_runner', no_debug_runner)
runner(addr, name, kind == 'module', *extra, **kwargs)
########################
# low-level functions
def run_module(address, modname, *extra, **kwargs):
"""Run pydevd for the given module."""
addr = Address.from_raw(address)
if not addr.isserver:
kwargs['singlesession'] = True
run = kwargs.pop('_run', _run)
prog = kwargs.pop('_prog', sys.argv[0])
filename = modname + ':'
argv = _run_argv(addr, filename, extra, _prog=prog)
argv.insert(argv.index('--file'), '--module')
run(argv, addr, **kwargs)
def run_file(address, filename, *extra, **kwargs):
"""Run pydevd for the given Python file."""
addr = Address.from_raw(address)
if not addr.isserver:
kwargs['singlesession'] = True
run = kwargs.pop('_run', _run)
prog = kwargs.pop('_prog', sys.argv[0])
argv = _run_argv(addr, filename, extra, _prog=prog)
run(argv, addr, **kwargs)
def _run_argv(address, filename, extra, _prog=sys.argv[0]):
"""Convert the given values to an argv that pydevd.main() supports."""
if '--' in extra:
pydevd = list(extra[:extra.index('--')])
extra = list(extra[len(pydevd) + 1:])
else:
pydevd = []
extra = list(extra)
pydevd = _set_pydevd_defaults(pydevd)
host, port = address
argv = [
_prog,
'--port', str(port),
]
if not address.isserver:
argv.extend([
'--client', host or 'localhost',
])
return argv + pydevd + [
'--file', filename,
] + extra
def _run_main_argv(filename, extra):
if '--' in extra:
pydevd = list(extra[:extra.index('--')])
extra = list(extra[len(pydevd) + 1:])
else:
extra = list(extra)
return [filename] + extra
def _run(argv, addr, _pydevd=pydevd, _install=install, **kwargs):
"""Start pydevd with the given commandline args."""
#print(' '.join(argv))
# Pydevd assumes that the "__main__" module is the "pydevd" module
# and does some tricky stuff under that assumption. For example,
# when the debugger starts up it calls save_main_module()
# (in pydevd_bundle/pydevd_utils.py). That function explicitly sets
# sys.modules["pydevd"] to sys.modules["__main__"] and then sets
# the __main__ module to a new one. This makes some sense since
# it gives the debugged script a fresh __main__ module.
#
# This complicates things for us since we are running a different
# file (i.e. this one) as the __main__ module. Consequently,
# sys.modules["pydevd"] gets set to ptvsd/__main__.py. Subsequent
# imports of the "pydevd" module then return the wrong module. We
# work around this by avoiding lazy imports of the "pydevd" module.
# We also replace the __main__ module with the "pydevd" module here.
if sys.modules['__main__'].__file__ != _pydevd.__file__:
sys.modules['__main___orig'] = sys.modules['__main__']
sys.modules['__main__'] = _pydevd
daemon = _install(_pydevd, addr, **kwargs)
sys.argv[:] = argv
try:
_pydevd.main()
except SystemExit as ex:
daemon.exitcode = int(ex.code)
raise
|
bsd-3-clause
| -6,504,013,464,289,144,000 | 31.503497 | 77 | 0.594448 | false |
saisankargochhayat/algo_quest
|
leetcode/1423. Maximum Points You Can Obtain from Cards/soln.py
|
1
|
1199
|
from typing import List
# Build a prefix-sum array, then find the window of size n-k whose sum is minimum;
# the answer is the total of all cards minus that minimum window sum.
class Solution:
def maxScore(self, cardPoints: List[int], k: int) -> int:
n = len(cardPoints)
pSum = [0] * n
pSum[0] = cardPoints[0]
for i in range(1, n):
pSum[i] = cardPoints[i] + pSum[i-1]
wSize = n-k # window size to minimize
minNK = pSum[wSize-1]
        # Track the minimum sum over all windows of size n-k
for i in range(wSize, n):
minNK = min(minNK, pSum[i]-pSum[i-wSize])
return pSum[n-1] - minNK
# Times out, but works.
from functools import lru_cache
class Solution:
def maxScore(self, cardPoints: List[int], k: int) -> int:
# Helps find the maximum score.
@lru_cache(maxsize=None)
def helper(l, r, k, score):
# print(cardPoints[l:r+1], k, score)
# Base condition
if k == 0:
return score
pickingLeft = helper(l+1, r, k-1, score+cardPoints[l])
pickingRight = helper(l, r-1, k-1, score+cardPoints[r])
return max(pickingLeft, pickingRight)
return helper(0, len(cardPoints)-1, k, 0)
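# Minimal sanity check (added for illustration; not part of the original
# submission). It uses the canonical example from the problem statement:
# with cards [1, 2, 3, 4, 5, 6, 1] and k = 3, taking the three rightmost
# cards gives 1 + 6 + 5 = 12.
if __name__ == "__main__":
    print(Solution().maxScore([1, 2, 3, 4, 5, 6, 1], 3))  # expected output: 12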
|
apache-2.0
| -272,638,181,483,588,160 | 32.305556 | 76 | 0.527106 | false |
robintw/Py6S
|
tests/test_profiles.py
|
1
|
19426
|
# This file is part of Py6S.
#
# Copyright 2012 Robin Wilson and contributors listed in the CONTRIBUTORS file.
#
# Py6S is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py6S is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Py6S. If not, see <http://www.gnu.org/licenses/>.
import unittest
from Py6S import AeroProfile, AtmosProfile, ParameterError, SixS
class AtmosProfileTests(unittest.TestCase):
def test_atmos_profile(self):
aps = [
AtmosProfile.Tropical,
AtmosProfile.NoGaseousAbsorption,
AtmosProfile.UserWaterAndOzone(0.9, 3),
]
results = [0.2723143, 0.2747224, 0.2476101]
for i in range(len(aps)):
s = SixS()
s.atmos_profile = aps[i]
s.run()
self.assertAlmostEqual(
s.outputs.apparent_reflectance,
results[i],
msg="Error in atmos profile with ID %s. Got %f, expected %f."
% (str(aps[i]), s.outputs.apparent_reflectance, results[i]),
delta=0.002,
)
def test_from_lat_and_date(self):
ap = AtmosProfile.FromLatitudeAndDate(53, "2015-07-14")
assert ap == AtmosProfile.PredefinedType(AtmosProfile.SubarcticSummer)
class AeroProfileTests(unittest.TestCase):
def test_aero_profile(self):
user_ap = AeroProfile.UserProfile(AeroProfile.Maritime)
user_ap.add_layer(5, 0.34)
aps = [
AeroProfile.Continental,
AeroProfile.NoAerosols,
AeroProfile.User(dust=0.3, oceanic=0.7),
user_ap,
]
results = [122.854, 140.289, 130.866, 136.649]
for i in range(len(aps)):
s = SixS()
s.aero_profile = aps[i]
s.run()
self.assertAlmostEqual(
s.outputs.apparent_radiance,
results[i],
"Error in aerosol profile with ID %s. Got %f, expected %f."
% (str(aps[i]), s.outputs.apparent_radiance, results[i]),
delta=0.002,
)
def test_aero_profile_errors(self):
with self.assertRaises(ParameterError):
AeroProfile.User(dust=0.8, oceanic=0.4)
def test_sun_photo_dist_errors1(self):
with self.assertRaises(ParameterError):
# Different numbers of elements for first two arguments
AeroProfile.SunPhotometerDistribution(
[
0.050000001,
0.065604001,
0.086076997,
0.112939,
0.148184001,
0.194428995,
0.255104989,
0.334715992,
0.439173013,
0.576227009,
0.756052017,
0.99199599,
1.30157101,
1.707757,
2.24070191,
2.93996596,
3.85745192,
5.06126022,
6.64074516,
8.71314526,
],
[
0.001338098,
0.007492487,
0.026454749,
0.058904506,
0.082712278,
0.073251031,
0.040950641,
0.014576218,
0.003672085,
0.001576356,
0.002422644,
0.004472982,
0.007452302,
0.011037065,
0.014523974,
0.016981738,
0.017641816,
0.016284294,
0.01335547,
0.009732267,
0.006301342,
0.003625077,
],
[1.47] * 20,
[0.0093] * 20,
)
def test_sun_photo_dist_errors2(self):
with self.assertRaises(ParameterError):
            # Mismatched numbers of elements between the size distribution
            # arguments and the refractive index arguments
AeroProfile.SunPhotometerDistribution(
[
0.050000001,
0.065604001,
0.086076997,
0.112939,
0.148184001,
0.194428995,
0.255104989,
0.334715992,
0.439173013,
0.576227009,
0.756052017,
0.99199599,
1.30157101,
1.707757,
2.24070191,
2.93996596,
3.85745192,
5.06126022,
6.64074516,
8.71314526,
11.4322901,
15,
],
[
0.001338098,
0.007492487,
0.026454749,
0.058904506,
0.082712278,
0.073251031,
0.040950641,
0.014576218,
0.003672085,
0.001576356,
0.002422644,
0.004472982,
0.007452302,
0.011037065,
0.014523974,
0.016981738,
0.017641816,
0.016284294,
0.01335547,
0.009732267,
0.006301342,
0.003625077,
],
[1.47] * 15,
[0.0093] * 20,
)
def test_sun_photo_dist_errors3(self):
        # Constant refractive indices given as per-wavelength lists or as
        # scalars should produce equivalent profiles
ap1 = AeroProfile.SunPhotometerDistribution(
[
0.050000001,
0.065604001,
0.086076997,
0.112939,
0.148184001,
0.194428995,
0.255104989,
0.334715992,
0.439173013,
0.576227009,
0.756052017,
0.99199599,
1.30157101,
1.707757,
2.24070191,
2.93996596,
3.85745192,
5.06126022,
6.64074516,
8.71314526,
11.4322901,
15,
],
[
0.001338098,
0.007492487,
0.026454749,
0.058904506,
0.082712278,
0.073251031,
0.040950641,
0.014576218,
0.003672085,
0.001576356,
0.002422644,
0.004472982,
0.007452302,
0.011037065,
0.014523974,
0.016981738,
0.017641816,
0.016284294,
0.01335547,
0.009732267,
0.006301342,
0.003625077,
],
[1.47] * 20,
[2.3] * 20,
)
ap2 = AeroProfile.SunPhotometerDistribution(
[
0.050000001,
0.065604001,
0.086076997,
0.112939,
0.148184001,
0.194428995,
0.255104989,
0.334715992,
0.439173013,
0.576227009,
0.756052017,
0.99199599,
1.30157101,
1.707757,
2.24070191,
2.93996596,
3.85745192,
5.06126022,
6.64074516,
8.71314526,
11.4322901,
15,
],
[
0.001338098,
0.007492487,
0.026454749,
0.058904506,
0.082712278,
0.073251031,
0.040950641,
0.014576218,
0.003672085,
0.001576356,
0.002422644,
0.004472982,
0.007452302,
0.011037065,
0.014523974,
0.016981738,
0.017641816,
0.016284294,
0.01335547,
0.009732267,
0.006301342,
0.003625077,
],
1.47,
2.3,
)
self.assertEqual(ap1, ap2)
def test_multimodal_dist_errors1(self):
with self.assertRaises(ParameterError):
ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20)
# Add > 4 components
ap.add_component(
0.05,
2.03,
0.538,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
1.7e-03,
4.0e-03,
1.4e-03,
],
)
ap.add_component(
0.0695,
2.03,
0.457,
[
1.452,
1.440,
1.438,
1.433,
1.432,
1.431,
1.431,
1.430,
1.429,
1.429,
1.429,
1.428,
1.427,
1.425,
1.411,
1.401,
1.395,
1.385,
1.364,
1.396,
],
[
1.0e-08,
1.0e-08,
1.0e-08,
1.0e-08,
1.0e-08,
1.0e-08,
1.0e-08,
1.0e-08,
1.38e-08,
1.47e-08,
1.68e-08,
1.93e-08,
4.91e-08,
1.75e-07,
9.66e-06,
1.94e-04,
3.84e-04,
1.12e-03,
2.51e-03,
1.31e-01,
],
)
ap.add_component(
0.4,
2.03,
0.005,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
1.7e-03,
4.0e-03,
1.4e-03,
],
)
ap.add_component(
0.4,
2.03,
0.005,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
1.7e-03,
4.0e-03,
1.4e-03,
],
)
ap.add_component(
0.4,
2.03,
0.005,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
1.7e-03,
4.0e-03,
1.4e-03,
],
)
def test_multimodal_dist_errors2(self):
with self.assertRaises(ParameterError):
ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20)
ap.add_component(
0.05,
2.03,
0.538,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
],
)
def test_multimodal_dist_errors3(self):
with self.assertRaises(ParameterError):
ap = AeroProfile.MultimodalLogNormalDistribution(0.001, 20)
ap.add_component(
0.4,
2.03,
0.005,
[
1.508,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.500,
1.495,
1.490,
1.490,
1.490,
1.486,
1.480,
1.470,
1.460,
1.456,
1.443,
1.430,
1.470,
1.999,
1.999,
0,
],
[
3.24e-07,
3.0e-08,
2.86e-08,
2.51e-08,
2.2e-08,
2.0e-08,
1.0e-08,
1.0e-08,
1.48e-08,
2.0e-08,
6.85e-08,
1.0e-07,
1.25e-06,
3.0e-06,
3.5e-04,
6.0e-04,
6.86e-04,
1.7e-03,
4.0e-03,
1.4e-03,
],
)
def test_running_multiple_add_components(self):
s = SixS()
real_intp = [0.0] * 20
imag_intp = [0.0] * 20
# Running these lines more than twice used to give an error
for i in range(4):
s.aeroprofile = AeroProfile.MultimodalLogNormalDistribution(0.085, 2.9)
s.aeroprofile.add_component(
rmean=2.65,
sigma=0.62,
percentage_density=0.093,
refr_real=real_intp,
refr_imag=imag_intp,
)
|
lgpl-3.0
| -2,371,384,820,571,932,700 | 27.95082 | 83 | 0.304952 | false |
johnnoone/json-spec
|
tests/test_commands.py
|
1
|
4664
|
"""
    tests.test_commands
    ~~~~~~~~~~~~~~~~~~~
"""
import pytest
from subprocess import Popen, PIPE
from jsonspec import cli
import json
from . import move_cwd
def runner(cmd, args, success, result):
try:
args = cmd.parse_args(args)
response = cmd(args)
if not success:
raise Exception('error expected', response)
except Exception as error:
if success:
raise Exception('success expected', error)
else:
assert json.loads(response) == result
scenarii = [
# Pointer scenarii
# inline json
("""json extract '#/foo/1' --document-json='{"foo": ["bar", "baz"]}'""", True),
("""echo '{"foo": ["bar", "baz"]}' | json extract '#/foo/1'""", True),
("""json extract '#/foo/2' --document-json='{"foo": ["bar", "baz"]}'""", False),
("""echo '{"foo": ["bar", "baz"]}' | json extract '#/foo/2'""", False),
    # existing file
("""cat fixtures/first.data1.json | json extract '#/name'""", True),
("""json extract '#/name' < fixtures/first.data1.json""", True),
("""json extract '#/name' --document-file=fixtures/first.data1.json""", True),
    # existing file, but pointer does not match
("""cat fixtures/first.data1.json | json extract '#/foo/bar'""", False),
("""json extract '#/foo/bar' < fixtures/first.data1.json""", False),
("""json extract '#/foo/bar' --document-file=fixtures/first.data1.json""", False),
    # nonexistent file
("""json extract '#/foo/1' --document-file=doc.json""", False),
("""json extract '#/foo/1' < doc.json""", False),
("""cat doc.json | json extract '#/foo/1'""", False),
# Schema scenarii
#
("""json validate --schema-file=fixtures/three.schema.json < fixtures/three.data1.json""", False),
("""json validate --schema-file=fixtures/three.schema.json < fixtures/three.data2.json""", True),
]
@pytest.mark.parametrize('cmd, success', scenarii)
def test_cli(cmd, success):
with move_cwd():
proc = Popen(cmd, stderr=PIPE, stdout=PIPE, shell=True)
stdout, stderr = proc.communicate()
ret = proc.returncode
if success and ret > 0:
assert False, (ret, stdout, stderr)
if not success and ret == 0:
assert False, (ret, stdout, stderr)
add_scenes = [
('#/foo/bar', {'foo': 'bar'}, {'baz': 'quux'}, False, None),
('#/baz', {'foo': 'bar'}, 'quux', True, {'foo': 'bar', 'baz': 'quux'}),
]
@pytest.mark.parametrize('pointer, document, fragment, success, result',
add_scenes)
def test_cli_add(pointer, document, fragment, success, result):
doc, frag = json.dumps(document), json.dumps(fragment)
cmd = cli.AddCommand()
runner(cmd, [pointer, '--document-json', doc, '--fragment-json', frag],
success, result)
remove_scenes = [
('#/foo/bar', {'foo': 'bar'}, False, None),
('#/baz', {'foo': 'bar', 'baz': 'bar'}, True, {'foo': 'bar'}),
]
@pytest.mark.parametrize('pointer, document, success, result', remove_scenes)
def test_cli_remove(pointer, document, success, result):
doc = json.dumps(document)
cmd = cli.RemoveCommand()
runner(cmd, [pointer, '--document-json', doc], success, result)
replace_scenes = [
('#/bar', {'foo': 'bar'}, 'quux', False, None),
('#/foo', {'foo': 'bar'}, 'quux', True, {'foo': 'quux'}),
]
@pytest.mark.parametrize('pointer, document, fragment, success, result',
replace_scenes)
def test_cli_replace(pointer, document, fragment, success, result):
doc, frag = json.dumps(document), json.dumps(fragment)
cmd = cli.ReplaceCommand()
runner(cmd, [pointer, '--document-json', doc, '--fragment-json', frag],
success, result)
move_scenes = [
('#/bar', {'foo': 'bar'}, '#/foo', False, None),
('#/foo', {'foo': 'bar'}, '#/baz', True, {'baz': 'bar'}),
]
@pytest.mark.parametrize('pointer, document, target, success, result',
move_scenes)
def test_cli_move(pointer, document, target, success, result):
doc = json.dumps(document)
cmd = cli.MoveCommand()
runner(cmd, [pointer, '--document-json', doc, '--target-pointer', target],
success, result)
copy_scenes = [
('#/bar', {'foo': 'bar'}, '#/foo', False, None),
('#/foo', {'foo': 'bar'}, '#/baz', True, {'foo': 'bar', 'baz': 'bar'}),
]
@pytest.mark.parametrize('pointer, document, target, success, result',
copy_scenes)
def test_cli_copy(pointer, document, target, success, result):
doc = json.dumps(document)
cmd = cli.CopyCommand()
runner(cmd, [pointer, '--document-json', doc, '--target-pointer', target],
success, result)
|
bsd-3-clause
| -8,597,363,922,463,089,000 | 31.84507 | 102 | 0.582976 | false |
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/Cura/cura_sf/fabmetheus_utilities/geometry/geometry_utilities/evaluate_elements/creation.py
|
1
|
1903
|
"""
Boolean geometry utilities.
"""
from __future__ import absolute_import
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities import archive
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def _getAccessibleAttribute(attributeName, elementNode):
'Get the accessible attribute.'
functionName = attributeName[len('get') :].lower()
if functionName not in evaluate.globalCreationDictionary:
print('Warning, functionName not in globalCreationDictionary in _getAccessibleAttribute in creation for:')
print(functionName)
print(elementNode)
return None
pluginModule = archive.getModuleWithPath(evaluate.globalCreationDictionary[functionName])
if pluginModule == None:
print('Warning, _getAccessibleAttribute in creation can not get a pluginModule for:')
print(functionName)
print(elementNode)
return None
return Creation(elementNode, pluginModule).getCreation
class Creation(object):
'Class to handle a creation.'
def __init__(self, elementNode, pluginModule):
'Initialize.'
self.elementNode = elementNode
self.pluginModule = pluginModule
def __repr__(self):
"Get the string representation of this creation."
return self.elementNode
def getCreation(self, *arguments):
"Get creation."
dictionary = {'_fromCreationEvaluator': 'true'}
firstArgument = None
if len(arguments) > 0:
firstArgument = arguments[0]
if firstArgument.__class__ == dict:
dictionary.update(firstArgument)
return self.pluginModule.getGeometryOutput(None, self.elementNode.getCopyShallow(dictionary))
copyShallow = self.elementNode.getCopyShallow(dictionary)
return self.pluginModule.getGeometryOutputByArguments(arguments, copyShallow)
|
agpl-3.0
| 4,311,426,377,472,465,400 | 32.982143 | 108 | 0.76721 | false |
gppg1994/Machine_Learning
|
Mail_Check/Main.py
|
1
|
1521
|
'''This automation signs in to Yahoo Mail automatically, without a single
mouse or keyboard stroke. To run this code successfully, you need an
existing Yahoo account.'''
from selenium import webdriver
import time
from Configure import Configure
def if_exists(idstr):
try:
elem = browser.find_element_by_class_name(idstr)
elem = elem.find_element_by_class_name('username')
elem.click()
flag = 1
except Exception as e:
flag = -1
return flag
try:
c = Configure()
chk = c.check()
if chk == -1:
chk = c.create()
if chk == 1:
            print('Please log out from all of your accounts while configuring...')
time.sleep(3)
c.configure()
fp = open(r'config.dat', 'r')
[username, password] = fp.readline().split(',')
browser = webdriver.Edge()
    browser.get('https://yahoo.co.in')  # WebDriver.get() needs an absolute URL with a scheme
time.sleep(2)
elem = browser.find_element_by_id('uh-mail-link')
elem.click()
time.sleep(10)
chk = if_exists('account-card loggedOut')
time.sleep(5)
elem = browser.find_element_by_id('login-username')
elem.clear()
elem.send_keys(username)
elem = browser.find_element_by_id('login-signin')
elem.click()
time.sleep(5)
elem = browser.find_element_by_id('login-passwd')
elem.send_keys(password)
elem = browser.find_element_by_id('login-signin')
elem.submit()
except Exception as e:
print(str(e))
|
gpl-3.0
| -900,942,733,077,118,500 | 27.823529 | 80 | 0.610125 | false |
anybox/anybox.recipe.odoo
|
anybox/recipe/odoo/vcs/tests/test_bzr.py
|
1
|
21368
|
"""VCS tests: Bazaar."""
import os
import subprocess
from zc.buildout import UserError
from ..testing import COMMIT_USER_FULL
from ..testing import VcsTestCase
from ..bzr import BzrBranch
from ..bzr import working_directory_keeper
from ..base import UpdateError
from ..base import CloneError
class BzrBaseTestCase(VcsTestCase):
"""Common utilities for Bazaard test cases."""
def create_src(self):
os.chdir(self.src_dir)
subprocess.call(['bzr', 'init', 'src-branch'])
self.src_repo = os.path.join(self.src_dir, 'src-branch')
os.chdir(self.src_repo)
subprocess.call(['bzr', 'whoami', '--branch', COMMIT_USER_FULL])
f = open('tracked', 'w')
f.write("first" + os.linesep)
f.close()
subprocess.call(['bzr', 'add'])
subprocess.call(['bzr', 'commit', '-m', 'initial commit'])
f = open('tracked', 'w')
f.write("last" + os.linesep)
f.close()
subprocess.call(['bzr', 'commit', '-m', 'last version'])
def assertRevision(self, branch, rev, first_line, msg=None):
"""Assert that branch is at prescribed revision
:param branch: instance of :class:`BzrBranch` to work on
:param rev: revision number (revno)
:param first_line: expected first line of the 'tracked' file
:param msg: passed to underlying assertions
Double check with expected first line of 'tracked' file."""
target_dir = branch.target_dir
self.assertTrue(os.path.isdir(target_dir), msg=msg)
f = open(os.path.join(target_dir, 'tracked'))
lines = f.readlines()
f.close()
self.assertEquals(lines[0].strip(), first_line, msg=msg)
self.assertEquals(branch.parents(as_revno=True), [rev], msg=msg)
def assertRevision1(self, branch, **kw):
"""Assert that branch is at revision 1."""
self.assertRevision(branch, '1', 'first', **kw)
def assertRevision2(self, branch, **kw):
"""Assert that branch is at revision 2."""
self.assertRevision(branch, '2', 'last', **kw)
class BzrTestCase(BzrBaseTestCase):
def test_branch(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo)
branch('last:1')
self.assertRevision2(branch)
def test_branch_on_revision_retry(self):
"""Test retry system if direct branching to revison fails."""
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo)
normal_method = branch._branch
monkey_called = []
def branch_no_rev(revision):
"""Monkey patch to simulate the error."""
monkey_called.append(revision)
if revision:
raise CloneError("fake branch cmd", 3)
return normal_method(revision)
branch._branch = branch_no_rev
branch('last:1')
# ensures that we actually did test something:
self.assertEqual(monkey_called, ['last:1', None])
self.assertRevision2(branch) # branching worked
def test_parents_revid(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo)
branch('last:1')
self.assertRevision2(branch, msg="Test impaired by other problem")
parents = branch.parents()
self.assertEquals(len(parents), 1)
self.assertTrue(parents[0].startswith('revid:test@example.org-'),
msg="Result does not look to be a revid")
def test_parents_pip(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo)
branch('last:1')
self.assertRevision2(branch, msg="Test impaired by other problem")
parents = branch.parents(pip_compatible=True)
self.assertEquals(parents, ['2'])
def test_branch_options_conflict(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo,
**{'bzr-init': 'branch',
'bzr-stacked-branches': 'True'})
self.assertRaises(Exception, branch, "last:1")
def test_branch_bzr_init(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo,
**{'bzr-init': 'branch'})
branch('last:1')
self.assertRevision2(branch)
def test_branch_stacked_deprecated(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo,
**{'bzr-stacked-branches': 'True'})
branch('last:1')
self.assertRevision2(branch)
def test_branch_stacked(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo,
**{'bzr-init': 'stacked-branch'})
branch('last:1')
self.assertRevision2(branch)
def test_checkout_lightweight(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo,
**{'bzr-init': 'lightweight-checkout'})
branch('1')
self.assertRevision1(branch)
branch('last:1')
self.assertRevision2(branch)
def test_branch_to_rev(self):
"""Directly clone and update to given revision."""
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, self.src_repo)
branch('1')
self.assertRevision1(branch)
def test_update(self):
"""Update to a revision that's not the latest available in target"""
target_dir = os.path.join(self.dst_dir, "clone to update")
branch = BzrBranch(target_dir, self.src_repo)('last:1')
# Testing starts here
branch = BzrBranch(target_dir, self.src_repo)
branch('1')
self.assertRevision1(branch)
def test_update_tag(self):
"""Update to an avalailable rev, identified by tag.
"""
with working_directory_keeper:
os.chdir(self.src_repo)
subprocess.check_call(['bzr', 'tag', '-r', '1', 'sometag'])
target_dir = os.path.join(self.dst_dir, "clone to update")
# Testing starts here
branch = BzrBranch(target_dir, self.src_repo)
branch('sometag')
self.assertRevision1(branch)
def test_update_needs_pull(self):
"""Update to a revision that needs to be pulled from target."""
target_dir = os.path.join(self.dst_dir, "clone to update")
branch = BzrBranch(target_dir, self.src_repo)('1')
# We really don't have rev 2 in branch
self.assertRaises(LookupError, branch.get_revid, '2')
branch('2')
self.assertRevision2(branch)
def test_update_revid_needs_pull(self):
"""Update to a rev that needs to be pulled from source, by revid."""
target_dir = os.path.join(self.dst_dir, "clone to update")
branch = BzrBranch(target_dir, self.src_repo)('1')
# We really don't have rev 2 in branch
self.assertRaises(LookupError, branch.get_revid, '2')
revid = BzrBranch(self.src_repo, self.src_repo).get_revid('2')
branch('revid:' + revid)
self.assertRevision2(branch)
def test_clean(self):
"""Test clean method, and esp. that no local mod occurs.
See launchpad: #1192973, bzr tracks empty directories. We must not
remove them.
"""
with working_directory_keeper:
os.chdir(self.src_repo)
os.mkdir('subdir')
subprocess.check_call(['bzr', 'add'])
subprocess.call(['bzr', 'whoami', '--branch', COMMIT_USER_FULL])
subprocess.check_call(['bzr', 'commit', '-m', "bidule"])
target_dir = os.path.join(self.dst_dir, "clone to clean")
branch = BzrBranch(target_dir, self.src_repo)
try:
branch.clean()
        except Exception:
            self.fail("clean() should not fail if "
                      "the branch has not been created yet")
branch('last:1')
untracked = os.path.join(branch.target_dir, 'subdir', 'untracked.pyc')
with open(untracked, 'w') as f:
f.write('untracked content')
branch.clean()
self.assertFalse(os.path.exists(untracked))
with working_directory_keeper:
os.chdir(branch.target_dir)
bzr_status = subprocess.Popen(['bzr', 'status'],
stdout=subprocess.PIPE)
out = bzr_status.communicate()[0]
self.assertEquals(bzr_status.returncode, 0)
# output of 'bzr status' should be empty : neither unknown file nor
# any local modification, including removal of 'subdir'
self.assertEquals(out.strip(), '')
def test_uncommitted_changes_tracked(self):
target_dir = os.path.join(self.dst_dir, "clone to dirty")
branch = BzrBranch(target_dir, self.src_repo)('last:1')
self.assertFalse(branch.uncommitted_changes())
with open(os.path.join(target_dir, 'tracked'), 'w') as f:
f.write('some change')
self.assertTrue(branch.uncommitted_changes())
def test_uncommitted_changes_untracked(self):
target_dir = os.path.join(self.dst_dir, "clone to dirty")
branch = BzrBranch(target_dir, self.src_repo)('last:1')
self.assertFalse(branch.uncommitted_changes())
with open(os.path.join(target_dir, 'unknownfile'), 'w') as f:
f.write('some change')
self.assertTrue(branch.uncommitted_changes())
def test_revert(self):
target_dir = os.path.join(self.dst_dir, "clone to clean")
branch = BzrBranch(target_dir, self.src_repo)
branch('last:1')
path = os.path.join(target_dir, 'tracked')
with open(path, 'r') as f:
original = f.readlines()
with open(path, 'w') as f:
f.write('a local mod')
branch.revert('last:1')
with open(path, 'r') as f:
self.assertEqual(f.readlines(), original)
def test_archive(self):
target_dir = os.path.join(self.dst_dir, "clone to archive")
branch = BzrBranch(target_dir, self.src_repo)
branch('1')
archive_dir = os.path.join(self.dst_dir, "archive directory")
branch.archive(archive_dir)
with open(os.path.join(archive_dir, 'tracked')) as f:
self.assertEquals(f.readlines()[0].strip(), 'first')
def test_url_update(self):
"""Method to update branch.conf does it and stores old values"""
# Setting up a prior branch
target_dir = os.path.join(self.dst_dir, "clone to update")
branch = BzrBranch(target_dir, self.src_repo)
branch('1')
# src may have become relative, let's keep it in that form
old_src = branch.parse_conf()['parent_location']
# first rename.
        # We test that pull actually works rather than just checking
        # branch.conf, to avoid a circular test that proves nothing
new_src = os.path.join(self.src_dir, 'new-src-repo')
os.rename(self.src_repo, new_src)
branch = BzrBranch(target_dir, new_src)
branch('last:1')
self.assertEquals(branch.parse_conf(), dict(
buildout_save_parent_location_1=old_src,
parent_location=new_src))
# second rename, on a fixed revno. The pull should be issued in that
# case, even if we already have that revno in original source
# (see lp:1320198)
new_src2 = os.path.join(self.src_dir, 'new-src-repo2')
os.rename(new_src, new_src2)
branch = BzrBranch(target_dir, new_src2)
orig_pull = branch._pull
def logging_pull():
self.pulled = True
return orig_pull()
branch._pull = logging_pull
self.pulled = False
branch('1')
self.assertTrue(self.pulled)
self.assertEquals(branch.parse_conf(), dict(
buildout_save_parent_location_1=old_src,
buildout_save_parent_location_2=new_src,
parent_location=new_src2))
def test_url_update_1133248(self):
"""Method to update branch.conf is resilient wrt to actual content.
See lp:1133248 for details
"""
# Setting up a prior branch
target_dir = os.path.join(self.dst_dir, "clone to update")
branch = BzrBranch(target_dir, self.src_repo)
branch('1')
conf_path = os.path.join(target_dir, '.bzr', 'branch', 'branch.conf')
with open(conf_path, 'a') as conf:
conf.seek(0, os.SEEK_END)
conf.write(os.linesep + "Some other stuff" + os.linesep)
# src may have become relative, let's keep it in that form
old_src = branch.parse_conf()['parent_location']
# first rename.
        # We test that pull actually works rather than just checking
        # branch.conf, to avoid a circular test that proves nothing
new_src = os.path.join(self.src_dir, 'new-src-repo')
os.rename(self.src_repo, new_src)
branch = BzrBranch(target_dir, new_src)
branch('last:1')
self.assertEquals(branch.parse_conf(), dict(
buildout_save_parent_location_1=old_src,
parent_location=new_src))
def test_lp_url(self):
"""lp: locations are being rewritten to the actual target."""
branch = BzrBranch('', 'lp:anybox.recipe.openerp')
# just testing for now that it's been rewritten
self.failIf(branch.url.startswith('lp:'))
        # checking idempotency of rewriting
branch2 = BzrBranch('', branch.url)
self.assertEquals(branch2.url, branch.url)
def test_lp_url_nobzrlib(self):
"""We can't safely handle lp: locations without bzrlib."""
from ... import vcs
save = vcs.bzr.LPDIR
vcs.bzr.LPDIR = None
self.assertRaises(RuntimeError, BzrBranch, '',
'lp:anybox.recipe.openerp')
vcs.bzr.LPDIR = save
def test_update_clear_locks(self):
"""Testing update with clear locks option."""
# Setting up a prior branch
target_dir = os.path.join(self.dst_dir, "clone to update")
BzrBranch(target_dir, self.src_repo)('last:1')
# Testing starts here
branch = BzrBranch(target_dir, self.src_repo, clear_locks=True)
branch('1')
self.assertRevision1(branch)
def test_failed(self):
target_dir = os.path.join(self.dst_dir, "My branch")
branch = BzrBranch(target_dir, '/does-not-exist')
self.assertRaises(subprocess.CalledProcessError,
branch.get_update, 'default')
def test_merge(self):
current = os.getcwd()
to_merge = os.path.join(self.dst_dir, "proposed branch")
BzrBranch(to_merge, self.src_repo)('last:1')
os.chdir(to_merge)
added_file = 'added'
f = open(added_file, 'w')
f.write("content" + os.linesep)
f.close()
subprocess.call(['bzr', 'add'])
subprocess.call(['bzr', 'whoami', '--branch', COMMIT_USER_FULL])
subprocess.call(['bzr', 'commit', '-m', 'poposal commit'])
target_dir = os.path.join(self.dst_dir, "branch with merge")
BzrBranch(target_dir, self.src_repo)('last:1')
BzrBranch(
target_dir, to_merge, **{'bzr-init': 'merge'})('last:1')
os.chdir(current)
self.assertTrue(os.path.exists(os.path.join(target_dir, added_file)))
class BzrOfflineTestCase(BzrBaseTestCase):
def make_local_branch(self, path, initial_rev, options=None):
"""Make a local branch of the source at initial_rev and forbid pulls.
"""
if options is None:
options = {}
target_dir = os.path.join(self.dst_dir, path)
        # initial branching (not offline)
build_branch = BzrBranch(target_dir, self.src_repo)
build_branch(initial_rev)
build_branch.update_conf() # just to get an absolute path in there
# crippled offline branch
branch = BzrBranch(target_dir, self.src_repo, offline=True, **options)
def _pull():
self.fail("Should not pull !")
branch._pull = _pull
return branch
def test_update_needs_pull(self):
"""[offline mode] updating to a non available rev raises UpdateError.
"""
branch = self.make_local_branch("clone to update", '1')
self.assertRaises(UpdateError, branch, '2')
def test_update_last(self):
"""[offline mode] update to a last:1 rev does nothing."""
branch = self.make_local_branch("clone to update", '1')
branch('last:1')
self.assertRevision1(branch)
def test_update_available_revno(self):
"""[offline mode] update to an available revno works"""
branch = self.make_local_branch("clone to update", 'last:1')
branch('1')
self.assertRevision1(branch)
def test_update_available_revno_url_change(self):
"""[offline mode] upd to an available revno with URL change is an error
"""
branch = self.make_local_branch("clone to update", 'last:1')
branch('1')
self.assertRevision1(branch, msg="Test is impaired")
new_branch = BzrBranch(branch.target_dir, 'http://other.url.example',
offline=True)
self.assertRaises(UserError, new_branch, '1')
# conf has not changed
self.assertEquals(new_branch.parse_conf(), branch.parse_conf())
def test_update_live_rev_url_change(self):
"""[offline mode] upd to a live revspec with URL change is an error
"""
branch = self.make_local_branch("clone to update", 'last:1')
branch('1')
self.assertRevision1(branch, msg="Test is impaired")
new_branch = BzrBranch(branch.target_dir, 'http://other.url.example',
offline=True)
self.assertRaises(UserError, new_branch, 'last:1')
self.assertEquals(new_branch.parse_conf(), branch.parse_conf())
def test_update_available_revid_url_change(self):
"""[offline mode] upd to an available revid with URL change is ok
"""
branch = self.make_local_branch("clone to update", 'last:1')
branch('1')
revid = branch.parents()[0]
        self.assertTrue(revid.startswith('revid:'))
new_branch = BzrBranch(branch.target_dir, 'http://other.url.example',
offline=True)
new_branch(revid)
self.assertRevision1(new_branch)
def test_update_available_revid(self):
"""[offline mode] update to an available revid works.
"""
branch = self.make_local_branch("clone to update", 'last:1')
revid = branch.get_revid('1')
branch('revid:' + revid)
self.assertRevision1(branch)
def test_update_available_tag_is_local_fixed_revision(self):
"""[offline mode] update to an available tag works.
"""
branch = self.make_local_branch("clone to update", 'last:1')
subprocess.check_call(['bzr', 'tag', '-r', '1', 'sometag'],
cwd=branch.target_dir)
branch('tag:sometag')
self.assertRevision1(branch)
self.assertTrue(branch.is_local_fixed_revision('tag:sometag'))
self.assertFalse(branch.is_local_fixed_revision('tag:unknown'))
self.assertFalse(branch.is_local_fixed_revision('-1'))
def test_lightweight_checkout_noupdate(self):
"""[offline mode] lightweight checkouts shall not be updated."""
branch = self.make_local_branch(
"clone to update", '1',
options={'bzr-init': 'lightweight-checkout'})
def _update(*a, **kw):
self.fail("Should not update !")
branch._update = _update
branch('last:1')
self.assertRevision1(branch)
def test_lightweight_checkout_noupdate_fixed_rev(self):
"""[offline mode] lightweight checkouts shall not be updated."""
branch = self.make_local_branch(
"clone to update", 'last:1',
options={'bzr-init': 'lightweight-checkout'})
def _update(*a, **kw):
self.fail("Should not update !")
branch._update = _update
branch('1')
self.assertRevision2(branch)
def test_lp_url_offline(self):
"""[offline mode] lp: locations are not to be resolved.
See lp:1249566, resolving lp: performs outgoing requests,
and resolving the remote URL is irrelevant anyway, since it won't
be used.
"""
brdir = os.path.join(self.dst_dir, 'lp_branch')
os.makedirs(os.path.join(brdir, '.bzr', 'branch'))
branch = BzrBranch(brdir, 'lp:something', offline=True)
self.assertEqual(branch.url, 'lp:something')
# making sure that the unresolved lp: location is not written
# to branch.conf
parent_loc = 'bzr+ssh://previously/resolved'
branch.write_conf(dict(parent_location=parent_loc))
branch.update_conf()
self.assertEqual(branch.parse_conf()['parent_location'], parent_loc)
|
agpl-3.0
| -7,794,256,435,786,472,000 | 37.850909 | 79 | 0.599588 | false |
ulfalizer/Kconfiglib
|
examples/find_symbol.py
|
1
|
3437
|
# Prints all menu nodes that reference a given symbol in any of their properties
# or property conditions, along with their parent menu nodes.
#
# Usage:
#
# $ make [ARCH=<arch>] scriptconfig SCRIPT=Kconfiglib/examples/find_symbol.py SCRIPT_ARG=<name>
#
# Example output for SCRIPT_ARG=X86:
#
# Found 470 locations that reference X86:
#
# ========== Location 1 (init/Kconfig:1108) ==========
#
# config SGETMASK_SYSCALL
# bool
# prompt "sgetmask/ssetmask syscalls support" if EXPERT
# default PARISC || M68K || PPC || MIPS || X86 || SPARC || MICROBLAZE || SUPERH
# help
# sys_sgetmask and sys_ssetmask are obsolete system calls
# no longer supported in libc but still enabled by default in some
# architectures.
#
# If unsure, leave the default option here.
#
# ---------- Parent 1 (init/Kconfig:1077) ----------
#
# menuconfig EXPERT
# bool
# prompt "Configure standard kernel features (expert users)"
# select DEBUG_KERNEL
# help
# This option allows certain base kernel options and settings
# to be disabled or tweaked. This is for specialized
# environments which can tolerate a "non-standard" kernel.
# Only use this if you really know what you are doing.
#
# ---------- Parent 2 (init/Kconfig:39) ----------
#
# menu "General setup"
#
# ========== Location 2 (arch/Kconfig:29) ==========
#
# config OPROFILE_EVENT_MULTIPLEX
# bool
# prompt "OProfile multiplexing support (EXPERIMENTAL)"
# default "n"
# depends on OPROFILE && X86
# help
# The number of hardware counters is limited. The multiplexing
# feature enables OProfile to gather more events than counters
# are provided by the hardware. This is realized by switching
# between events at a user specified time interval.
#
# If unsure, say N.
#
# ---------- Parent 1 (arch/Kconfig:16) ----------
#
# config OPROFILE
# tristate
# prompt "OProfile system profiling"
# select RING_BUFFER
# select RING_BUFFER_ALLOW_SWAP
# depends on PROFILING && HAVE_OPROFILE
# help
# OProfile is a profiling system capable of profiling the
# whole system, include the kernel, kernel modules, libraries,
# and applications.
#
# If unsure, say N.
#
# ---------- Parent 2 (init/Kconfig:39) ----------
#
# menu "General setup"
#
# ... (tons more)
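# (Added note, not part of the original header: when running the script
# directly rather than through the scriptconfig make target, it expects the
# top-level Kconfig file and the symbol name as its two positional arguments,
# e.g. "python3 find_symbol.py Kconfig X86" from the kernel source tree.)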
import sys
import kconfiglib
if len(sys.argv) < 3:
sys.exit('Pass symbol name (without "CONFIG_" prefix) with SCRIPT_ARG=<name>')
kconf = kconfiglib.Kconfig(sys.argv[1])
sym_name = sys.argv[2]
if sym_name not in kconf.syms:
print("No symbol {} exists in the configuration".format(sym_name))
sys.exit(0)
referencing = [node for node in kconf.node_iter()
if kconf.syms[sym_name] in node.referenced]
if not referencing:
print("No references to {} found".format(sym_name))
sys.exit(0)
print("Found {} locations that reference {}:\n"
.format(len(referencing), sym_name))
for i, node in enumerate(referencing, 1):
print("========== Location {} ({}:{}) ==========\n\n{}"
.format(i, node.filename, node.linenr, node))
# Print the parents of the menu node too
node = node.parent
parent_i = 1
while node is not kconf.top_node:
print("---------- Parent {} ({}:{}) ----------\n\n{}"
.format(parent_i, node.filename, node.linenr, node))
node = node.parent
parent_i += 1
|
isc
| 8,374,800,690,160,126,000 | 29.6875 | 97 | 0.629328 | false |
LazoCoder/Pokemon-Terminal
|
tests/test_broken.py
|
1
|
1183
|
#!/usr/bin/env python3
# To run the tests, use: python3 -m pytest --capture=sys
from pokemonterminal.database import Database
from tests.test_utils import expected_len
def broken_test_extra_length(region_name='extra'):
assert len(Database().get_extra()) == expected_len(region_name)
def broken_test_kanto_length(region_name='kanto'):
assert len(Database().get_kanto()) == expected_len(region_name)
def broken_test_johto_length(region_name='johto'):
assert len(Database().get_johto()) == expected_len(region_name)
def broken_test_hoenn_length(region_name='hoenn'):
assert len(Database().get_hoenn()) == expected_len(region_name)
def broken_test_sinnoh_length(region_name='sinnoh'):
assert len(Database().get_sinnoh()) == expected_len(region_name)
def broken_test_unova_length(region_name='unova'):
assert len(Database().get_unova()) == expected_len(region_name)
def broken_test_kalos_length(region_name='kalos'):
assert len(Database().get_kalos()) == expected_len(region_name)
def broken_test_all_length(region_name='all'):
expected = expected_len(region_name) + expected_len('extra')
assert len(Database().get_all()) == expected
|
gpl-3.0
| 5,885,401,772,013,508,000 | 29.333333 | 68 | 0.710904 | false |
CFIS-Octarine/octarine
|
src/daomop/wcs.py
|
1
|
12869
|
import warnings
from copy import deepcopy
from astropy.units import Quantity
import math
from astropy import wcs as astropy_wcs
from astropy import units
import numpy
import logging
__author__ = "David Rusk <drusk@uvic.ca>"
PI180 = 57.2957795130823208767981548141052
class WCS(astropy_wcs.WCS):
def __init__(self, header):
"""
Create the bits needed for working with sky2xy
"""
astropy_header = deepcopy(header)
del (astropy_header['PV*'])
with warnings.catch_warnings(record=True):
warnings.resetwarnings()
warnings.simplefilter(
"ignore", astropy_wcs.FITSFixedWarning, append=True)
super(WCS, self).__init__(astropy_header)
self.header = header
@property
def cd(self):
"""
CD Rotation matrix values.
"""
return parse_cd(self.header)
@property
def dc(self):
"""
CD Rotation matrix INVERTED i.e. []^-1
"""
return numpy.array(numpy.mat(self.cd).I)
@property
def pv(self):
"""
Array of PV keywords used for hi-odered astrogwyn mapping
"""
return parse_pv(self.header)
@property
def crpix1(self):
"""
1st reference coordinate
"""
return self.header['CRPIX1']
@property
def crpix2(self):
"""
2nd reference coordinate
"""
return self.header['CRPIX2']
@property
def crval1(self):
"""
Reference Coordinate of 1st reference pixel
"""
return self.header['CRVAL1']
@property
def crval2(self):
"""
Reference Coordinate of 2nd reference pixel
"""
return self.header['CRVAL2']
@property
def nord(self):
"""
The order of the PV fit, provided by astgwyn
"""
return self.header['NORDFIT']
def xy2sky(self, x, y, usepv=True):
if usepv:
try:
return xy2skypv(x=numpy.array(x), y=numpy.array(y),
crpix1=self.crpix1,
crpix2=self.crpix2,
crval1=self.crval1,
crval2=self.crval2,
cd=self.cd,
pv=self.pv,
nord=self.nord)
except Exception as ex:
logging.warning("Error {} {}".format(type(ex), ex))
logging.warning("Reverted to CD-Matrix WCS.")
xy = numpy.array([x, y]).transpose()
pos = self.wcs_pix2world(xy, 1).transpose()
return pos[0] * units.degree, pos[1] * units.degree
def sky2xy(self, ra, dec, usepv=True):
if isinstance(ra, Quantity):
ra = ra.to(units.degree).value
if isinstance(dec, Quantity):
dec = dec.to(units.degree).value
try:
if usepv:
return sky2xypv(ra=ra,
dec=dec,
crpix1=self.crpix1,
crpix2=self.crpix2,
crval1=self.crval1,
crval2=self.crval2,
dc=self.dc,
pv=self.pv,
nord=self.nord)
except Exception as ex:
logging.warning("sky2xy raised exception: {0}".format(ex))
logging.warning("Reverted to CD-Matrix WCS to convert: {0} {1} ".format(ra, dec))
pos = self.wcs_world2pix([[ra, dec], ], 1)
return pos[0][0], pos[0][1]
def all_pix2world(self, *args, **kwargs):
logging.warning("Trying the Monkey Patched WCS function.")
logging.warning("args: {}".format(args))
logging.warning("kwargs: {}".format(kwargs))
try:
            return super(WCS, self).all_pix2world(*args, **kwargs)
        except Exception as ex:
            logging.warning("all_pix2world raised exception: {0}".format(ex))
            return self.xy2sky(*args, **kwargs)
def sky2xypv(ra, dec, crpix1, crpix2, crval1, crval2, dc, pv, nord, maxiter=300):
"""
Transforms from celestial coordinates to pixel coordinates to taking
non-linear distortion into account with the World Coordinate System
FITS keywords as used in MegaPipe.
For the inverse operation see xy2sky.
Reference material:
http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/megapipe/docs/CD_PV_keywords.pdf
Args:
ra: float
Right ascension
dec: float
Declination
crpix1: float
Tangent point x, pixels
crpix2: float
Tangent point y, pixels
crval1: float
Tangent point RA, degrees
crval2:
Tangent point Dec, degrees
dc: 2d array
Expresses the scale, the rotation and any possible skew of the image
with respect to the sky. It is the inverse of the cd matrix in xy2sky.
pv: 2d array
nord: int
order of the fit
Returns:
x, y: float
Pixel coordinates
    """
if math.fabs(ra - crval1) > 100:
if crval1 < 180:
ra -= 360
else:
ra += 360
ra /= PI180
dec /= PI180
tdec = math.tan(dec)
ra0 = crval1 / PI180
dec0 = crval2 / PI180
ctan = math.tan(dec0)
ccos = math.cos(dec0)
traoff = math.tan(ra - ra0)
craoff = math.cos(ra - ra0)
etar = (1 - ctan * craoff / tdec) / (ctan + craoff / tdec)
xir = traoff * ccos * (1 - etar * ctan)
xi = xir * PI180
eta = etar * PI180
if nord < 0:
# The simple solution
x = xi
y = eta
else:
# Reverse by Newton's method
tolerance = 0.001 / 3600
# Initial guess
x = xi
y = eta
iteration = 0
converged = False
while not converged:
assert nord >= 0
# Estimates
f = pv[0][0]
g = pv[1][0]
# Derivatives
fx = 0
fy = 0
gx = 0
gy = 0
if nord >= 1:
r = math.sqrt(x ** 2 + y ** 2)
f += pv[0][1] * x + pv[0][2] * y + pv[0][3] * r
g += pv[1][1] * y + pv[1][2] * x + pv[1][3] * r
fx += pv[0][1] + pv[0][3] * x / r
fy += pv[0][2] + pv[0][3] * y / r
gx += pv[1][2] + pv[1][3] * x / r
gy += pv[1][1] + pv[1][3] * y / r
if nord >= 2:
x2 = x ** 2
xy = x * y
y2 = y ** 2
f += pv[0][4] * x2 + pv[0][5] * xy + pv[0][6] * y2
g += pv[1][4] * y2 + pv[1][5] * xy + pv[1][6] * x2
fx += pv[0][4] * 2 * x + pv[0][5] * y
fy += pv[0][5] * x + pv[0][6] * 2 * y
gx += pv[1][5] * y + pv[1][6] * 2 * x
gy += pv[1][4] * 2 * y + pv[1][5] * x
if nord >= 3:
x3 = x ** 3
x2y = x2 * y
xy2 = x * y2
y3 = y ** 3
f += pv[0][7] * x3 + pv[0][8] * x2y + pv[0][9] * xy2 + pv[0][10] * y3
g += pv[1][7] * y3 + pv[1][8] * xy2 + pv[1][9] * x2y + pv[1][10] * x3
fx += pv[0][7] * 3 * x2 + pv[0][8] * 2 * xy + pv[0][9] * y2
fy += pv[0][8] * x2 + pv[0][9] * 2 * xy + pv[0][10] * 3 * y2
gx += pv[0][8] * y2 + pv[1][9] * 2 * xy + pv[1][10] * 3 * x2
gy += pv[1][7] * 3 * y2 + pv[0][8] * 2 * xy + pv[1][9] * x2
f -= xi
g -= eta
dx = (-f * gy + g * fy) / (fx * gy - fy * gx)
dy = (-g * fx + f * gx) / (fx * gy - fy * gx)
x += dx
y += dy
if math.fabs(dx) < tolerance and math.fabs(dy) < tolerance:
converged = True
iteration += 1
if iteration > maxiter:
break
xp = dc[0][0] * x + dc[0][1] * y
yp = dc[1][0] * x + dc[1][1] * y
x = xp + crpix1
y = yp + crpix2
return x, y
def xy2skypv(x, y, crpix1, crpix2, crval1, crval2, cd, pv, nord):
"""
Transforms from pixel coordinates to celestial coordinates taking
non-linear distortion into account with the World Coordinate System
FITS keywords as used in MegaPipe.
For the inverse operation see sky2xy
Reference material:
http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/megapipe/docs/CD_PV_keywords.pdf
Args:
x, y: float
Input pixel coordinate
crpix1: float
Tangent point x, pixels
crpix2: float
Tangent point y, pixels
crval1: float
Tangent point RA, degrees
crval2:
Tangent point Dec, degrees
cd: 2d array
Expresses the scale, the rotation and any possible skew of the image
with respect to the sky.
pv: 2d array
nord: int
order of the fit
Returns:
ra: float
Right ascension
dec: float
Declination
    """
xp = x - crpix1
yp = y - crpix2
# IMPORTANT NOTE: 0-based indexing in Python means indexing for values
# in cd and pv will be shifted from in the paper.
x_deg = cd[0][0] * xp + cd[0][1] * yp
y_deg = cd[1][0] * xp + cd[1][1] * yp
if nord < 0:
xi = x
eta = y
else:
xi = pv[0][0]
eta = pv[1][0]
if nord >= 1:
r = numpy.sqrt(x_deg ** 2 + y_deg ** 2)
xi += pv[0][1] * x_deg + pv[0][2] * y_deg + pv[0][3] * r
eta += pv[1][1] * y_deg + pv[1][2] * x_deg + pv[1][3] * r
if nord >= 2:
x2 = x_deg ** 2
xy = x_deg * y_deg
y2 = y_deg ** 2
xi += pv[0][4] * x2 + pv[0][5] * xy + pv[0][6] * y2
eta += pv[1][4] * y2 + pv[1][5] * xy + pv[1][6] * x2
if nord >= 3:
x3 = x_deg ** 3
x2y = x2 * y_deg
xy2 = x_deg * y2
y3 = y_deg ** 3
xi += pv[0][7] * x3 + pv[0][8] * x2y + pv[0][9] * xy2 + pv[0][10] * y3
eta += pv[1][7] * y3 + pv[1][8] * xy2 + pv[1][9] * x2y + pv[1][10] * x3
xir = xi / PI180
etar = eta / PI180
ra0 = crval1 / PI180
dec0 = crval2 / PI180
ctan = numpy.tan(dec0)
ccos = numpy.cos(dec0)
raoff = numpy.arctan2(xir / ccos, 1 - etar * ctan)
ra = raoff + ra0
dec = numpy.arctan(numpy.cos(raoff) / ((1 - (etar * ctan)) / (etar + ctan)))
ra *= PI180
ra = numpy.where(ra < 0, ra + 360, ra)
ra = numpy.where(ra > 360, ra - 360, ra)
# f = numpy.int(ra < 0) + 360
# print f
# if ra < 0:
# ra += 360
# if ra >= 360:
# ra -= 360
dec *= PI180
return ra * units.degree, dec * units.degree
def parse_cd(header):
"""
Parses the CD array from an astropy FITS header.
Args:
header: astropy.io.fits.header.Header
The header containing the CD values.
Returns:
cd: 2d array (list(list(float))
[[CD1_1, CD1_2], [CD2_1, CD2_2]]
"""
return [[header["CD1_1"], header["CD1_2"]],
[header["CD2_1"], header["CD2_2"]]]
def parse_pv(header):
"""
Parses the PV array from an astropy FITS header.
Args:
header: astropy.io.fits.header.Header
The header containing the PV values.
Returns:
cd: 2d array (list(list(float))
[[PV1_0, PV1_1, ... PV1_N], [PV2_0, PV2_1, ... PV2_N]]
Note that N depends on the order of the fit. For example, an
order 3 fit goes up to PV?_10.
"""
order_fit = parse_order_fit(header)
def parse_with_base(i):
key_base = "PV%d_" % i
pvi_x = [header[key_base + "0"]]
def parse_range(lower, upper):
for j in range(lower, upper + 1):
pvi_x.append(header[key_base + str(j)])
if order_fit >= 1:
parse_range(1, 3)
if order_fit >= 2:
parse_range(4, 6)
if order_fit >= 3:
parse_range(7, 10)
return pvi_x
return [parse_with_base(1), parse_with_base(2)]
def parse_order_fit(header):
"""
Parses the order of the fit for PV terms.
Args:
header: astropy.io.fits.header.Header
The header containing the PV values.
Returns:
order_fit: int
The order of the fit.
"""
return int(header["NORDFIT"])
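# Minimal illustrative sketch (added for clarity; not part of the library).
# The parse helpers above only need mapping-style access to the header, so a
# small astropy Header with hypothetical order-1 values is enough to show the
# expected shapes of the returned arrays.
if __name__ == "__main__":
    from astropy.io import fits
    demo_header = fits.Header()
    demo_header["CD1_1"], demo_header["CD1_2"] = 1.0e-5, 0.0
    demo_header["CD2_1"], demo_header["CD2_2"] = 0.0, 1.0e-5
    demo_header["NORDFIT"] = 1
    for i in (1, 2):
        for j in range(4):
            demo_header["PV%d_%d" % (i, j)] = 1.0 if j == 1 else 0.0
    print(parse_cd(demo_header))         # [[1e-05, 0.0], [0.0, 1e-05]]
    print(parse_pv(demo_header))         # [[0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
    print(parse_order_fit(demo_header))  # 1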
|
gpl-3.0
| 2,123,899,054,085,605,400 | 26.794816 | 93 | 0.477737 | false |
afrubin/FQRead
|
docs/conf.py
|
1
|
8421
|
# -*- coding: utf-8 -*-
#
# FQRead documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 6 13:49:02 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FQRead'
copyright = u'2014, Alan F. Rubin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FQReaddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FQRead.tex', u'FQRead Documentation',
u'Alan F. Rubin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fqread', u'FQRead Documentation',
[u'Alan F. Rubin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FQRead', u'FQRead Documentation',
u'Alan F. Rubin', 'FQRead', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
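# Additional inventories can be listed the same way, e.g. (illustrative only,
# assuming the target documentation is actually served at that URL):
#intersphinx_mapping = {
#    'http://docs.python.org/': None,
#    'http://docs.scipy.org/doc/numpy/': None,
#}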
|
mit
| 7,981,866,955,841,524,000 | 30.421642 | 79 | 0.705261 | false |
googleapis/java-bigquery
|
synth.py
|
1
|
1127
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool.languages.java as java
AUTOSYNTH_MULTIPLE_COMMITS = True
java.common_templates(excludes=[
'.kokoro/continuous/java8-samples.cfg',
'.kokoro/continuous/java11-samples.cfg',
'.kokoro/nightly/java8-samples.cfg',
'.kokoro/nightly/java11-samples.cfg',
'.kokoro/nightly/samples.cfg',
'.kokoro/presubmit/java8-samples.cfg',
'.kokoro/presubmit/java11-samples.cfg',
'.kokoro/dependencies.sh',
'codecov.yaml',
'renovate.json',
])
|
apache-2.0
| 7,252,506,326,706,707,000 | 34.21875 | 74 | 0.735581 | false |
mosra/m.css
|
plugins/m/link.py
|
1
|
2187
|
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import re
from docutils import nodes, utils
from docutils.parsers import rst
from docutils.parsers.rst.roles import set_classes
# to avoid dependencies, link_regexp and parse_link() are shared by m.abbr,
# m.gh, m.gl, m.link and m.vk
link_regexp = re.compile(r'(?P<title>.*) <(?P<link>.+)>')
def parse_link(text):
link = utils.unescape(text)
m = link_regexp.match(link)
if m: return m.group('title', 'link')
return None, link
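# Illustration (not part of the original plugin): parse_link() splits an
# optional title from the link target, so
#   parse_link('Our site <https://example.com>') == ('Our site', 'https://example.com')
#   parse_link('https://example.com') == (None, 'https://example.com')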
def link(name, rawtext, text, lineno, inliner, options={}, content=[]):
title, url = parse_link(text)
if not title: title = url
# TODO: mailto URLs, internal links (need to gut out docutils for that)
set_classes(options)
node = nodes.reference(rawtext, title, refuri=url, **options)
return [node], []
def register_mcss(**kwargs):
rst.roles.register_local_role('link', link)
# Below is only Pelican-specific functionality. If Pelican is not found, these
# do nothing.
register = register_mcss # for Pelican
|
mit
| -436,906,536,956,411,650 | 38.709091 | 78 | 0.722527 | false |
dagwieers/ansible
|
lib/ansible/modules/cloud/google/gcp_storage_bucket.py
|
1
|
44498
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_storage_bucket
description:
- The Buckets resource represents a bucket in Google Cloud Storage. There is a single
global namespace shared by all buckets. For more information, see Bucket Name Requirements.
- Buckets contain objects which can be accessed by their own methods. In addition
to the acl property, buckets contain bucketAccessControls, for use in fine-grained
manipulation of an existing bucket's access controls.
- A bucket is always owned by the project team owners group.
short_description: Creates a GCP Bucket
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
acl:
description:
- Access controls on the bucket.
required: false
suboptions:
bucket:
description:
- The name of the bucket.
- 'This field represents a link to a Bucket resource in GCP. It can be specified
          in two ways. First, you can place the name of the resource here as a
          string. Alternatively, you can add `register: name-of-resource` to a gcp_storage_bucket
task and then set this bucket field to "{{ name-of-resource }}"'
required: true
entity:
description:
- 'The entity holding the permission, in one of the following forms: user-userId
user-email group-groupId group-email domain-domain project-team-projectId
allUsers allAuthenticatedUsers Examples: The user liz@example.com would
be user-liz@example.com.'
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com,
the entity would be domain-example.com.
required: true
entity_id:
description:
- The ID for the entity.
required: false
project_team:
description:
- The project team associated with the entity.
required: false
suboptions:
project_number:
description:
- The project team associated with the entity.
required: false
team:
description:
- The team.
required: false
choices:
- editors
- owners
- viewers
role:
description:
- The access permission for the entity.
required: false
choices:
- OWNER
- READER
- WRITER
cors:
description:
- The bucket's Cross-Origin Resource Sharing (CORS) configuration.
required: false
suboptions:
max_age_seconds:
description:
- The value, in seconds, to return in the Access-Control-Max-Age header used
in preflight responses.
required: false
method:
description:
- 'The list of HTTP methods on which to include CORS response headers, (GET,
OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means
"any method".'
required: false
origin:
description:
- The list of Origins eligible to receive CORS response headers.
- 'Note: "*" is permitted in the list of origins, and means "any Origin".'
required: false
response_header:
description:
- The list of HTTP headers other than the simple response headers to give
permission for the user-agent to share across domains.
required: false
default_object_acl:
description:
- Default access controls to apply to new objects when no ACL is provided.
required: false
version_added: 2.7
suboptions:
bucket:
description:
- The name of the bucket.
- 'This field represents a link to a Bucket resource in GCP. It can be specified
          in two ways. First, you can place the name of the resource here as a
          string. Alternatively, you can add `register: name-of-resource` to a gcp_storage_bucket
task and then set this bucket field to "{{ name-of-resource }}"'
required: true
entity:
description:
- 'The entity holding the permission, in one of the following forms: * user-{{userId}}
* user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} *
group-{{email}} (such as "group-example@googlegroups.com") * domain-{{domain}}
(such as "domain-example.com") * project-team-{{projectId}} * allUsers *
allAuthenticatedUsers .'
required: true
object:
description:
- The name of the object, if applied to an object.
required: false
role:
description:
- The access permission for the entity.
required: true
choices:
- OWNER
- READER
lifecycle:
description:
- The bucket's lifecycle configuration.
- See U(https://developers.google.com/storage/docs/lifecycle) for more information.
required: false
suboptions:
rule:
description:
- A lifecycle management rule, which is made of an action to take and the
condition(s) under which the action will be taken.
required: false
suboptions:
action:
description:
- The action to take.
required: false
suboptions:
storage_class:
description:
- Target storage class. Required iff the type of the action is SetStorageClass.
required: false
type:
description:
- Type of the action. Currently, only Delete and SetStorageClass are
supported.
required: false
choices:
- Delete
- SetStorageClass
condition:
description:
- The condition(s) under which the action will be taken.
required: false
suboptions:
age_days:
description:
- Age of an object (in days). This condition is satisfied when an
object reaches the specified age.
required: false
created_before:
description:
- A date in RFC 3339 format with only the date part (for instance,
"2013-01-15"). This condition is satisfied when an object is created
before midnight of the specified date in UTC.
required: false
is_live:
description:
- Relevant only for versioned objects. If the value is true, this
condition matches live objects; if the value is false, it matches
archived objects.
required: false
type: bool
matches_storage_class:
description:
- Objects having any of the storage classes specified by this condition
will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE,
COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.
required: false
num_newer_versions:
description:
- Relevant only for versioned objects. If the value is N, this condition
is satisfied when there are at least N versions (including the live
version) newer than this version of the object.
required: false
location:
description:
- The location of the bucket. Object data for objects in the bucket resides in
physical storage within this region. Defaults to US. See the developer's guide
for the authoritative list.
required: false
logging:
description:
- The bucket's logging configuration, which defines the destination bucket and
optional name prefix for the current bucket's logs.
required: false
suboptions:
log_bucket:
description:
- The destination bucket where the current bucket's logs should be placed.
required: false
log_object_prefix:
description:
- A prefix for log object names.
required: false
metageneration:
description:
- The metadata generation of this bucket.
required: false
name:
description:
- The name of the bucket.
required: false
owner:
description:
- The owner of the bucket. This is always the project team's owner group.
required: false
suboptions:
entity:
description:
- The entity, in the form project-owner-projectId.
required: false
storage_class:
description:
- The bucket's default storage class, used whenever no storageClass is specified
for a newly-created object. This defines how objects in the bucket are stored
and determines the SLA and the cost of storage.
- Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY.
If this value is not specified when the bucket is created, it will default to
STANDARD. For more information, see storage classes.
required: false
choices:
- MULTI_REGIONAL
- REGIONAL
- STANDARD
- NEARLINE
- COLDLINE
- DURABLE_REDUCED_AVAILABILITY
versioning:
description:
- The bucket's versioning configuration.
required: false
suboptions:
enabled:
description:
- While set to true, versioning is fully enabled for this bucket.
required: false
type: bool
website:
description:
- The bucket's website configuration, controlling how the service behaves when
accessing bucket contents as a web site. See the Static Website Examples for
more information.
required: false
suboptions:
main_page_suffix:
description:
- If the requested object path is missing, the service will ensure the path
has a trailing '/', append this suffix, and attempt to retrieve the resulting
object. This allows the creation of index.html objects to represent directory
pages.
required: false
not_found_page:
description:
- If the requested object path is missing, and any mainPageSuffix object is
missing, if applicable, the service will return the named object from this
bucket as the content for a 404 Not Found result.
required: false
project:
description:
- A valid API project identifier.
required: false
predefined_default_object_acl:
description:
- Apply a predefined set of default object access controls to this bucket.
- 'Acceptable values are: - "authenticatedRead": Object owner gets OWNER access,
and allAuthenticatedUsers get READER access.'
- '- "bucketOwnerFullControl": Object owner gets OWNER access, and project team
owners get OWNER access.'
- '- "bucketOwnerRead": Object owner gets OWNER access, and project team owners
get READER access.'
- '- "private": Object owner gets OWNER access.'
- '- "projectPrivate": Object owner gets OWNER access, and project team members
get access according to their roles.'
- '- "publicRead": Object owner gets OWNER access, and allUsers get READER access.'
required: false
choices:
- authenticatedRead
- bucketOwnerFullControl
- bucketOwnerRead
- private
- projectPrivate
- publicRead
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a bucket
gcp_storage_bucket:
name: ansible-storage-module
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
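# Illustrative only (not part of the upstream examples): a versioned bucket
# with a simple age-based lifecycle rule, built from the options documented above.
- name: create a versioned bucket with a lifecycle rule
  gcp_storage_bucket:
    name: ansible-storage-module-versioned
    location: us-central1
    storage_class: REGIONAL
    versioning:
      enabled: true
    lifecycle:
      rule:
      - action:
          type: Delete
        condition:
          age_days: 30
    project: test_project
    auth_kind: serviceaccount
    service_account_file: "/tmp/auth.pem"
    state: present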
'''
RETURN = '''
acl:
description:
- Access controls on the bucket.
returned: success
type: complex
contains:
bucket:
description:
- The name of the bucket.
returned: success
type: str
domain:
description:
- The domain associated with the entity.
returned: success
type: str
email:
description:
- The email address associated with the entity.
returned: success
type: str
entity:
description:
- 'The entity holding the permission, in one of the following forms: user-userId
user-email group-groupId group-email domain-domain project-team-projectId
allUsers allAuthenticatedUsers Examples: The user liz@example.com would be
user-liz@example.com.'
- The group example@googlegroups.com would be group-example@googlegroups.com.
- To refer to all members of the Google Apps for Business domain example.com,
the entity would be domain-example.com.
returned: success
type: str
entityId:
description:
- The ID for the entity.
returned: success
type: str
id:
description:
- The ID of the access-control entry.
returned: success
type: str
projectTeam:
description:
- The project team associated with the entity.
returned: success
type: complex
contains:
projectNumber:
description:
- The project team associated with the entity.
returned: success
type: str
team:
description:
- The team.
returned: success
type: str
role:
description:
- The access permission for the entity.
returned: success
type: str
cors:
description:
- The bucket's Cross-Origin Resource Sharing (CORS) configuration.
returned: success
type: complex
contains:
maxAgeSeconds:
description:
- The value, in seconds, to return in the Access-Control-Max-Age header used
in preflight responses.
returned: success
type: int
method:
description:
- 'The list of HTTP methods on which to include CORS response headers, (GET,
OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means
"any method".'
returned: success
type: list
origin:
description:
- The list of Origins eligible to receive CORS response headers.
- 'Note: "*" is permitted in the list of origins, and means "any Origin".'
returned: success
type: list
responseHeader:
description:
- The list of HTTP headers other than the simple response headers to give permission
for the user-agent to share across domains.
returned: success
type: list
defaultObjectAcl:
description:
- Default access controls to apply to new objects when no ACL is provided.
returned: success
type: complex
contains:
bucket:
description:
- The name of the bucket.
returned: success
type: str
domain:
description:
- The domain associated with the entity.
returned: success
type: str
email:
description:
- The email address associated with the entity.
returned: success
type: str
entity:
description:
- 'The entity holding the permission, in one of the following forms: * user-{{userId}}
* user-{{email}} (such as "user-liz@example.com") * group-{{groupId}} * group-{{email}}
(such as "group-example@googlegroups.com") * domain-{{domain}} (such as "domain-example.com")
* project-team-{{projectId}} * allUsers * allAuthenticatedUsers .'
returned: success
type: str
entityId:
description:
- The ID for the entity.
returned: success
type: str
generation:
description:
- The content generation of the object, if applied to an object.
returned: success
type: int
id:
description:
- The ID of the access-control entry.
returned: success
type: str
object:
description:
- The name of the object, if applied to an object.
returned: success
type: str
projectTeam:
description:
- The project team associated with the entity.
returned: success
type: complex
contains:
projectNumber:
description:
- The project team associated with the entity.
returned: success
type: str
team:
description:
- The team.
returned: success
type: str
role:
description:
- The access permission for the entity.
returned: success
type: str
id:
description:
  - The ID of the bucket. For buckets, the id and name properties are the same.
returned: success
type: str
lifecycle:
description:
- The bucket's lifecycle configuration.
- See U(https://developers.google.com/storage/docs/lifecycle) for more information.
returned: success
type: complex
contains:
rule:
description:
- A lifecycle management rule, which is made of an action to take and the condition(s)
under which the action will be taken.
returned: success
type: complex
contains:
action:
description:
- The action to take.
returned: success
type: complex
contains:
storageClass:
description:
- Target storage class. Required iff the type of the action is SetStorageClass.
returned: success
type: str
type:
description:
- Type of the action. Currently, only Delete and SetStorageClass are
supported.
returned: success
type: str
condition:
description:
- The condition(s) under which the action will be taken.
returned: success
type: complex
contains:
ageDays:
description:
- Age of an object (in days). This condition is satisfied when an object
reaches the specified age.
returned: success
type: int
createdBefore:
description:
- A date in RFC 3339 format with only the date part (for instance, "2013-01-15").
This condition is satisfied when an object is created before midnight
of the specified date in UTC.
returned: success
type: str
isLive:
description:
- Relevant only for versioned objects. If the value is true, this condition
matches live objects; if the value is false, it matches archived objects.
returned: success
type: bool
matchesStorageClass:
description:
- Objects having any of the storage classes specified by this condition
will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE,
COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.
returned: success
type: list
numNewerVersions:
description:
- Relevant only for versioned objects. If the value is N, this condition
is satisfied when there are at least N versions (including the live
version) newer than this version of the object.
returned: success
type: int
location:
description:
- The location of the bucket. Object data for objects in the bucket resides in physical
storage within this region. Defaults to US. See the developer's guide for the
authoritative list.
returned: success
type: str
logging:
description:
- The bucket's logging configuration, which defines the destination bucket and optional
name prefix for the current bucket's logs.
returned: success
type: complex
contains:
logBucket:
description:
- The destination bucket where the current bucket's logs should be placed.
returned: success
type: str
logObjectPrefix:
description:
- A prefix for log object names.
returned: success
type: str
metageneration:
description:
- The metadata generation of this bucket.
returned: success
type: int
name:
description:
- The name of the bucket.
returned: success
type: str
owner:
description:
- The owner of the bucket. This is always the project team's owner group.
returned: success
type: complex
contains:
entity:
description:
- The entity, in the form project-owner-projectId.
returned: success
type: str
entityId:
description:
- The ID for the entity.
returned: success
type: str
projectNumber:
description:
- The project number of the project the bucket belongs to.
returned: success
type: int
storageClass:
description:
- The bucket's default storage class, used whenever no storageClass is specified
for a newly-created object. This defines how objects in the bucket are stored
and determines the SLA and the cost of storage.
- Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY.
If this value is not specified when the bucket is created, it will default to
STANDARD. For more information, see storage classes.
returned: success
type: str
timeCreated:
description:
- The creation time of the bucket in RFC 3339 format.
returned: success
type: str
updated:
description:
- The modification time of the bucket in RFC 3339 format.
returned: success
type: str
versioning:
description:
- The bucket's versioning configuration.
returned: success
type: complex
contains:
enabled:
description:
- While set to true, versioning is fully enabled for this bucket.
returned: success
type: bool
website:
description:
- The bucket's website configuration, controlling how the service behaves when accessing
bucket contents as a web site. See the Static Website Examples for more information.
returned: success
type: complex
contains:
mainPageSuffix:
description:
- If the requested object path is missing, the service will ensure the path
has a trailing '/', append this suffix, and attempt to retrieve the resulting
object. This allows the creation of index.html objects to represent directory
pages.
returned: success
type: str
notFoundPage:
description:
- If the requested object path is missing, and any mainPageSuffix object is
missing, if applicable, the service will return the named object from this
bucket as the content for a 404 Not Found result.
returned: success
type: str
project:
description:
- A valid API project identifier.
returned: success
type: str
predefinedDefaultObjectAcl:
description:
- Apply a predefined set of default object access controls to this bucket.
- 'Acceptable values are: - "authenticatedRead": Object owner gets OWNER access,
and allAuthenticatedUsers get READER access.'
- '- "bucketOwnerFullControl": Object owner gets OWNER access, and project team
owners get OWNER access.'
- '- "bucketOwnerRead": Object owner gets OWNER access, and project team owners
get READER access.'
- '- "private": Object owner gets OWNER access.'
- '- "projectPrivate": Object owner gets OWNER access, and project team members
get access according to their roles.'
- '- "publicRead": Object owner gets OWNER access, and allUsers get READER access.'
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
acl=dict(
type='list',
elements='dict',
options=dict(
bucket=dict(required=True),
entity=dict(required=True, type='str'),
entity_id=dict(type='str'),
project_team=dict(
type='dict', options=dict(project_number=dict(type='str'), team=dict(type='str', choices=['editors', 'owners', 'viewers']))
),
role=dict(type='str', choices=['OWNER', 'READER', 'WRITER']),
),
),
cors=dict(
type='list',
elements='dict',
options=dict(
max_age_seconds=dict(type='int'),
method=dict(type='list', elements='str'),
origin=dict(type='list', elements='str'),
response_header=dict(type='list', elements='str'),
),
),
default_object_acl=dict(
type='list',
elements='dict',
options=dict(
bucket=dict(required=True),
entity=dict(required=True, type='str'),
object=dict(type='str'),
role=dict(required=True, type='str', choices=['OWNER', 'READER']),
),
),
lifecycle=dict(
type='dict',
options=dict(
rule=dict(
type='list',
elements='dict',
options=dict(
action=dict(
type='dict', options=dict(storage_class=dict(type='str'), type=dict(type='str', choices=['Delete', 'SetStorageClass']))
),
condition=dict(
type='dict',
options=dict(
age_days=dict(type='int'),
created_before=dict(type='str'),
is_live=dict(type='bool'),
matches_storage_class=dict(type='list', elements='str'),
num_newer_versions=dict(type='int'),
),
),
),
)
),
),
location=dict(type='str'),
logging=dict(type='dict', options=dict(log_bucket=dict(type='str'), log_object_prefix=dict(type='str'))),
metageneration=dict(type='int'),
name=dict(type='str'),
owner=dict(type='dict', options=dict(entity=dict(type='str'))),
storage_class=dict(type='str', choices=['MULTI_REGIONAL', 'REGIONAL', 'STANDARD', 'NEARLINE', 'COLDLINE', 'DURABLE_REDUCED_AVAILABILITY']),
versioning=dict(type='dict', options=dict(enabled=dict(type='bool'))),
website=dict(type='dict', options=dict(main_page_suffix=dict(type='str'), not_found_page=dict(type='str'))),
project=dict(type='str'),
predefined_default_object_acl=dict(
type='str', choices=['authenticatedRead', 'bucketOwnerFullControl', 'bucketOwnerRead', 'private', 'projectPrivate', 'publicRead']
),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
state = module.params['state']
kind = 'storage#bucket'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'storage')
return return_if_object(module, auth.post(link, resource_to_request(module)), kind)
def update(module, link, kind):
auth = GcpSession(module, 'storage')
return return_if_object(module, auth.put(link, resource_to_request(module)), kind)
def delete(module, link, kind):
auth = GcpSession(module, 'storage')
return return_if_object(module, auth.delete(link), kind)
def resource_to_request(module):
request = {
u'kind': 'storage#bucket',
u'project': module.params.get('project'),
u'predefinedDefaultObjectAcl': module.params.get('predefined_default_object_acl'),
u'acl': BucketAclArray(module.params.get('acl', []), module).to_request(),
u'cors': BucketCorsArray(module.params.get('cors', []), module).to_request(),
u'defaultObjectAcl': BucketDefaultobjectaclArray(module.params.get('default_object_acl', []), module).to_request(),
u'lifecycle': BucketLifecycle(module.params.get('lifecycle', {}), module).to_request(),
u'location': module.params.get('location'),
u'logging': BucketLogging(module.params.get('logging', {}), module).to_request(),
u'metageneration': module.params.get('metageneration'),
u'name': module.params.get('name'),
u'owner': BucketOwner(module.params.get('owner', {}), module).to_request(),
u'storageClass': module.params.get('storage_class'),
u'versioning': BucketVersioning(module.params.get('versioning', {}), module).to_request(),
u'website': BucketWebsite(module.params.get('website', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'storage')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/storage/v1/b/{name}?projection=full".format(**module.params)
def collection(module):
return "https://www.googleapis.com/storage/v1/b?project={project}".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'acl': BucketAclArray(response.get(u'acl', []), module).from_response(),
u'cors': BucketCorsArray(response.get(u'cors', []), module).from_response(),
u'defaultObjectAcl': BucketDefaultobjectaclArray(module.params.get('default_object_acl', []), module).to_request(),
u'id': response.get(u'id'),
u'lifecycle': BucketLifecycle(response.get(u'lifecycle', {}), module).from_response(),
u'location': response.get(u'location'),
u'logging': BucketLogging(response.get(u'logging', {}), module).from_response(),
u'metageneration': response.get(u'metageneration'),
u'name': response.get(u'name'),
u'owner': BucketOwner(response.get(u'owner', {}), module).from_response(),
u'projectNumber': response.get(u'projectNumber'),
u'storageClass': response.get(u'storageClass'),
u'timeCreated': response.get(u'timeCreated'),
u'updated': response.get(u'updated'),
u'versioning': BucketVersioning(response.get(u'versioning', {}), module).from_response(),
u'website': BucketWebsite(response.get(u'website', {}), module).from_response(),
}
class BucketAclArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'bucket': replace_resource_dict(item.get(u'bucket', {}), 'name'),
u'entity': item.get('entity'),
u'entityId': item.get('entity_id'),
u'projectTeam': BucketProjectteam(item.get('project_team', {}), self.module).to_request(),
u'role': item.get('role'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'bucket': item.get(u'bucket'),
u'entity': item.get(u'entity'),
u'entityId': item.get(u'entityId'),
u'projectTeam': BucketProjectteam(item.get(u'projectTeam', {}), self.module).from_response(),
u'role': item.get(u'role'),
}
)
class BucketProjectteam(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')})
def from_response(self):
return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')})
class BucketCorsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'maxAgeSeconds': item.get('max_age_seconds'),
u'method': item.get('method'),
u'origin': item.get('origin'),
u'responseHeader': item.get('response_header'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'maxAgeSeconds': item.get(u'maxAgeSeconds'),
u'method': item.get(u'method'),
u'origin': item.get(u'origin'),
u'responseHeader': item.get(u'responseHeader'),
}
)
class BucketDefaultobjectaclArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'bucket': replace_resource_dict(item.get(u'bucket', {}), 'name'),
u'entity': item.get('entity'),
u'object': item.get('object'),
u'role': item.get('role'),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{u'bucket': item.get(u'bucket'), u'entity': item.get(u'entity'), u'object': item.get(u'object'), u'role': item.get(u'role')}
)
class BucketProjectteam(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'projectNumber': self.request.get('project_number'), u'team': self.request.get('team')})
def from_response(self):
return remove_nones_from_dict({u'projectNumber': self.request.get(u'projectNumber'), u'team': self.request.get(u'team')})
class BucketLifecycle(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'rule': BucketRuleArray(self.request.get('rule', []), self.module).to_request()})
def from_response(self):
return remove_nones_from_dict({u'rule': BucketRuleArray(self.request.get(u'rule', []), self.module).from_response()})
class BucketRuleArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict(
{
u'action': BucketAction(item.get('action', {}), self.module).to_request(),
u'condition': BucketCondition(item.get('condition', {}), self.module).to_request(),
}
)
def _response_from_item(self, item):
return remove_nones_from_dict(
{
u'action': BucketAction(item.get(u'action', {}), self.module).from_response(),
u'condition': BucketCondition(item.get(u'condition', {}), self.module).from_response(),
}
)
class BucketAction(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'storageClass': self.request.get('storage_class'), u'type': self.request.get('type')})
def from_response(self):
return remove_nones_from_dict({u'storageClass': self.request.get(u'storageClass'), u'type': self.request.get(u'type')})
class BucketCondition(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'age': self.request.get('age_days'),
u'createdBefore': self.request.get('created_before'),
u'isLive': self.request.get('is_live'),
u'matchesStorageClass': self.request.get('matches_storage_class'),
u'numNewerVersions': self.request.get('num_newer_versions'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'age': self.request.get(u'age'),
u'createdBefore': self.request.get(u'createdBefore'),
u'isLive': self.request.get(u'isLive'),
u'matchesStorageClass': self.request.get(u'matchesStorageClass'),
u'numNewerVersions': self.request.get(u'numNewerVersions'),
}
)
class BucketLogging(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'logBucket': self.request.get('log_bucket'), u'logObjectPrefix': self.request.get('log_object_prefix')})
def from_response(self):
return remove_nones_from_dict({u'logBucket': self.request.get(u'logBucket'), u'logObjectPrefix': self.request.get(u'logObjectPrefix')})
class BucketOwner(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'entity': self.request.get('entity')})
def from_response(self):
return remove_nones_from_dict({u'entity': self.request.get(u'entity')})
class BucketVersioning(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'enabled': self.request.get('enabled')})
def from_response(self):
return remove_nones_from_dict({u'enabled': self.request.get(u'enabled')})
class BucketWebsite(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'mainPageSuffix': self.request.get('main_page_suffix'), u'notFoundPage': self.request.get('not_found_page')})
def from_response(self):
return remove_nones_from_dict({u'mainPageSuffix': self.request.get(u'mainPageSuffix'), u'notFoundPage': self.request.get(u'notFoundPage')})
if __name__ == '__main__':
main()
|
gpl-3.0
| -5,727,255,997,407,053,000 | 34.120758 | 151 | 0.593802 | false |
TheModMaker/shaka-player
|
build/check.py
|
1
|
6755
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used to validate that the library is correct.
This checks:
* All files in lib/ appear when compiling +@complete
* Runs a compiler pass over the test code to check for type errors
* Runs the linter to check for style violations.
"""
import argparse
import logging
import os
import re
import sys
import build
import shakaBuildHelpers
def get_lint_files():
"""Returns the absolute paths to all the files to run the linter over."""
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
def get(arg):
return shakaBuildHelpers.get_all_files(os.path.join(base, arg), match)
return get('test') + get('lib') + get('externs') + get('demo')
def check_js_lint(args):
"""Runs the JavaScript linter."""
# TODO: things not enforced: property doc requirements
logging.info('Running eslint...')
eslint = shakaBuildHelpers.get_node_binary('eslint')
cmd_line = eslint + get_lint_files()
if args.fix:
cmd_line += ['--fix']
return shakaBuildHelpers.execute_get_code(cmd_line) == 0
def check_html_lint(_):
"""Runs the HTML linter over the HTML files.
Returns:
True on success, False on failure.
"""
logging.info('Running htmlhint...')
htmlhint = shakaBuildHelpers.get_node_binary('htmlhint')
base = shakaBuildHelpers.get_source_base()
files = ['index.html', 'demo/index.html', 'support.html']
file_paths = [os.path.join(base, x) for x in files]
config_path = os.path.join(base, '.htmlhintrc')
cmd_line = htmlhint + ['--config=' + config_path] + file_paths
return shakaBuildHelpers.execute_get_code(cmd_line) == 0
def check_complete(_):
"""Checks whether the 'complete' build references every file.
This is used by the build script to ensure that every file is included in at
least one build type.
Returns:
True on success, False on failure.
"""
logging.info('Checking that the build files are complete...')
complete = build.Build()
# Normally we don't need to include @core, but because we look at the build
# object directly, we need to include it here. When using main(), it will
# call addCore which will ensure core is included.
if not complete.parse_build(['+@complete', '+@core'], os.getcwd()):
logging.error('Error parsing complete build')
return False
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
all_files = shakaBuildHelpers.get_all_files(os.path.join(base, 'lib'), match)
missing_files = set(all_files) - complete.include
if missing_files:
logging.error('There are files missing from the complete build:')
for missing in missing_files:
# Convert to a path relative to source base.
logging.error(' ' + os.path.relpath(missing, base))
return False
return True
def check_tests(_):
"""Runs an extra compile pass over the test code to check for type errors.
Returns:
True on success, False on failure.
"""
logging.info('Checking the tests for type errors...')
match = re.compile(r'.*\.js$')
base = shakaBuildHelpers.get_source_base()
def get(*args):
return shakaBuildHelpers.get_all_files(os.path.join(base, *args), match)
files = set(get('lib') + get('externs') + get('test') +
get('third_party', 'closure'))
files.add(os.path.join(base, 'demo', 'common', 'assets.js'))
test_build = build.Build(files)
closure_opts = build.common_closure_opts + build.common_closure_defines
closure_opts += build.debug_closure_opts + build.debug_closure_defines
# Ignore missing goog.require since we assume the whole library is
# already included.
closure_opts += [
'--jscomp_off=missingRequire', '--jscomp_off=strictMissingRequire',
'--checks-only', '-O', 'SIMPLE'
]
return test_build.build_raw(closure_opts)
def check_externs(_):
"""Runs an extra compile pass over the generated externs to ensure that they
are usable.
Returns:
True on success, False on failure.
"""
logging.info('Checking the usability of generated externs...')
# Create a complete "build" object.
externs_build = build.Build()
if not externs_build.parse_build(['+@complete'], os.getcwd()):
return False
externs_build.add_core()
# Use it to generate externs for the next check.
if not externs_build.generate_externs('check'):
return False
# Create a custom "build" object, add all manually-written externs, then add
# the generated externs we just generated.
source_base = shakaBuildHelpers.get_source_base()
manual_externs = shakaBuildHelpers.get_all_files(
os.path.join(source_base, 'externs'), re.compile(r'.*\.js$'))
generated_externs = os.path.join(
source_base, 'dist', 'shaka-player.check.externs.js')
check_build = build.Build()
check_build.include = set(manual_externs)
check_build.include.add(generated_externs)
# Build with the complete set of externs, but without any application code.
# This will help find issues in the generated externs, independent of the app.
# Since we have no app, don't use the defines. Unused defines cause a
# compilation error.
closure_opts = build.common_closure_opts + build.debug_closure_opts + [
'--checks-only', '-O', 'SIMPLE'
]
ok = check_build.build_raw(closure_opts)
# Clean up the temporary externs we just generated.
os.unlink(generated_externs)
# Return the success/failure of the build above.
return ok
def usage():
print 'Usage:', sys.argv[0]
print
print __doc__
def main(args):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--fix',
help='Automatically fix style violations.',
action='store_true')
parsed_args = parser.parse_args(args)
# Update node modules if needed.
if not shakaBuildHelpers.update_node_modules():
return 1
steps = [
check_js_lint,
check_html_lint,
check_complete,
check_tests,
check_externs,
]
for step in steps:
if not step(parsed_args):
return 1
return 0
if __name__ == '__main__':
shakaBuildHelpers.run_main(main)
|
apache-2.0
| 7,833,908,527,984,224,000 | 30.418605 | 80 | 0.691192 | false |
ContributeToScience/participant-booking-app
|
booking/core/ip2geo/__init__.py
|
1
|
10628
|
import math
import mmap
import gzip
import os
import codecs
import pytz
import const
from util import ip2long
from timezone import time_zone_by_country_and_region
MMAP_CACHE = const.MMAP_CACHE
MEMORY_CACHE = const.MEMORY_CACHE
STANDARD = const.STANDARD
class GeoIPError(Exception):
pass
class GeoIPMetaclass(type):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instances'):
cls._instances = {}
if len(args) > 0:
filename = args[0]
elif 'filename' in kwargs:
filename = kwargs['filename']
        if filename not in cls._instances:
cls._instances[filename] = type.__new__(cls, *args, **kwargs)
return cls._instances[filename]
GeoIPBase = GeoIPMetaclass('GeoIPBase', (object,), {})
class GeoIP(GeoIPBase):
def __init__(self, filename, flags=0):
"""
Initialize the class.
@param filename: path to a geoip database. If MEMORY_CACHE is used,
the file can be gzipped.
@type filename: str
@param flags: flags that affect how the database is processed.
Currently the only supported flags are STANDARD (the default),
MEMORY_CACHE (preload the whole file into memory), and
MMAP_CACHE (access the file via mmap).
@type flags: int
"""
self._filename = filename
self._flags = flags
if self._flags & const.MMAP_CACHE:
with open(filename, 'rb') as f:
self._filehandle = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
elif self._flags & const.MEMORY_CACHE:
try:
self._filehandle = gzip.open(filename, 'rb')
self._memoryBuffer = self._filehandle.read()
except IOError:
self._filehandle = codecs.open(filename, 'rb', 'latin_1')
self._memoryBuffer = self._filehandle.read()
else:
self._filehandle = codecs.open(filename, 'rb', 'latin_1')
self._setup_segments()
def _setup_segments(self):
"""
Parses the database file to determine what kind of database is being used and setup
segment sizes and start points that will be used by the seek*() methods later.
"""
self._databaseType = const.CITY_EDITION_REV1
self._recordLength = const.STANDARD_RECORD_LENGTH
filepos = self._filehandle.tell()
self._filehandle.seek(-3, os.SEEK_END)
for i in range(const.STRUCTURE_INFO_MAX_SIZE):
delim = self._filehandle.read(3)
if delim == unichr(255) * 3:
self._databaseType = ord(self._filehandle.read(1))
if self._databaseType in (const.CITY_EDITION_REV0,
const.CITY_EDITION_REV1):
self._databaseSegments = 0
buf = self._filehandle.read(const.SEGMENT_RECORD_LENGTH)
for j in range(const.SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
break
else:
self._filehandle.seek(-4, os.SEEK_CUR)
self._filehandle.seek(filepos, os.SEEK_SET)
def _seek_country(self, ipnum):
"""
Using the record length and appropriate start points, seek to the
country that corresponds to the converted IP address integer.
@param ipnum: result of ip2long conversion
@type ipnum: int
@return: offset of start of record
@rtype: int
"""
offset = 0
for depth in range(31, -1, -1):
if self._flags & const.MEMORY_CACHE:
startIndex = 2 * self._recordLength * offset
length = 2 * self._recordLength
endIndex = startIndex + length
buf = self._memoryBuffer[startIndex:endIndex]
else:
self._filehandle.seek(2 * self._recordLength * offset, os.SEEK_SET)
buf = self._filehandle.read(2 * self._recordLength)
x = [0, 0]
for i in range(2):
for j in range(self._recordLength):
x[i] += ord(buf[self._recordLength * i + j]) << (j * 8)
if ipnum & (1 << depth):
if x[1] >= self._databaseSegments:
return x[1]
offset = x[1]
else:
if x[0] >= self._databaseSegments:
return x[0]
offset = x[0]
raise Exception('Error traversing database - perhaps it is corrupt?')
def _get_record(self, ipnum):
"""
Populate location dict for converted IP.
@param ipnum: converted IP address
@type ipnum: int
@return: dict with country_code, country_code3, country_name,
region, city, postal_code, latitude, longitude,
dma_code, metro_code, area_code, region_name, time_zone
@rtype: dict
"""
seek_country = self._seek_country(ipnum)
if seek_country == self._databaseSegments:
return None
record_pointer = seek_country + (2 * self._recordLength - 1) * self._databaseSegments
self._filehandle.seek(record_pointer, os.SEEK_SET)
record_buf = self._filehandle.read(const.FULL_RECORD_LENGTH)
record = {}
record_buf_pos = 0
char = ord(record_buf[record_buf_pos])
record['country_code'] = const.COUNTRY_CODES[char]
record['country_code3'] = const.COUNTRY_CODES3[char]
record['country_name'] = const.COUNTRY_NAMES[char]
record_buf_pos += 1
str_length = 0
# get region
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['region_name'] = record_buf[record_buf_pos:record_buf_pos + str_length]
record_buf_pos += str_length + 1
str_length = 0
# get city
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['city'] = record_buf[record_buf_pos:record_buf_pos + str_length]
else:
record['city'] = ''
record_buf_pos += str_length + 1
str_length = 0
# get the postal code
char = ord(record_buf[record_buf_pos + str_length])
while (char != 0):
str_length += 1
char = ord(record_buf[record_buf_pos + str_length])
if str_length > 0:
record['postal_code'] = record_buf[record_buf_pos:record_buf_pos + str_length]
else:
record['postal_code'] = None
record_buf_pos += str_length + 1
str_length = 0
latitude = 0
longitude = 0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
latitude += (char << (j * 8))
record['latitude'] = (latitude / 10000.0) - 180.0
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
longitude += (char << (j * 8))
record['longitude'] = (longitude / 10000.0) - 180.0
if self._databaseType == const.CITY_EDITION_REV1:
dmaarea_combo = 0
if record['country_code'] == 'US':
for j in range(3):
char = ord(record_buf[record_buf_pos])
record_buf_pos += 1
dmaarea_combo += (char << (j * 8))
record['dma_code'] = int(math.floor(dmaarea_combo / 1000))
record['area_code'] = dmaarea_combo % 1000
else:
record['dma_code'] = 0
record['area_code'] = 0
if 'dma_code' in record and record['dma_code'] in const.DMA_MAP:
record['metro_code'] = const.DMA_MAP[record['dma_code']]
else:
record['metro_code'] = ''
if 'country_code' in record:
record['time_zone'] = time_zone_by_country_and_region(
record['country_code'], record.get('region_name')) or ''
else:
record['time_zone'] = ''
return record
def ipaddress_to_timezone(self, ipaddress):
"""
Look up the time zone for a given IP address.
Use this method if you have a Region or City database.
        @param ipaddress: IP address
        @type ipaddress: str
@return: A datetime.tzinfo implementation for the given timezone
@rtype: datetime.tzinfo
"""
try:
ipnum = ip2long(ipaddress)
if not ipnum:
raise ValueError("Invalid IP address: %s" % ipaddress)
if not self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect ' \
'Region or City database')
tz_name = self._get_record(ipnum)['time_zone']
if tz_name:
tz = pytz.timezone(tz_name)
else:
tz = None
return tz
except ValueError:
raise GeoIPError(
'*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % ipaddress)
def ipaddress_to_location(self, ipaddress):
"""
        Look up the longitude/latitude location for a given IP address.
        Use this method if you have a Region or City database.
        @param ipaddress: IP address
        @type ipaddress: str
        @return: dict with 'lng' and 'lat' keys
        @rtype: dict
"""
try:
ipnum = ip2long(ipaddress)
if not ipnum:
raise ValueError("Invalid IP address: %s" % ipaddress)
if not self._databaseType in (const.CITY_EDITION_REV0, const.CITY_EDITION_REV1):
raise GeoIPError('Invalid database type; region_* methods expect ' \
'Region or City database')
lng = self._get_record(ipnum)['longitude']
lat = self._get_record(ipnum)['latitude']
return {
'lng': lng,
'lat': lat
}
except ValueError:
raise GeoIPError(
'*_by_addr methods only accept IP addresses. Use *_by_name for hostnames. (Address: %s)' % ipaddress)
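# Minimal usage sketch (not part of the original module). The database path is
# an assumption and must point at a real MaxMind City database file:
#
#   geo = GeoIP('/path/to/GeoLiteCity.dat', flags=MEMORY_CACHE)
#   print(geo.ipaddress_to_location('8.8.8.8'))   # {'lng': ..., 'lat': ...}
#   print(geo.ipaddress_to_timezone('8.8.8.8'))   # a pytz tzinfo object or None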
|
gpl-2.0
| -1,734,714,524,999,281,000 | 31.604294 | 117 | 0.5414 | false |
VirusTotal/vt-graph-api
|
tests/test_search.py
|
1
|
25999
|
"""Test search VTGraph methods."""
import pytest
import six
import vt_graph_api
try:
from unittest.mock import call
from unittest.mock import Mock
import urllib.parse as urlparse
except ImportError:
from mock import call
from mock import Mock
import urlparse
test_graph = vt_graph_api.VTGraph(
"Dummy api key", verbose=False, private=False, name="Graph test",
user_editors=["agfernandez"], group_viewers=["virustotal"])
def test_search_connection_first_level(mocker):
"""Test search connection and found it in the first level."""
rq_id = "7c11c7ccd384fd9f377da499fc059fa08fdc33a1bb870b5bc3812d24dd421a16"
request_data = {
"data": [
{
"attributes": {},
"id": rq_id,
"type": "file"
}
]
}
mocker.spy(test_graph, "_get_expansion_nodes")
mocker.spy(test_graph, "_parallel_expansion")
node_a = vt_graph_api.Node(
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
"file")
node_b = vt_graph_api.Node(
"7c11c7ccd384fd9f377da499fc059fa08fdc33a1bb870b5bc3812d24dd421a16",
"file")
m = mocker.Mock(status_code=200, json=mocker.Mock(return_value=request_data))
mocker.patch("requests.get", return_value=m)
assert test_graph._search_connection(node_a, [node_b], 1000, 5, 100)
assert test_graph._get_expansion_nodes.call_count == len(
node_a.expansions_available)
assert test_graph._parallel_expansion.call_count == 1
mocker.resetall()
def test_search_connection_second_level(mocker):
"""Test search connection and found it in the second level."""
rq_id = "7c11c7ccd384fd9f377da499fc059fa08fdc33a1bb870b5bc3812d24dd421a16"
request_response_first_level = [
{
"data": [
{
"attributes": {},
"id": rq_id,
"type": "file"
}
]
}
]
request_response_second_level = [
{
"data": [
{
"attributes": {},
"id": "nsis.sf.net",
"type": "domain"
}
]
},
]
side_effects = list(
request_response_first_level *
len(vt_graph_api.Node.NODE_EXPANSIONS["file"]))
side_effects += (
request_response_second_level *
len(vt_graph_api.Node.NODE_EXPANSIONS["file"]))
node_a = vt_graph_api.Node(
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
"file")
node_b = vt_graph_api.Node("nsis.sf.net", "domain")
m = mocker.Mock(status_code=200, json=mocker.Mock(side_effect=side_effects))
mocker.patch("requests.get", return_value=m)
mocker.spy(test_graph, "_get_expansion_nodes")
mocker.spy(test_graph, "_parallel_expansion")
assert test_graph._search_connection(node_a, [node_b], 1000, 5, 100)
assert test_graph._get_expansion_nodes.call_count == len(side_effects)
# 2 is the number of distinct nodes that the algorithm will explore
assert test_graph._parallel_expansion.call_count == 2
mocker.resetall()
def test_search_connection_third_level(mocker):
"""Test search connection and found it in the third level."""
rq_id = "7c11c7ccd384fd9f377da499fc059fa08fdc33a1bb870b5bc3812d24dd421a16"
rq_id_2 = "660903b139d5c7ec80af124e93320c18895de32135450d4acd14096e6c0dd2ef"
request_response_first_level = [
{
"data": [
{
"attributes": {},
"id": rq_id,
"type": "file"
}
]
}
]
request_response_second_level = [
{
"data": [
{
"attributes": {},
"id": "nsis.sf.net",
"type": "domain"
}
]
},
]
request_response_third_level = [
{
"data": [
{
"attributes": {},
"id": rq_id_2,
"type": "file"
}
]
},
]
side_effects = list(
request_response_first_level *
len(vt_graph_api.Node.NODE_EXPANSIONS["file"])
)
side_effects += (
request_response_second_level *
len(vt_graph_api.Node.NODE_EXPANSIONS["file"])
)
side_effects += (
request_response_third_level *
len(vt_graph_api.Node.NODE_EXPANSIONS["domain"])
)
node_a = vt_graph_api.Node(
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
"file")
node_b = vt_graph_api.Node(
"660903b139d5c7ec80af124e93320c18895de32135450d4acd14096e6c0dd2ef",
"file")
m = mocker.Mock(status_code=200, json=mocker.Mock(side_effect=side_effects))
mocker.patch("requests.get", return_value=m)
mocker.spy(test_graph, "_get_expansion_nodes")
mocker.spy(test_graph, "_parallel_expansion")
assert test_graph._search_connection(node_a, [node_b], 3000, 5, 1000)
assert test_graph._get_expansion_nodes.call_count == len(side_effects)
# 3 is the number of distinct nodes that the algorithm will explore
assert test_graph._parallel_expansion.call_count == 3
mocker.resetall()
def test_search_connection_not_found_and_consumes_max_api_quotas(mocker):
"""Test search connection and found it in the third level."""
rq_id = "7c11c7ccd384fd9f377da499fc059fa08fdc33a1bb870b5bc3812d24dd421a16"
rq_id_2 = "660903b139d5c7ec80af124e93320c18895de32135450d4acd14096e6c0dd2ef"
request_response_first_level = [
{
"data": [
{
"attributes": {},
"id": rq_id,
"type": "file"
}
]
}
]
request_response_second_level = [
{
"data": [
{
"attributes": {},
"id": "nsis.sf.net",
"type": "domain"
}
]
},
]
request_response_third_level = [
{
"data": [
{
"attributes": {},
"id": rq_id_2,
"type": "file"
}
]
},
]
side_effects = list(request_response_first_level * 17)
side_effects += request_response_second_level * 289
  side_effects += request_response_third_level * 2023
node_a = vt_graph_api.Node(
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
"file" )
node_b = vt_graph_api.Node(
"660903b139d5c7ec80af124e93320c18895de32135450d4acd14096e6c0dd2ef",
"file")
m = mocker.Mock(status_code=200, json=mocker.Mock(side_effect=side_effects))
mocker.patch("requests.get", return_value=m)
mocker.spy(test_graph, "_get_expansion_nodes")
mocker.spy(test_graph, "_parallel_expansion")
assert not test_graph._search_connection(node_a, [node_b], 100, 5, 1000)
assert test_graph._get_expansion_nodes.call_count <= 100
mocker.resetall()
###############################################################################
# END TO END TEST #
###############################################################################
SOURCE_NODE_ID = (
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906")
INTERMEDIATE_NODE_ID = (
"bde526ed27ce0630401ad24794014b68e32de413de6bc7f37319e4cc4afa283d")
TARGET_NODE_ID = "nsis.sf.net"
EXPANSION_NODES = {
SOURCE_NODE_ID: "file",
INTERMEDIATE_NODE_ID: "file",
"bde526ed27ce0630401ad24794014b68e32de413de6bc7f37319e4cc4afa283d": "file",
"070f603e0443b1fae57425210fb3b27c2f77d8983cfefefb0ee185de572df33d": "file",
"e575a260b7f9efe98a3674eb7347d01d447cebce0e6ef2b9b2444bdd0a98b0a2": "file",
"d44cc91c43f7099a2c7b5cc4c56e4db903532e96f0b9c7c0a7f1b16117679b1e": "file",
"e3ecdaf963efcfe5cf20559b4d68dd624ebb83f08d6be15d252a8baf0125eeb2": "file",
"fb0b6044347e972e21b6c376e37e1115dab494a2c6b9fb28b92b1e45b45d0ebc": "file",
"download.eu-west-3.fromsmash.co": "domain",
"76.68.25.125": "ip_address",
"99.52.126.32": "ip_address",
"ohd.vault.cf": "domain",
"http://junior.catsecurity.net/~tmdahr1245/wannacry.exe": "url",
"428f22a9afd2797ede7c0583d34a052c32693cbb55f567a60298587b6e675c6f": "file",
"junior.catsecurity.net": "domain",
"http://cdn.discordapp.com/attachments/564096601342083104/5931234022" +
"15325722/hungarianproject.exe": "url",
"https://cdn.discordapp.com/attachments/564096601342083104/593123402" +
"215325722/hungarianproject.exe": "url",
"blackhatmail.com": "domain",
"cdn-20.anonfile.com": "domain",
"85ce324b8f78021ecfc9b811c748f19b82e61bb093ff64f2eab457f9ef19b186": "file",
"5c1f4f69c45cff9725d9969f9ffcf79d07bd0f624e06cfa5bcbacd2211046ed6": "file",
"a93ee7ea13238bd038bcbec635f39619db566145498fe6e0ea60e6e76d614bd3": "file",
}
EXPANSION_SIDE_EFFECTS = {
SOURCE_NODE_ID: {
"bundled_files": {
"data": [
{
"attributes": {},
"id": INTERMEDIATE_NODE_ID,
"type": "file"
},
{
"attributes": {},
"id": "070f603e0443b1fae57425210fb3b27c2f77d8983" +
"cfefefb0ee185de572df33d",
"type": "file"
},
{
"attributes": {},
"id": "e575a260b7f9efe98a3674eb7347d01d447cebce0" +
"e6ef2b9b2444bdd0a98b0a2",
"type": "file"
},
{
"attributes": {},
"id": "d44cc91c43f7099a2c7b5cc4c56e4db903532e96f" +
"0b9c7c0a7f1b16117679b1e",
"type": "file"
},
{
"attributes": {},
"id": "e3ecdaf963efcfe5cf20559b4d68dd624ebb83f08" +
"d6be15d252a8baf0125eeb2",
"type": "file"
}
]
},
"carbonblack_children": {
"data": []
},
"carbonblack_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c" +
"6b9fb28b92b1e45b45d0ebc",
"type": "file"
}
]
},
"compressed_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c" +
"6b9fb28b92b1e45b45d0ebc",
"type": "file"
}
]
},
"contacted_domains": {
"data": [
{
"attributes": {},
"id": "download.eu-west-3.fromsmash.co",
"type": "domain"
},
{
"attributes": {},
"id": "ohd.vault.cf",
"type": "domain"
}
]
},
"contacted_ips": {
"data": [
{
"attributes": {},
"id": "76.68.25.125",
"type": "ip_address"
},
{
"attributes": {},
"id": "99.52.126.32",
"type": "ip_address"
}
]
},
"contacted_urls": {
"data": []
},
"email_parents": {
"data": []
},
"embedded_domains": {
"data": [
{
"attributes": {},
"id": "ohd.vault.cf",
"type": "domain"
}
]
},
"embedded_urls": {
"data": [
{
"attributes": {},
"id": "http://junior.catsecurity.net/~tmdahr1245" +
"/wannacry.exe",
"type": "url"
}
]
},
"embedded_ips": {
"data": []
},
"execution_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c" +
"6b9fb28b92b1e45b45d0ebc",
"type": "file"
},
{
"attributes": {},
"id": "428f22a9afd2797ede7c0583d34a052c32693cbb5" +
"5f567a60298587b6e675c6f",
"type": "file"
}
]
},
"itw_domains": {
"data": [
{
"attributes": {},
"id": "junior.catsecurity.net",
"type": "domain"
},
{
"attributes": {},
"id": "download.eu-west-3.fromsmash.co",
"type": "domain"
},
{
"attributes": {},
"id": "ohd.vault.cf",
"type": "domain"
}
]
},
"itw_urls": {
"data": [
{
"attributes": {},
"id": "http://junior.catsecurity.net/~tmdahr1245" +
"/wannacry.exe",
"type": "url"
},
{
"attributes": {},
"id": "http://cdn.discordapp.com/attachments/564" +
"096601342083104/593123402215325722/hungar" +
"ianproject.exe",
"type": "url"
},
{
"attributes": {},
"id": "https://cdn.discordapp.com/attachments/564" +
"096601342083104/593123402215325722/hungari" +
"anproject.exe",
"type": "url"
}
]
},
"overlay_parents": {
"data": []
},
"pcap_parents": {
"data": [
{
"attributes": {},
"id": "blackhatmail.com",
"type": "domain"
},
{
"attributes": {},
"id": "cdn-20.anonfile.com",
"type": "domain"
}
]
},
"pe_resource_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c6b9" +
"fb28b92b1e45b45d0ebc",
"type": "file"
},
{
"attributes": {},
"id": "428f22a9afd2797ede7c0583d34a052c32693cbb55f5" +
"67a60298587b6e675c6f",
"type": "file"
},
{
"attributes": {},
"id": "85ce324b8f78021ecfc9b811c748f19b82e61bb093ff6" +
"4f2eab457f9ef19b186",
"type": "file"
},
{
"attributes": {},
"id": "5c1f4f69c45cff9725d9969f9ffcf79d07bd0f624e06c" +
"fa5bcbacd2211046ed6",
"type": "file"
},
{
"attributes": {},
"id": "a93ee7ea13238bd038bcbec635f39619db566145498fe" +
"6e0ea60e6e76d614bd3",
"type": "file"
}
]
},
"similar_files": {
"data": []
},
},
    # The intermediate node will reach the target node in its fifth expansion.
INTERMEDIATE_NODE_ID: {
"bundled_files": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2" +
"c6b9fb28b92b1e45b45d0ebc",
"type": "file"
}
]
},
"carbonblack_children": {
"data": []
},
"carbonblack_parents": {
"data": []
},
"compressed_parents": {
"data": []
},
"contacted_domains": {
"data": []
},
"contacted_ips": {
"data": []
},
"contacted_urls": {
"data": []
},
"email_parents": {
"data": []
},
"embedded_domains": {
"data": [
{
"attributes": {},
"id": TARGET_NODE_ID,
"type": "domain"
}
]
},
"embedded_urls": {
"data": [
{
"attributes": {},
"id": "http://junior.catsecurity.net/~tmdahr1245" +
"/wannacry.exe",
"type": "url"
}
]
},
"embedded_ips": {
"data": []
},
"execution_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c" +
"6b9fb28b92b1e45b45d0ebc",
"type": "file"
},
{
"attributes": {},
"id": "428f22a9afd2797ede7c0583d34a052c32693cbb5" +
"5f567a60298587b6e675c6f",
"type": "file"
}
]
},
"itw_domains": {
"data": [
{
"attributes": {},
"id": "junior.catsecurity.net",
"type": "domain"
},
{
"attributes": {},
"id": "download.eu-west-3.fromsmash.co",
"type": "domain"
},
{
"attributes": {},
"id": "ohd.vault.cf",
"type": "domain"
}
]
},
"itw_urls": {
"data": [
{
"attributes": {},
"id": "http://junior.catsecurity.net/~tmdahr1245" +
"/wannacry.exe",
"type": "url"
},
{
"attributes": {},
"id": "http://cdn.discordapp.com/attachments/564" +
"096601342083104/593123402215325722/hungar" +
"ianproject.exe",
"type": "url"
},
{
"attributes": {},
"id": "https://cdn.discordapp.com/attachments/564" +
"096601342083104/593123402215325722/hungari" +
"anproject.exe",
"type": "url"
}
]
},
"overlay_parents": {
"data": []
},
"pcap_parents": {
"data": [
{
"attributes": {},
"id": "blackhatmail.com",
"type": "domain"
},
{
"attributes": {},
"id": "cdn-20.anonfile.com",
"type": "domain"
}
]
},
"pe_resource_parents": {
"data": [
{
"attributes": {},
"id": "fb0b6044347e972e21b6c376e37e1115dab494a2c6b9" +
"fb28b92b1e45b45d0ebc",
"type": "file"
},
{
"attributes": {},
"id": "428f22a9afd2797ede7c0583d34a052c32693cbb55f5" +
"67a60298587b6e675c6f",
"type": "file"
},
{
"attributes": {},
"id": "85ce324b8f78021ecfc9b811c748f19b82e61bb093ff6" +
"4f2eab457f9ef19b186",
"type": "file"
},
{
"attributes": {},
"id": "5c1f4f69c45cff9725d9969f9ffcf79d07bd0f624e06c" +
"fa5bcbacd2211046ed6",
"type": "file"
},
{
"attributes": {},
"id": "a93ee7ea13238bd038bcbec635f39619db566145498fe" +
"6e0ea60e6e76d614bd3",
"type": "file"
}
]
},
"similar_files": {
"data": []
},
}
}
def mock_request(url, headers, timeout):
"""Mock for method request.get()."""
assert "x-apikey" in headers
assert timeout == vt_graph_api.VTGraph.REQUEST_TIMEOUT
# url path format "/api/v3/<type>/<id>/<expansion>"
  # if the id is itself a URL it needs extra parsing.
url = urlparse.urlparse(url)
path = url.path.split("/api/v3/")[1].split("/")
expansion = path[-1]
  # if the id is a URL, re-join its path segments
node_id = "/".join(path[1:-1])
if node_id not in EXPANSION_NODES:
pytest.xfail("This call have never been invoked")
if node_id not in EXPANSION_SIDE_EFFECTS:
mock = Mock(status_code=200, json=Mock(return_value={"data": []}))
else:
mock = Mock(
status_code=200,
json=Mock(return_value=EXPANSION_SIDE_EFFECTS[node_id][expansion]))
return mock
def test_search_connection_second_level_real_data(mocker):
"""Test search connection end to end.
+-----------------+SOURCE_NODE+-----------------+
| + |
| +-----+---------+ |
v v v v
bundled_files carbonblack_children ... similar_files
+ + + +
+-----------+----+ +-----+-----+ | +-------------+
| | | | | | | | |
v v v v v v v v v
INTERMEDIATE_NODE ... ... ... ... ... ... ... ...
+
+-----+---------------------+
| | |
v v v
... contacted_domains ...
+
|
v
TARGET_NODE
"""
node_a = vt_graph_api.Node(
"26c808a1eb3eaa7bb29ec2ab834559f06f2636b87d5f542223426d6f238ff906",
"file"
)
intermediate_node = vt_graph_api.Node(
INTERMEDIATE_NODE_ID,
"file"
)
node_b = vt_graph_api.Node(
"nsis.sf.net",
"domain"
)
mocker.patch("requests.get", mock_request)
mocker.spy(test_graph, "_get_expansion_nodes")
mocker.spy(test_graph, "_parallel_expansion")
total_nodes_first_level = len(
node_a.expansions_available)
assert test_graph._search_connection(node_a, [node_b], 1000, 5, 100)
# Check that _get_expansion_nodes was called with the correct arguments.
calls = [
call(node_a, "bundled_files", 40),
call(node_a, "carbonblack_children", 40),
call(node_a, "carbonblack_parents", 40),
call(node_a, "compressed_parents", 40),
call(node_a, "contacted_domains", 40),
call(node_a, "contacted_ips", 40),
call(node_a, "contacted_urls", 40),
call(node_a, "email_parents", 40),
call(node_a, "embedded_domains", 40),
call(node_a, "embedded_urls", 40),
call(node_a, "embedded_ips", 40),
call(node_a, "execution_parents", 40),
call(node_a, "itw_domains", 40),
call(node_a, "itw_urls", 40),
call(node_a, "overlay_parents", 40),
call(node_a, "pcap_parents", 40),
call(node_a, "pe_resource_parents", 40),
call(node_a, "similar_files", 40),
call(intermediate_node, "bundled_files", 40),
call(intermediate_node, "carbonblack_children", 40),
call(intermediate_node, "carbonblack_parents", 40),
call(intermediate_node, "compressed_parents", 40),
call(intermediate_node, "contacted_domains", 40),
call(intermediate_node, "contacted_ips", 40),
call(intermediate_node, "contacted_urls", 40),
call(intermediate_node, "email_parents", 40),
call(intermediate_node, "embedded_domains", 40),
call(intermediate_node, "embedded_urls", 40),
call(intermediate_node, "embedded_ips", 40),
call(intermediate_node, "execution_parents", 40),
call(intermediate_node, "itw_domains", 40),
call(intermediate_node, "itw_urls", 40),
call(intermediate_node, "overlay_parents", 40),
call(intermediate_node, "pcap_parents", 40),
call(intermediate_node, "pe_resource_parents", 40),
call(intermediate_node, "similar_files", 40),
]
test_graph._get_expansion_nodes.assert_has_calls(calls, any_order=True)
total_expansion_calls = 0
for node_type in six.itervalues(EXPANSION_NODES):
total_expansion_calls += len(vt_graph_api.Node.NODE_EXPANSIONS[node_type])
  # All assertions use "less than or equal" rather than "equal" because of the
  # difficulty of stopping the worker threads as soon as the solution is found.
assert test_graph._get_expansion_nodes.call_count <= total_expansion_calls
assert test_graph._parallel_expansion.call_count <= (
1 +
total_nodes_first_level +
289 # max expansions in second level
)
mocker.resetall()
|
apache-2.0
| 9,134,466,188,610,243,000 | 32.764935 | 79 | 0.456402 | false |
afein/docker-microservice
|
service/service.py
|
1
|
3362
|
from flask import Flask, request, abort
import os
import time
import random
import requests
app = Flask("microservice")
service_name = os.environ["SERVICE_NAME"]
log = open("/var/log/micro/" + service_name + ".log", "w+")
requestID_counter = 1
def busy_wait(seconds):
    # Spin (rather than sleep) to simulate CPU-bound processing time.
    end = time.time() + seconds
    while time.time() < end:
        pass
'''
POST: the body must be a JSON-formatted object:
{
"requestID" : 15 // 0 if first request
"process" : [10, 5, 2, 10], // processing time spent by each service in seconds
"block" : [10, 0, 2, 2] // sleeping time spent by the service in seconds
"path": [
{ "microA" : 0.8,
"microB" : 0.2
},
{
"microC" : 0.5,
"microA" : 0.5
}
],
"visited" : ["micro0"]
'''
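# A minimal example payload matching the schema documented above (illustrative
# only: the service names "microA"/"microB"/"microC" and the entrypoint URL in
# the trailing comment are placeholders, not services defined by this file).
EXAMPLE_PAYLOAD = {
    "requestID": 0,                 # 0 lets the first service assign a new ID
    "process": [10, 5, 2, 10],      # seconds of CPU-bound work per hop
    "block": [10, 0, 2, 2],         # seconds of sleep per hop
    "path": [
        {"microA": 0.8, "microB": 0.2},
        {"microC": 0.5, "microA": 0.5},
    ],
    "visited": [],
}
# e.g. a client could send it with: requests.post("http://micro0", json=EXAMPLE_PAYLOAD)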
@app.route("/", methods=["POST"])
def handle():
global requestID_counter
if request.method == "POST":
obj = request.get_json(force=True)
if "requestID" not in obj or \
"process" not in obj or \
"block" not in obj or \
"path" not in obj or \
"visited" not in obj:
abort(400, "Invalid Request")
if len(obj["process"]) != 1 and len(obj["process"]) != len(obj["block"]):
abort(400, "Could not determine length of call path")
# Build response requestID
resp = {}
if obj["requestID"] == 0:
resp["requestID"] = requestID_counter
# TODO: use distributed requestID counter for multiple entrypoints
requestID_counter += 1
else:
resp["requestID"] = obj["requestID"]
obj["requestID"] = resp["requestID"]
log.write(str(time.time()) + " Received Request: " + str(obj) + "\n")
log.flush()
if service_name in obj["visited"]:
return "OK"
process = obj["process"].pop(0)
block = obj["block"].pop(0)
# perform processing and blocking
busy_wait(process)
time.sleep(block)
# Handle having no further targets
if len(obj["path"]) == 0:
return "Path Completed at service: " + service_name
# Build the rest of the response
resp["process"] = obj["process"]
resp["block"] = obj["block"]
resp["visited"] = [obj["visited"]] + [service_name]
if len(obj["path"]) == 1:
resp["path"] = []
else:
resp["path"] = obj["path"][1:]
        # Determine the next microservice to call via a weighted random choice
        targets = obj["path"].pop(0)
        total_prob = 0
        final_target = None
        roll = random.random()
        for target, prob in targets.iteritems():
            total_prob += prob
            if target == service_name:
                continue
            if roll < total_prob:
                final_target = target
                break
        if final_target is None:
            # The roll fell outside every eligible target (e.g. only this service
            # was listed), so stop the chain here.
            return "Path Completed at service: " + service_name
# Send POST to final target
log.write(str(time.time()) + " Sending Response: " + str(resp) + "\n")
log.flush()
returned = requests.post("http://" + final_target, json=resp)
return returned.text
app.run(host="0.0.0.0", port=80, debug=True)
|
apache-2.0
| -9,102,237,164,221,150,000 | 28.491228 | 91 | 0.516359 | false |
clems71/pogle
|
pyassimp/structs.py
|
1
|
34579
|
#-*- coding: UTF-8 -*-
from ctypes import POINTER, c_void_p, c_int, c_uint, c_char, c_float, Structure, c_char_p, c_double, c_ubyte, c_size_t, c_uint32
class Vector2D(Structure):
"""
See 'aiVector2D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),
]
class Matrix3x3(Structure):
"""
See 'aiMatrix3x3.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),
]
class Texel(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
("b", c_ubyte),("g", c_ubyte),("r", c_ubyte),("a", c_ubyte),
]
class Color4D(Structure):
"""
See 'aiColor4D.h' for details.
"""
_fields_ = [
# Red, green, blue and alpha color values
("r", c_float),("g", c_float),("b", c_float),("a", c_float),
]
class Plane(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Plane equation
("a", c_float),("b", c_float),("c", c_float),("d", c_float),
]
class Color3D(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Red, green and blue color values
("r", c_float),("g", c_float),("b", c_float),
]
class String(Structure):
"""
See 'aiTypes.h' for details.
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_size_t),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MaterialPropertyString(Structure):
"""
See 'aiTypes.h' for details.
The size of length is truncated to 4 bytes on 64-bit platforms when used as a
material property (see MaterialSystem.cpp aiMaterial::AddProperty() for details).
"""
MAXLEN = 1024
_fields_ = [
# Binary length of the string excluding the terminal 0. This is NOT the
# logical length of strings containing UTF-8 multibyte sequences! It's
# the number of bytes from the beginning of the string to its end.
("length", c_uint32),
# String buffer. Size limit is MAXLEN
("data", c_char*MAXLEN),
]
class MemoryInfo(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Storage allocated for texture data
("textures", c_uint),
# Storage allocated for material data
("materials", c_uint),
# Storage allocated for mesh data
("meshes", c_uint),
# Storage allocated for node data
("nodes", c_uint),
# Storage allocated for animation data
("animations", c_uint),
# Storage allocated for camera data
("cameras", c_uint),
# Storage allocated for light data
("lights", c_uint),
# Total storage allocated for the full import.
("total", c_uint),
]
class Quaternion(Structure):
"""
See 'aiQuaternion.h' for details.
"""
_fields_ = [
# w,x,y,z components of the quaternion
("w", c_float),("x", c_float),("y", c_float),("z", c_float),
]
class Face(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Number of indices defining this face.
# The maximum value for this member is
#AI_MAX_FACE_INDICES.
("mNumIndices", c_uint),
# Pointer to the indices array. Size of the array is given in numIndices.
("mIndices", POINTER(c_uint)),
]
class VertexWeight(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# Index of the vertex which is influenced by the bone.
("mVertexId", c_uint),
# The strength of the influence in the range (0...1).
# The influence from all bones at one vertex amounts to 1.
("mWeight", c_float),
]
class Matrix4x4(Structure):
"""
See 'aiMatrix4x4.h' for details.
"""
_fields_ = [
("a1", c_float),("a2", c_float),("a3", c_float),("a4", c_float),
("b1", c_float),("b2", c_float),("b3", c_float),("b4", c_float),
("c1", c_float),("c2", c_float),("c3", c_float),("c4", c_float),
("d1", c_float),("d2", c_float),("d3", c_float),("d4", c_float),
]
class Vector3D(Structure):
"""
See 'aiVector3D.h' for details.
"""
_fields_ = [
("x", c_float),("y", c_float),("z", c_float),
]
class MeshKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# Index into the aiMesh::mAnimMeshes array of the
        # mesh corresponding to the
#aiMeshAnim hosting this
# key frame. The referenced anim mesh is evaluated
# according to the rules defined in the docs for
#aiAnimMesh.
("mValue", c_uint),
]
class Node(Structure):
"""
See 'aiScene.h' for details.
"""
Node._fields_ = [
# The name of the node.
# The name might be empty (length of zero) but all nodes which
# need to be accessed afterwards by bones or anims are usually named.
# Multiple nodes may have the same name, but nodes which are accessed
# by bones (see
#aiBone and
#aiMesh::mBones) *must* be unique.
# Cameras and lights are assigned to a specific node name - if there
# are multiple nodes with this name, they're assigned to each of them.
# <br>
# There are no limitations regarding the characters contained in
# this text. You should be able to handle stuff like whitespace, tabs,
# linefeeds, quotation marks, ampersands, ... .
("mName", String),
# The transformation relative to the node's parent.
("mTransformation", Matrix4x4),
# Parent node. NULL if this node is the root node.
("mParent", POINTER(Node)),
# The number of child nodes of this node.
("mNumChildren", c_uint),
# The child nodes of this node. NULL if mNumChildren is 0.
("mChildren", POINTER(POINTER(Node))),
# The number of meshes of this node.
("mNumMeshes", c_uint),
# The meshes of this node. Each entry is an index into the mesh
("mMeshes", POINTER(c_uint)),
]
class Light(Structure):
"""
See 'aiLight.h' for details.
"""
_fields_ = [
# The name of the light source.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the light in the scene
# hierarchy and can be animated.
("mName", String),
# The type of the light source.
# aiLightSource_UNDEFINED is not a valid value for this member.
("mType", c_uint),
# Position of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The position is undefined for directional lights.
("mPosition", Vector3D),
# Direction of the light source in space. Relative to the
# transformation of the node corresponding to the light.
# The direction is undefined for point lights. The vector
# may be normalized, but it needn't.
("mDirection", Vector3D),
# Constant light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att0 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationConstant", c_float),
# Linear light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att1 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationLinear", c_float),
# Quadratic light attenuation factor.
# The intensity of the light source at a given distance 'd' from
# the light's position is
# @code
# Atten = 1/( att0 + att1
# d + att2
# d*d)
# @endcode
# This member corresponds to the att2 variable in the equation.
# Naturally undefined for directional lights.
("mAttenuationQuadratic", c_float),
# Diffuse color of the light source
# The diffuse light color is multiplied with the diffuse
# material color to obtain the final color that contributes
# to the diffuse shading term.
("mColorDiffuse", Color3D),
# Specular color of the light source
# The specular light color is multiplied with the specular
# material color to obtain the final color that contributes
# to the specular shading term.
("mColorSpecular", Color3D),
# Ambient color of the light source
# The ambient light color is multiplied with the ambient
# material color to obtain the final color that contributes
# to the ambient shading term. Most renderers will ignore
        # this value; it is just a leftover from the fixed-function pipeline
# that is still supported by quite many file formats.
("mColorAmbient", Color3D),
# Inner angle of a spot light's light cone.
# The spot light has maximum influence on objects inside this
# angle. The angle is given in radians. It is 2PI for point
# lights and undefined for directional lights.
("mAngleInnerCone", c_float),
# Outer angle of a spot light's light cone.
# The spot light does not affect objects outside this angle.
# The angle is given in radians. It is 2PI for point lights and
# undefined for directional lights. The outer angle must be
# greater than or equal to the inner angle.
# It is assumed that the application uses a smooth
# interpolation between the inner and the outer cone of the
# spot light.
("mAngleOuterCone", c_float),
]
class Texture(Structure):
"""
See 'aiTexture.h' for details.
"""
_fields_ = [
# Width of the texture, in pixels
# If mHeight is zero the texture is compressed in a format
# like JPEG. In this case mWidth specifies the size of the
# memory area pcData is pointing to, in bytes.
("mWidth", c_uint),
# Height of the texture, in pixels
# If this value is zero, pcData points to an compressed texture
# in any format (e.g. JPEG).
("mHeight", c_uint),
# A hint from the loader to make it easier for applications
# to determine the type of embedded compressed textures.
# If mHeight != 0 this member is undefined. Otherwise it
# is set set to '\\0\\0\\0\\0' if the loader has no additional
# information about the texture file format used OR the
# file extension of the format without a trailing dot. If there
# are multiple file extensions for a format, the shortest
# extension is chosen (JPEG maps to 'jpg', not to 'jpeg').
# E.g. 'dds\\0', 'pcx\\0', 'jpg\\0'. All characters are lower-case.
# The fourth character will always be '\\0'.
("achFormatHint", c_char*4),
# Data of the texture.
# Points to an array of mWidth
# mHeight aiTexel's.
# The format of the texture data is always ARGB8888 to
# make the implementation for user of the library as easy
# as possible. If mHeight = 0 this is a pointer to a memory
# buffer of size mWidth containing the compressed texture
# data. Good luck, have fun!
("pcData", POINTER(Texel)),
]
class Ray(Structure):
"""
See 'aiTypes.h' for details.
"""
_fields_ = [
# Position and direction of the ray
("pos", Vector3D),("dir", Vector3D),
]
class UVTransform(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# Translation on the u and v axes.
# The default value is (0|0).
("mTranslation", Vector2D),
# Scaling on the u and v axes.
# The default value is (1|1).
("mScaling", Vector2D),
# Rotation - in counter-clockwise direction.
# The rotation angle is specified in radians. The
# rotation center is 0.5f|0.5f. The default value
# 0.f.
("mRotation", c_float),
]
class MaterialProperty(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# Specifies the name of the property (key)
# Keys are generally case insensitive.
("mKey", String),
# Textures: Specifies their exact usage semantic.
# For non-texture properties, this member is always 0
# (or, better-said,
#aiTextureType_NONE).
("mSemantic", c_uint),
# Textures: Specifies the index of the texture.
# For non-texture properties, this member is always 0.
("mIndex", c_uint),
# Size of the buffer mData is pointing to, in bytes.
# This value may not be 0.
("mDataLength", c_uint),
# Type information for the property.
# Defines the data layout inside the data buffer. This is used
# by the library internally to perform debug checks and to
# utilize proper type conversions.
# (It's probably a hacky solution, but it works.)
("mType", c_uint),
# Binary buffer to hold the property's value.
# The size of the buffer is always mDataLength.
("mData", POINTER(c_char)),
]
class Material(Structure):
"""
See 'aiMaterial.h' for details.
"""
_fields_ = [
# List of all material properties loaded.
("mProperties", POINTER(POINTER(MaterialProperty))),
# Number of properties in the data base
("mNumProperties", c_uint),
# Storage allocated
("mNumAllocated", c_uint),
]
class Bone(Structure):
"""
See 'aiMesh.h' for details.
"""
_fields_ = [
# The name of the bone.
("mName", String),
# The number of vertices affected by this bone
# The maximum value for this member is
#AI_MAX_BONE_WEIGHTS.
("mNumWeights", c_uint),
# The vertices affected by this bone
("mWeights", POINTER(VertexWeight)),
# Matrix that transforms from mesh space to bone space in bind pose
("mOffsetMatrix", Matrix4x4),
]
class Mesh(Structure):
"""
See 'aiMesh.h' for details.
"""
AI_MAX_FACE_INDICES = 0x7fff
AI_MAX_BONE_WEIGHTS = 0x7fffffff
AI_MAX_VERTICES = 0x7fffffff
AI_MAX_FACES = 0x7fffffff
AI_MAX_NUMBER_OF_COLOR_SETS = 0x8
AI_MAX_NUMBER_OF_TEXTURECOORDS = 0x8
_fields_ = [
# Bitwise combination of the members of the
#aiPrimitiveType enum.
# This specifies which types of primitives are present in the mesh.
# The "SortByPrimitiveType"-Step can be used to make sure the
# output meshes consist of one primitive type each.
("mPrimitiveTypes", c_uint),
# The number of vertices in this mesh.
# This is also the size of all of the per-vertex data arrays.
# The maximum value for this member is
#AI_MAX_VERTICES.
("mNumVertices", c_uint),
# The number of primitives (triangles, polygons, lines) in this mesh.
# This is also the size of the mFaces array.
# The maximum value for this member is
#AI_MAX_FACES.
("mNumFaces", c_uint),
# Vertex positions.
# This array is always present in a mesh. The array is
# mNumVertices in size.
("mVertices", POINTER(Vector3D)),
# Vertex normals.
# The array contains normalized vectors, NULL if not present.
# The array is mNumVertices in size. Normals are undefined for
# point and line primitives. A mesh consisting of points and
# lines only may not have normal vectors. Meshes with mixed
# primitive types (i.e. lines and triangles) may have normals,
# but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to QNaN (WARN:
        # qNaN compares unequal to *everything*, even to qNaN itself.
# Using code like this to check whether a field is qnan is:
# @code
#define IS_QNAN(f) (f != f)
# @endcode
# still dangerous because even 1.f == 1.f could evaluate to false! (
        # remember the subtleties of IEEE754 arithmetic). Use stuff like
# @c fpclassify instead.
# @note Normal vectors computed by Assimp are always unit-length.
# However, this needn't apply for normals that have been taken
# directly from the model file.
("mNormals", POINTER(Vector3D)),
# Vertex tangents.
# The tangent of a vertex points in the direction of the positive
# X texture axis. The array contains normalized vectors, NULL if
# not present. The array is mNumVertices in size. A mesh consisting
# of points and lines only may not have normal vectors. Meshes with
# mixed primitive types (i.e. lines and triangles) may have
# normals, but the normals for vertices that are only referenced by
# point or line primitives are undefined and set to qNaN. See
# the
        #mNormals member for a detailed discussion of qNaNs.
# @note If the mesh contains tangents, it automatically also
# contains bitangents (the bitangent is just the cross product of
# tangent and normal vectors).
("mTangents", POINTER(Vector3D)),
# Vertex bitangents.
# The bitangent of a vertex points in the direction of the positive
# Y texture axis. The array contains normalized vectors, NULL if not
# present. The array is mNumVertices in size.
# @note If the mesh contains tangents, it automatically also contains
# bitangents.
("mBitangents", POINTER(Vector3D)),
# Vertex color sets.
# A mesh may contain 0 to
#AI_MAX_NUMBER_OF_COLOR_SETS vertex
# colors per vertex. NULL if not present. Each array is
# mNumVertices in size if present.
("mColors", POINTER(Color4D)*AI_MAX_NUMBER_OF_COLOR_SETS),
# Vertex texture coords, also known as UV channels.
# A mesh may contain 0 to AI_MAX_NUMBER_OF_TEXTURECOORDS per
# vertex. NULL if not present. The array is mNumVertices in size.
("mTextureCoords", POINTER(Vector3D)*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# Specifies the number of components for a given UV channel.
# Up to three channels are supported (UVW, for accessing volume
# or cube maps). If the value is 2 for a given channel n, the
# component p.z of mTextureCoords[n][p] is set to 0.0f.
# If the value is 1 for a given channel, p.y is set to 0.0f, too.
# @note 4D coords are not supported
("mNumUVComponents", c_uint*AI_MAX_NUMBER_OF_TEXTURECOORDS),
# The faces the mesh is constructed from.
# Each face refers to a number of vertices by their indices.
# This array is always present in a mesh, its size is given
# in mNumFaces. If the
#AI_SCENE_FLAGS_NON_VERBOSE_FORMAT
# is NOT set each face references an unique set of vertices.
("mFaces", POINTER(Face)),
# The number of bones this mesh contains.
# Can be 0, in which case the mBones array is NULL.
("mNumBones", c_uint),
# The bones of this mesh.
# A bone consists of a name by which it can be found in the
# frame hierarchy and a set of vertex weights.
("mBones", POINTER(POINTER(Bone))),
# The material used by this mesh.
# A mesh does use only a single material. If an imported model uses
# multiple materials, the import splits up the mesh. Use this value
# as index into the scene's material list.
("mMaterialIndex", c_uint),
# Name of the mesh. Meshes can be named, but this is not a
# requirement and leaving this field empty is totally fine.
# There are mainly three uses for mesh names:
# - some formats name nodes and meshes independently.
# - importers tend to split meshes up to meet the
# one-material-per-mesh requirement. Assigning
# the same (dummy) name to each of the result meshes
# aids the caller at recovering the original mesh
# partitioning.
# - Vertex animations refer to meshes by their names.
("mName", String),
# NOT CURRENTLY IN USE. The number of attachment meshes
("mNumAnimMeshes", c_uint),
# NOT CURRENTLY IN USE. Attachment meshes for this mesh, for vertex-based animation.
# Attachment meshes carry replacement data for some of the
# mesh'es vertex components (usually positions, normals).
]
class Camera(Structure):
"""
See 'aiCamera.h' for details.
"""
_fields_ = [
# The name of the camera.
# There must be a node in the scenegraph with the same name.
# This node specifies the position of the camera in the scene
# hierarchy and can be animated.
("mName", String),
# Position of the camera relative to the coordinate space
# defined by the corresponding node.
# The default value is 0|0|0.
("mPosition", Vector3D),
# 'Up' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# The 'right' vector of the camera coordinate system is
# the cross product of the up and lookAt vectors.
# The default value is 0|1|0. The vector
# may be normalized, but it needn't.
("mUp", Vector3D),
# 'LookAt' - vector of the camera coordinate system relative to
# the coordinate space defined by the corresponding node.
# This is the viewing direction of the user.
# The default value is 0|0|1. The vector
# may be normalized, but it needn't.
("mLookAt", Vector3D),
# Half horizontal field of view angle, in radians.
# The field of view angle is the angle between the center
# line of the screen and the left or right border.
# The default value is 1/4PI.
("mHorizontalFOV", c_float),
# Distance of the near clipping plane from the camera.
# The value may not be 0.f (for arithmetic reasons to prevent
# a division through zero). The default value is 0.1f.
("mClipPlaneNear", c_float),
# Distance of the far clipping plane from the camera.
# The far clipping plane must, of course, be further away than the
# near clipping plane. The default value is 1000.f. The ratio
# between the near and the far plane should not be too
# large (between 1000-10000 should be ok) to avoid floating-point
# inaccuracies which could lead to z-fighting.
("mClipPlaneFar", c_float),
# Screen aspect ratio.
# This is the ration between the width and the height of the
# screen. Typical values are 4/3, 1/2 or 1/1. This value is
# 0 if the aspect ratio is not defined in the source file.
# 0 is also the default value.
("mAspect", c_float),
]
class VectorKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Vector3D),
]
class QuatKey(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The time of this key
("mTime", c_double),
# The value of this key
("mValue", Quaternion),
]
class NodeAnim(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The name of the node affected by this animation. The node
# must exist and it must be unique.
("mNodeName", String),
# The number of position keys
("mNumPositionKeys", c_uint),
# The position keys of this animation channel. Positions are
# specified as 3D vector. The array is mNumPositionKeys in size.
# If there are position keys, there will also be at least one
# scaling and one rotation key.
("mPositionKeys", POINTER(VectorKey)),
# The number of rotation keys
("mNumRotationKeys", c_uint),
# The rotation keys of this animation channel. Rotations are
# given as quaternions, which are 4D vectors. The array is
# mNumRotationKeys in size.
# If there are rotation keys, there will also be at least one
# scaling and one position key.
("mRotationKeys", POINTER(QuatKey)),
# The number of scaling keys
("mNumScalingKeys", c_uint),
# The scaling keys of this animation channel. Scalings are
# specified as 3D vector. The array is mNumScalingKeys in size.
# If there are scaling keys, there will also be at least one
# position and one rotation key.
("mScalingKeys", POINTER(VectorKey)),
# Defines how the animation behaves before the first
# key is encountered.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is used).
("mPreState", c_uint),
# Defines how the animation behaves after the last
# key was processed.
# The default value is aiAnimBehaviour_DEFAULT (the original
# transformation matrix of the affected node is taken).
("mPostState", c_uint),
]
class Animation(Structure):
"""
See 'aiAnim.h' for details.
"""
_fields_ = [
# The name of the animation. If the modeling package this data was
# exported from does support only a single animation channel, this
# name is usually empty (length is zero).
("mName", String),
# Duration of the animation in ticks.
("mDuration", c_double),
# Ticks per second. 0 if not specified in the imported file
("mTicksPerSecond", c_double),
# The number of bone animation channels. Each channel affects
# a single node.
("mNumChannels", c_uint),
# The node animation channels. Each channel affects a single node.
# The array is mNumChannels in size.
("mChannels", POINTER(POINTER(NodeAnim))),
# The number of mesh animation channels. Each channel affects
# a single mesh and defines vertex-based animation.
("mNumMeshChannels", c_uint),
# The mesh animation channels. Each channel affects a single mesh.
# The array is mNumMeshChannels in size.
]
class Scene(Structure):
"""
See 'aiScene.h' for details.
"""
AI_SCENE_FLAGS_INCOMPLETE = 0x1
AI_SCENE_FLAGS_VALIDATED = 0x2
AI_SCENE_FLAGS_VALIDATION_WARNING = 0x4
AI_SCENE_FLAGS_NON_VERBOSE_FORMAT = 0x8
AI_SCENE_FLAGS_TERRAIN = 0x10
_fields_ = [
# Any combination of the AI_SCENE_FLAGS_XXX flags. By default
# this value is 0, no flags are set. Most applications will
# want to reject all scenes with the AI_SCENE_FLAGS_INCOMPLETE
# bit set.
("mFlags", c_uint),
# The root node of the hierarchy.
# There will always be at least the root node if the import
# was successful (and no special flags have been set).
# Presence of further nodes depends on the format and content
# of the imported file.
("mRootNode", POINTER(Node)),
# The number of meshes in the scene.
("mNumMeshes", c_uint),
# The array of meshes.
# Use the indices given in the aiNode structure to access
# this array. The array is mNumMeshes in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
# be at least ONE material.
("mMeshes", POINTER(POINTER(Mesh))),
# The number of materials in the scene.
("mNumMaterials", c_uint),
# The array of materials.
# Use the index given in each aiMesh structure to access this
# array. The array is mNumMaterials in size. If the
# AI_SCENE_FLAGS_INCOMPLETE flag is not set there will always
# be at least ONE material.
("mMaterials", POINTER(POINTER(Material))),
# The number of animations in the scene.
("mNumAnimations", c_uint),
# The array of animations.
# All animations imported from the given file are listed here.
# The array is mNumAnimations in size.
("mAnimations", POINTER(POINTER(Animation))),
# The number of textures embedded into the file
("mNumTextures", c_uint),
# The array of embedded textures.
# Not many file formats embed their textures into the file.
# An example is Quake's MDL format (which is also used by
# some GameStudio versions)
("mTextures", POINTER(POINTER(Texture))),
# The number of light sources in the scene. Light sources
# are fully optional, in most cases this attribute will be 0
("mNumLights", c_uint),
# The array of light sources.
# All light sources imported from the given file are
# listed here. The array is mNumLights in size.
("mLights", POINTER(POINTER(Light))),
# The number of cameras in the scene. Cameras
# are fully optional, in most cases this attribute will be 0
("mNumCameras", c_uint),
# The array of cameras.
# All cameras imported from the given file are listed here.
# The array is mNumCameras in size. The first camera in the
# array (if existing) is the default camera view into
# the scene.
("mCameras", POINTER(POINTER(Camera))),
]
assimp_structs_as_tuple = (Matrix4x4,
Matrix3x3,
Vector2D,
Vector3D,
Color3D,
Color4D,
Quaternion,
Plane,
Texel)
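# Minimal illustrative check of the ctypes wrappers above (a sketch, not part of
# the pyassimp API): structures accept positional field values and plain
# attribute access.
if __name__ == '__main__':
    v = Vector3D(1.0, 2.0, 3.0)
    m = Matrix4x4()                      # zero-initialised by ctypes
    m.a1 = m.b2 = m.c3 = m.d4 = 1.0      # set the identity diagonal
    print(v.x, v.y, v.z, m.a1, m.d4)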
|
mit
| -883,316,027,591,678,700 | 37.027503 | 128 | 0.541109 | false |
gzamboni/sdnResilience
|
loxi/of11/action.py
|
1
|
47031
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of11']
class action(loxi.OFObject):
subtypes = {}
def __init__(self, type=None):
if type != None:
self.type = type
else:
self.type = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 0)
subclass = action.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = action()
obj.type = reader.read("!H")[0]
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.type != other.type: return False
return True
def pretty_print(self, q):
q.text("action {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
class experimenter(action):
subtypes = {}
type = 65535
def __init__(self, experimenter=None, data=None):
if experimenter != None:
self.experimenter = experimenter
else:
self.experimenter = 0
if data != None:
self.data = data
else:
self.data = ''
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(self.data)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 4)
subclass = experimenter.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = experimenter()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.experimenter = reader.read("!L")[0]
obj.data = str(reader.read_all())
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.experimenter != other.experimenter: return False
if self.data != other.data: return False
return True
def pretty_print(self, q):
q.text("experimenter {")
with q.group():
with q.indent(2):
q.breakable()
q.text("data = ");
q.pp(self.data)
q.breakable()
q.text('}')
action.subtypes[65535] = experimenter
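# The subtypes registries above drive generic decoding: action.unpack() peeks the
# 16-bit type field, looks it up in action.subtypes and delegates to that subclass;
# experimenter actions dispatch a second time on the 32-bit experimenter id.
# Illustrative round trip (a sketch, assuming loxi.generic_util.OFReader):
#
#   reader = loxi.generic_util.OFReader(group(group_id=1).pack())
#   assert isinstance(action.unpack(reader), group)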
class bsn(experimenter):
subtypes = {}
type = 65535
experimenter = 6035143
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!L', 8)
subclass = bsn.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = bsn()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
obj.subtype = reader.read("!L")[0]
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("bsn {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[6035143] = bsn
class bsn_checksum(bsn):
type = 65535
experimenter = 6035143
subtype = 4
def __init__(self, checksum=None):
if checksum != None:
self.checksum = checksum
else:
self.checksum = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(util.pack_checksum_128(self.checksum))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_checksum()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 4)
obj.checksum = util.unpack_checksum_128(reader)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.checksum != other.checksum: return False
return True
def pretty_print(self, q):
q.text("bsn_checksum {")
with q.group():
with q.indent(2):
q.breakable()
q.text("checksum = ");
q.pp(self.checksum)
q.breakable()
q.text('}')
bsn.subtypes[4] = bsn_checksum
class bsn_mirror(bsn):
type = 65535
experimenter = 6035143
subtype = 1
def __init__(self, dest_port=None, vlan_tag=None, copy_stage=None):
if dest_port != None:
self.dest_port = dest_port
else:
self.dest_port = 0
if vlan_tag != None:
self.vlan_tag = vlan_tag
else:
self.vlan_tag = 0
if copy_stage != None:
self.copy_stage = copy_stage
else:
self.copy_stage = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.dest_port))
packed.append(struct.pack("!L", self.vlan_tag))
packed.append(struct.pack("!B", self.copy_stage))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_mirror()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 1)
obj.dest_port = reader.read("!L")[0]
obj.vlan_tag = reader.read("!L")[0]
obj.copy_stage = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dest_port != other.dest_port: return False
if self.vlan_tag != other.vlan_tag: return False
if self.copy_stage != other.copy_stage: return False
return True
def pretty_print(self, q):
q.text("bsn_mirror {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dest_port = ");
q.text("%#x" % self.dest_port)
q.text(","); q.breakable()
q.text("vlan_tag = ");
q.text("%#x" % self.vlan_tag)
q.text(","); q.breakable()
q.text("copy_stage = ");
q.text("%#x" % self.copy_stage)
q.breakable()
q.text('}')
bsn.subtypes[1] = bsn_mirror
class bsn_set_tunnel_dst(bsn):
type = 65535
experimenter = 6035143
subtype = 2
def __init__(self, dst=None):
if dst != None:
self.dst = dst
else:
self.dst = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!L", self.subtype))
packed.append(struct.pack("!L", self.dst))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = bsn_set_tunnel_dst()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 6035143)
_subtype = reader.read("!L")[0]
assert(_subtype == 2)
obj.dst = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dst != other.dst: return False
return True
def pretty_print(self, q):
q.text("bsn_set_tunnel_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dst = ");
q.text("%#x" % self.dst)
q.breakable()
q.text('}')
bsn.subtypes[2] = bsn_set_tunnel_dst
class copy_ttl_in(action):
type = 12
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_in()
_type = reader.read("!H")[0]
assert(_type == 12)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_in {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[12] = copy_ttl_in
class copy_ttl_out(action):
type = 11
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = copy_ttl_out()
_type = reader.read("!H")[0]
assert(_type == 11)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("copy_ttl_out {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[11] = copy_ttl_out
class dec_mpls_ttl(action):
type = 16
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 16)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[16] = dec_mpls_ttl
class dec_nw_ttl(action):
type = 24
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = dec_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 24)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("dec_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[24] = dec_nw_ttl
class group(action):
type = 22
def __init__(self, group_id=None):
if group_id != None:
self.group_id = group_id
else:
self.group_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.group_id))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = group()
_type = reader.read("!H")[0]
assert(_type == 22)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.group_id = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.group_id != other.group_id: return False
return True
def pretty_print(self, q):
q.text("group {")
with q.group():
with q.indent(2):
q.breakable()
q.text("group_id = ");
q.text("%#x" % self.group_id)
q.breakable()
q.text('}')
action.subtypes[22] = group
class nicira(experimenter):
subtypes = {}
type = 65535
experimenter = 8992
def __init__(self, subtype=None):
if subtype != None:
self.subtype = subtype
else:
self.subtype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
packed.append('\x00' * 2)
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
subtype, = reader.peek('!H', 8)
subclass = nicira.subtypes.get(subtype)
if subclass:
return subclass.unpack(reader)
obj = nicira()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
obj.subtype = reader.read("!H")[0]
reader.skip(2)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.subtype != other.subtype: return False
return True
def pretty_print(self, q):
q.text("nicira {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
experimenter.subtypes[8992] = nicira
class nicira_dec_ttl(nicira):
type = 65535
experimenter = 8992
subtype = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.experimenter))
packed.append(struct.pack("!H", self.subtype))
packed.append('\x00' * 2)
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = nicira_dec_ttl()
_type = reader.read("!H")[0]
assert(_type == 65535)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
_experimenter = reader.read("!L")[0]
assert(_experimenter == 8992)
_subtype = reader.read("!H")[0]
assert(_subtype == 18)
reader.skip(2)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("nicira_dec_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
nicira.subtypes[18] = nicira_dec_ttl
class output(action):
type = 0
def __init__(self, port=None, max_len=None):
if port != None:
self.port = port
else:
self.port = 0
if max_len != None:
self.max_len = max_len
else:
self.max_len = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(util.pack_port_no(self.port))
packed.append(struct.pack("!H", self.max_len))
packed.append('\x00' * 6)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = output()
_type = reader.read("!H")[0]
assert(_type == 0)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.port = util.unpack_port_no(reader)
obj.max_len = reader.read("!H")[0]
reader.skip(6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.port != other.port: return False
if self.max_len != other.max_len: return False
return True
def pretty_print(self, q):
q.text("output {")
with q.group():
with q.indent(2):
q.breakable()
q.text("port = ");
q.text(util.pretty_port(self.port))
q.text(","); q.breakable()
q.text("max_len = ");
q.text("%#x" % self.max_len)
q.breakable()
q.text('}')
action.subtypes[0] = output
class pop_mpls(action):
type = 20
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_mpls()
_type = reader.read("!H")[0]
assert(_type == 20)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("pop_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[20] = pop_mpls
class pop_vlan(action):
type = 18
def __init__(self):
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append('\x00' * 4)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = pop_vlan()
_type = reader.read("!H")[0]
assert(_type == 18)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
reader.skip(4)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
return True
def pretty_print(self, q):
q.text("pop_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.breakable()
q.text('}')
action.subtypes[18] = pop_vlan
class push_mpls(action):
type = 19
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_mpls()
_type = reader.read("!H")[0]
assert(_type == 19)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("push_mpls {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[19] = push_mpls
class push_vlan(action):
type = 17
def __init__(self, ethertype=None):
if ethertype != None:
self.ethertype = ethertype
else:
self.ethertype = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.ethertype))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = push_vlan()
_type = reader.read("!H")[0]
assert(_type == 17)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.ethertype = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.ethertype != other.ethertype: return False
return True
def pretty_print(self, q):
q.text("push_vlan {")
with q.group():
with q.indent(2):
q.breakable()
q.text("ethertype = ");
q.text("%#x" % self.ethertype)
q.breakable()
q.text('}')
action.subtypes[17] = push_vlan
class set_dl_dst(action):
type = 4
def __init__(self, dl_addr=None):
if dl_addr != None:
self.dl_addr = dl_addr
else:
self.dl_addr = [0,0,0,0,0,0]
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!6B", *self.dl_addr))
packed.append('\x00' * 6)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_dl_dst()
_type = reader.read("!H")[0]
assert(_type == 4)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.dl_addr = list(reader.read('!6B'))
reader.skip(6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dl_addr != other.dl_addr: return False
return True
def pretty_print(self, q):
q.text("set_dl_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dl_addr = ");
q.text(util.pretty_mac(self.dl_addr))
q.breakable()
q.text('}')
action.subtypes[4] = set_dl_dst
class set_dl_src(action):
type = 3
def __init__(self, dl_addr=None):
if dl_addr != None:
self.dl_addr = dl_addr
else:
self.dl_addr = [0,0,0,0,0,0]
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!6B", *self.dl_addr))
packed.append('\x00' * 6)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_dl_src()
_type = reader.read("!H")[0]
assert(_type == 3)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.dl_addr = list(reader.read('!6B'))
reader.skip(6)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.dl_addr != other.dl_addr: return False
return True
def pretty_print(self, q):
q.text("set_dl_src {")
with q.group():
with q.indent(2):
q.breakable()
q.text("dl_addr = ");
q.text(util.pretty_mac(self.dl_addr))
q.breakable()
q.text('}')
action.subtypes[3] = set_dl_src
class set_mpls_label(action):
type = 13
def __init__(self, mpls_label=None):
if mpls_label != None:
self.mpls_label = mpls_label
else:
self.mpls_label = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.mpls_label))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_mpls_label()
_type = reader.read("!H")[0]
assert(_type == 13)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.mpls_label = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.mpls_label != other.mpls_label: return False
return True
def pretty_print(self, q):
q.text("set_mpls_label {")
with q.group():
with q.indent(2):
q.breakable()
q.text("mpls_label = ");
q.text("%#x" % self.mpls_label)
q.breakable()
q.text('}')
action.subtypes[13] = set_mpls_label
class set_mpls_tc(action):
type = 14
def __init__(self, mpls_tc=None):
if mpls_tc != None:
self.mpls_tc = mpls_tc
else:
self.mpls_tc = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.mpls_tc))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_mpls_tc()
_type = reader.read("!H")[0]
assert(_type == 14)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.mpls_tc = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.mpls_tc != other.mpls_tc: return False
return True
def pretty_print(self, q):
q.text("set_mpls_tc {")
with q.group():
with q.indent(2):
q.breakable()
q.text("mpls_tc = ");
q.text("%#x" % self.mpls_tc)
q.breakable()
q.text('}')
action.subtypes[14] = set_mpls_tc
class set_mpls_ttl(action):
type = 15
def __init__(self, mpls_ttl=None):
if mpls_ttl != None:
self.mpls_ttl = mpls_ttl
else:
self.mpls_ttl = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.mpls_ttl))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_mpls_ttl()
_type = reader.read("!H")[0]
assert(_type == 15)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.mpls_ttl = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.mpls_ttl != other.mpls_ttl: return False
return True
def pretty_print(self, q):
q.text("set_mpls_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.text("mpls_ttl = ");
q.text("%#x" % self.mpls_ttl)
q.breakable()
q.text('}')
action.subtypes[15] = set_mpls_ttl
class set_nw_dst(action):
type = 6
def __init__(self, nw_addr=None):
if nw_addr != None:
self.nw_addr = nw_addr
else:
self.nw_addr = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.nw_addr))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_dst()
_type = reader.read("!H")[0]
assert(_type == 6)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_addr = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_addr != other.nw_addr: return False
return True
def pretty_print(self, q):
q.text("set_nw_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_addr = ");
q.text("%#x" % self.nw_addr)
q.breakable()
q.text('}')
action.subtypes[6] = set_nw_dst
class set_nw_ecn(action):
type = 8
def __init__(self, nw_ecn=None):
if nw_ecn != None:
self.nw_ecn = nw_ecn
else:
self.nw_ecn = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.nw_ecn))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_ecn()
_type = reader.read("!H")[0]
assert(_type == 8)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_ecn = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_ecn != other.nw_ecn: return False
return True
def pretty_print(self, q):
q.text("set_nw_ecn {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_ecn = ");
q.text("%#x" % self.nw_ecn)
q.breakable()
q.text('}')
action.subtypes[8] = set_nw_ecn
class set_nw_src(action):
type = 5
def __init__(self, nw_addr=None):
if nw_addr != None:
self.nw_addr = nw_addr
else:
self.nw_addr = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.nw_addr))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_src()
_type = reader.read("!H")[0]
assert(_type == 5)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_addr = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_addr != other.nw_addr: return False
return True
def pretty_print(self, q):
q.text("set_nw_src {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_addr = ");
q.text("%#x" % self.nw_addr)
q.breakable()
q.text('}')
action.subtypes[5] = set_nw_src
class set_nw_tos(action):
type = 7
def __init__(self, nw_tos=None):
if nw_tos != None:
self.nw_tos = nw_tos
else:
self.nw_tos = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.nw_tos))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_tos()
_type = reader.read("!H")[0]
assert(_type == 7)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_tos = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_tos != other.nw_tos: return False
return True
def pretty_print(self, q):
q.text("set_nw_tos {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_tos = ");
q.text("%#x" % self.nw_tos)
q.breakable()
q.text('}')
action.subtypes[7] = set_nw_tos
class set_nw_ttl(action):
type = 23
def __init__(self, nw_ttl=None):
if nw_ttl != None:
self.nw_ttl = nw_ttl
else:
self.nw_ttl = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.nw_ttl))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_nw_ttl()
_type = reader.read("!H")[0]
assert(_type == 23)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.nw_ttl = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.nw_ttl != other.nw_ttl: return False
return True
def pretty_print(self, q):
q.text("set_nw_ttl {")
with q.group():
with q.indent(2):
q.breakable()
q.text("nw_ttl = ");
q.text("%#x" % self.nw_ttl)
q.breakable()
q.text('}')
action.subtypes[23] = set_nw_ttl
class set_queue(action):
type = 21
def __init__(self, queue_id=None):
if queue_id != None:
self.queue_id = queue_id
else:
self.queue_id = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!L", self.queue_id))
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_queue()
_type = reader.read("!H")[0]
assert(_type == 21)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.queue_id = reader.read("!L")[0]
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.queue_id != other.queue_id: return False
return True
def pretty_print(self, q):
q.text("set_queue {")
with q.group():
with q.indent(2):
q.breakable()
q.text("queue_id = ");
q.text("%#x" % self.queue_id)
q.breakable()
q.text('}')
action.subtypes[21] = set_queue
class set_tp_dst(action):
type = 10
def __init__(self, tp_port=None):
if tp_port != None:
self.tp_port = tp_port
else:
self.tp_port = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.tp_port))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_tp_dst()
_type = reader.read("!H")[0]
assert(_type == 10)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.tp_port = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.tp_port != other.tp_port: return False
return True
def pretty_print(self, q):
q.text("set_tp_dst {")
with q.group():
with q.indent(2):
q.breakable()
q.text("tp_port = ");
q.text("%#x" % self.tp_port)
q.breakable()
q.text('}')
action.subtypes[10] = set_tp_dst
class set_tp_src(action):
type = 9
def __init__(self, tp_port=None):
if tp_port != None:
self.tp_port = tp_port
else:
self.tp_port = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.tp_port))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_tp_src()
_type = reader.read("!H")[0]
assert(_type == 9)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.tp_port = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.tp_port != other.tp_port: return False
return True
def pretty_print(self, q):
q.text("set_tp_src {")
with q.group():
with q.indent(2):
q.breakable()
q.text("tp_port = ");
q.text("%#x" % self.tp_port)
q.breakable()
q.text('}')
action.subtypes[9] = set_tp_src
class set_vlan_pcp(action):
type = 2
def __init__(self, vlan_pcp=None):
if vlan_pcp != None:
self.vlan_pcp = vlan_pcp
else:
self.vlan_pcp = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!B", self.vlan_pcp))
packed.append('\x00' * 3)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_vlan_pcp()
_type = reader.read("!H")[0]
assert(_type == 2)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.vlan_pcp = reader.read("!B")[0]
reader.skip(3)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.vlan_pcp != other.vlan_pcp: return False
return True
def pretty_print(self, q):
q.text("set_vlan_pcp {")
with q.group():
with q.indent(2):
q.breakable()
q.text("vlan_pcp = ");
q.text("%#x" % self.vlan_pcp)
q.breakable()
q.text('}')
action.subtypes[2] = set_vlan_pcp
class set_vlan_vid(action):
type = 1
def __init__(self, vlan_vid=None):
if vlan_vid != None:
self.vlan_vid = vlan_vid
else:
self.vlan_vid = 0
return
def pack(self):
packed = []
packed.append(struct.pack("!H", self.type))
packed.append(struct.pack("!H", 0)) # placeholder for len at index 1
packed.append(struct.pack("!H", self.vlan_vid))
packed.append('\x00' * 2)
length = sum([len(x) for x in packed])
packed[1] = struct.pack("!H", length)
return ''.join(packed)
@staticmethod
def unpack(reader):
obj = set_vlan_vid()
_type = reader.read("!H")[0]
assert(_type == 1)
_len = reader.read("!H")[0]
orig_reader = reader
reader = orig_reader.slice(_len, 4)
obj.vlan_vid = reader.read("!H")[0]
reader.skip(2)
return obj
def __eq__(self, other):
if type(self) != type(other): return False
if self.vlan_vid != other.vlan_vid: return False
return True
def pretty_print(self, q):
q.text("set_vlan_vid {")
with q.group():
with q.indent(2):
q.breakable()
q.text("vlan_vid = ");
q.text("%#x" % self.vlan_vid)
q.breakable()
q.text('}')
action.subtypes[1] = set_vlan_vid
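# --- Editor's note: minimal usage sketch, not part of the generated module. ---
# It exercises only names defined or imported in this file (struct and the
# action classes above); the OFReader used by the unpack() methods lives
# elsewhere in pyloxi and is not touched here.
if __name__ == "__main__":
    # Pack an output action and check that the length field written at bytes
    # 2..3 (the "placeholder for len" that pack() backpatches) matches the
    # real buffer length.
    act = output(port=1, max_len=0xffff)
    buf = act.pack()
    assert struct.unpack("!H", buf[2:4])[0] == len(buf)
    # Equality is structural: two actions with identical fields compare equal.
    assert set_vlan_vid(vlan_vid=42) == set_vlan_vid(vlan_vid=42)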
|
gpl-2.0
| 5,950,182,360,299,091,000 | 27.297834 | 88 | 0.512534 | false |
jakeharding/repo-health
|
repo_health/gh_issues/serializers/GhIssueStatsSerializer.py
|
1
|
4835
|
"""
GhIssueStatsSerializer.py - (C) Copyright - 2017
This software is copyrighted to contributors listed in CONTRIBUTIONS.md.
SPDX-License-Identifier: MIT
Author(s) of this file:
J. Harding
Serializer for issue stats of a GitHub repo.
"""
import datetime
from rest_framework import serializers as s
from ..models import GhIssueEvent, GhIssueComment
from .TotalAndOpenIssueLabelSerial import TotalAndOpenIssueLabelSerial
from repo_health.gh_projects.models import GhRepoLabel
from repo_health.index.mixins import CountForPastYearMixin
from repo_health.metrics.serializers import MetricField, ChartField
class GhIssueStatsSerializer(s.Serializer, CountForPastYearMixin):
# Chart names
ISSUES_CLOSED = 'issues_closed'
ISSUES_OPENED = 'issues_opened'
card_title = s.SerializerMethodField()
issues_count = s.SerializerMethodField()
issues_closed_last_year = s.SerializerMethodField()
issues_opened_last_year = s.SerializerMethodField()
merged_count = s.SerializerMethodField()
avg_lifetime = s.SerializerMethodField()
# popular_labels = s.SerializerMethodField()
avg_maintainer_comments_per_issue = s.SerializerMethodField()
most_recent_created_at = s.SerializerMethodField()
_charts = None
@property
def charts(self):
return self._charts
@charts.setter
def charts(self, charts):
self._charts = charts
def __init__(self, *args, **kwargs):
"""
Reset _charts to empty array for every object initialized.
:param args:
:param kwargs:
"""
super().__init__(*args, **kwargs)
self.charts = []
def get_card_title(self, repo):
return MetricField(True, None, 0, None, "Issues")
def get_issues_count(self, repo):
return MetricField(True, "Number of issues", 1, None, repo.issues_count)
def get_issues_closed_last_year(self, repo):
metric, most_recent = self.get_count_list_for_year(repo.issues.filter(events__action=GhIssueEvent.CLOSED_ACTION).distinct())
self._charts.append(ChartField(self.ISSUES_CLOSED, most_recent, "Closed issues last year", None, None, 1))
return MetricField(True, "Closed issues last year", 2, self.ISSUES_CLOSED, metric)
def get_issues_opened_last_year(self, repo):
metric, most_recent = self.get_count_list_for_year(repo.issues)
self._charts.append(ChartField(self.ISSUES_OPENED, most_recent, "Open issues last year", None, None, 1))
return MetricField(True, "Open issues last year", 3, self.ISSUES_OPENED, metric)
def get_most_recent_created_at(self, repo):
most_recent = repo.issues.order_by('-created_at').first().created_at
return MetricField(True, 'Most recent issue created at', 4, None, most_recent, True)
def get_merged_count(self, repo):
merged_count = repo.issues.filter(events__action=GhIssueEvent.MERGED_ACTION).count()
return MetricField(True, "Merged issues", 5, None, merged_count)
def get_avg_lifetime(self, repo):
        # Similar inefficiency to the PR stats, but the fields must be accessed a little differently.
        avg = closed = 0
        if repo.issues_count != 0:
td = datetime.timedelta()
closed_issues = repo.issues.prefetch_related('events').filter(events__action=GhIssueEvent.CLOSED_ACTION,
created_at__isnull=False)
for i in closed_issues.all():
closed += 1
close_event = i.events.filter(action=GhIssueEvent.CLOSED_ACTION).order_by('-created_at').first()
if not isinstance(i.created_at, type(None)):
td += (close_event.created_at - i.created_at)
avg = (td / closed).days if closed > 0 else closed
return MetricField(True, "Average issue lifetime", 6, None, avg)
def get_popular_labels(self, repo):
# Raw SQL.
labels_by_count_for_repo = GhRepoLabel.objects.raw(
'SELECT t.*, count(t.id) as repos_labels FROM repo_labels t join issue_labels il on il.label_id=t.id\
where t.repo_id = %d GROUP BY t.id ORDER BY repos_labels desc;' % repo.id)
top_five = labels_by_count_for_repo[:5]
response_data = []
for l in top_five:
label_serial = TotalAndOpenIssueLabelSerial(l)
response_data.append(label_serial.data)
return response_data
def get_avg_maintainer_comments_per_issue(self, repo):
comments_from_m = GhIssueComment.objects.filter(
issue__in=repo.issues.all(),
user__in=repo.maintainers.all()
).count()
avg_coms = round(comments_from_m / repo.issues.count(), 5)
return MetricField(True, 'Average maintainer comments per issue', 6, None, avg_coms)
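# Editor's note: hedged usage sketch, not part of the original file; it is kept
# as comments because running it needs a configured Django environment. With a
# repo model instance in hand the serializer is consumed like any DRF
# serializer: the MetricField values come back through `.data`, and the
# ChartField entries accumulated while serializing are available on `.charts`:
#
#     serializer = GhIssueStatsSerializer(repo)
#     stats = serializer.data      # invokes the get_* methods above
#     charts = serializer.charts   # filled by the opened/closed issue getters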
|
mit
| -7,466,337,894,348,510,000 | 41.052174 | 132 | 0.658945 | false |
wanghongjuan/crosswalk-test-suite
|
misc/sampleapp-android-tests/sampleapp/webgl_uninstall.py
|
1
|
2600
|
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, Yun<yunx.liu@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestSampleAppFunctions(unittest.TestCase):
def test_uninstall(self):
comm.setUp()
app_name = "Webgl"
cmdfind = "adb -s " + comm.device + \
" shell pm list packages |grep org.xwalk.%s" % (app_name.lower())
# print "cmdfind: ", cmdfind
pmstatus = commands.getstatusoutput(cmdfind)
# print "pmstatus: ", pmstatus
if pmstatus[0] != 0:
            print "Uninstall APK ----------------> %s App hasn't been installed, need to install it!" % app_name
os.chdir(comm.const_path + "/../testapp/")
apk_file = commands.getstatusoutput("ls | grep %s" % app_name.lower())[1]
cmdinst = "adb -s " + comm.device + " install -r " + apk_file
comm.app_install(cmdinst, cmdfind, self)
cmduninst = "adb -s " + comm.device + \
" uninstall org.xwalk.%s" % (app_name.lower())
comm.app_uninstall(cmduninst, self)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 6,075,899,156,710,533,000 | 43.067797 | 108 | 0.693077 | false |
yhilpisch/dx
|
dx/plot.py
|
1
|
5805
|
#
# DX Analytics
# Helper Function for Plotting
# dx_plot.py
#
# DX Analytics is a financial analytics library, mainly for
# derviatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import matplotlib as mpl; mpl.use('agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import cm
import numpy as np
def plot_option_stats(s_list, pv, de, ve):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
        set of initial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
'''
plt.figure(figsize=(9, 7))
sub1 = plt.subplot(311)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(312)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(313)
plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
def plot_option_stats_full(s_list, pv, de, ve, th, rh, ga):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of intial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
th : array or list
results for theta
rh : array or list
results for rho
ga : array or list
results for gamma
'''
plt.figure(figsize=(10, 14))
sub1 = plt.subplot(611)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(612)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(613)
    plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.grid(True)
plt.legend(loc=0)
sub4 = plt.subplot(614)
    plt.plot(s_list, th, 'mo', label='Theta')
plt.plot(s_list, th, 'b')
plt.grid(True)
plt.legend(loc=0)
sub5 = plt.subplot(615)
    plt.plot(s_list, rh, 'co', label='Rho')
plt.plot(s_list, rh, 'b')
plt.grid(True)
plt.legend(loc=0)
sub6 = plt.subplot(616)
    plt.plot(s_list, ga, 'ko', label='Gamma')
plt.plot(s_list, ga, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
def plot_greeks_3d(inputs, labels):
''' Plot Greeks in 3d.
Parameters
==========
inputs : list of arrays
x, y, z arrays
labels : list of strings
labels for x, y, z
'''
x, y, z = inputs
xl, yl, zl = labels
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1,
cmap=cm.coolwarm, linewidth=0.5, antialiased=True)
ax.set_xlabel(xl)
ax.set_ylabel(yl)
ax.set_zlabel(zl)
fig.colorbar(surf, shrink=0.5, aspect=5)
def plot_calibration_results(cali, relative=False):
''' Plot calibration results.
Parameters
==========
cali : instance of calibration class
instance has to have opt_parameters
relative : boolean
if True, then relative error reporting
if False, absolute error reporting
'''
cali.update_model_values()
mats = set(cali.option_data[:, 0])
mats = np.sort(list(mats))
fig, axarr = plt.subplots(len(mats), 2, sharex=True)
fig.set_size_inches(8, 12)
fig.subplots_adjust(wspace=0.2, hspace=0.2)
z = 0
for T in mats:
        strikes = cali.option_data[cali.option_data[:, 0] == T][:, 1]
market = cali.option_data[cali.option_data[:, 0] == T][:, 2]
model = cali.model_values[cali.model_values[:, 0] == T][:, 2]
axarr[z, 0].set_ylabel('%s' % str(T)[:10])
axarr[z, 0].plot(strikes, market, label='Market Quotes')
axarr[z, 0].plot(strikes, model, 'ro', label='Model Prices')
axarr[z, 0].grid()
if T is mats[0]:
axarr[z, 0].set_title('Option Quotes')
if T is mats[-1]:
axarr[z, 0].set_xlabel('Strike')
wi = 2.
if relative is True:
axarr[z, 1].bar(strikes - wi / 2,
(model - market) / market * 100, width=wi)
else:
axarr[z, 1].bar(strikes - wi / 2, model - market, width=wi)
axarr[z, 1].grid()
if T is mats[0]:
axarr[z, 1].set_title('Differences')
if T is mats[-1]:
axarr[z, 1].set_xlabel('Strike')
z += 1
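# Editor's note: a small self-contained sketch of how the helpers above are
# called; the arrays below are made-up placeholder values, not real option data.
if __name__ == '__main__':
    s_list = np.linspace(80., 120., 9)
    pv = np.maximum(s_list - 100., 0.)            # stand-in present values
    de = np.clip((s_list - 80.) / 40., 0., 1.)    # stand-in deltas
    ve = 20. - np.abs(s_list - 100.) / 2.         # stand-in vegas
    plot_option_stats(s_list, pv, de, ve)
    plt.savefig('option_stats_example.png')       # backend is 'agg', so save rather than show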
|
agpl-3.0
| -1,432,545,182,039,051,300 | 30.042781 | 79 | 0.604134 | false |
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_4_0_0/models/range.py
|
1
|
1470
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Range) on 2019-05-07.
# 2019, SMART Health IT.
from . import element
class Range(element.Element):
""" Set of values bounded by low and high.
A set of ordered Quantities defined by a low and high limit.
"""
resource_type = "Range"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.high = None
""" High limit.
Type `Quantity` (represented as `dict` in JSON). """
self.low = None
""" Low limit.
Type `Quantity` (represented as `dict` in JSON). """
super(Range, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Range, self).elementProperties()
js.extend([
("high", "high", quantity.Quantity, False, None, False),
("low", "low", quantity.Quantity, False, None, False),
])
return js
import sys
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
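# Editor's note: hedged usage sketch, not part of the generated file. The field
# names follow the FHIR R4 Quantity datatype (value/unit) that Range wraps:
#
#     r = Range(jsondict={'low': {'value': 3.0, 'unit': 'mg'},
#                         'high': {'value': 5.0, 'unit': 'mg'}})
#     r.as_json()  # -> {'low': {'value': 3.0, 'unit': 'mg'}, 'high': {...}}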
|
bsd-3-clause
| 917,025,002,437,808,800 | 29 | 102 | 0.6 | false |
ging/python-keystoneclient
|
keystoneclient/auth/identity/v3.py
|
1
|
13078
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from oslo_config import cfg
import six
from keystoneclient import access
from keystoneclient.auth.identity import base
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient import utils
_logger = logging.getLogger(__name__)
class Auth(base.BaseIdentityPlugin):
"""Identity V3 Authentication Plugin.
:param string auth_url: Identity service endpoint for authentication.
:param list auth_methods: A collection of methods to authenticate with.
:param string trust_id: Trust ID for trust scoping.
:param string domain_id: Domain ID for domain scoping.
:param string domain_name: Domain name for domain scoping.
:param string project_id: Project ID for project scoping.
:param string project_name: Project name for project scoping.
:param string project_domain_id: Project's domain ID for project.
:param string project_domain_name: Project's domain name for project.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
:param bool include_catalog: Include the service catalog in the returned
token. (optional) default True.
"""
@utils.positional()
def __init__(self, auth_url, auth_methods,
trust_id=None,
domain_id=None,
domain_name=None,
project_id=None,
project_name=None,
project_domain_id=None,
project_domain_name=None,
reauthenticate=True,
include_catalog=True):
super(Auth, self).__init__(auth_url=auth_url,
reauthenticate=reauthenticate)
self.auth_methods = auth_methods
self.trust_id = trust_id
self.domain_id = domain_id
self.domain_name = domain_name
self.project_id = project_id
self.project_name = project_name
self.project_domain_id = project_domain_id
self.project_domain_name = project_domain_name
self.include_catalog = include_catalog
@property
def token_url(self):
"""The full URL where we will send authentication data."""
return '%s/auth/tokens' % self.auth_url.rstrip('/')
def get_auth_ref(self, session, **kwargs):
headers = {'Accept': 'application/json'}
body = {'auth': {'identity': {}}}
ident = body['auth']['identity']
rkwargs = {}
for method in self.auth_methods:
name, auth_data = method.get_auth_data(session,
self,
headers,
request_kwargs=rkwargs)
ident.setdefault('methods', []).append(name)
ident[name] = auth_data
if not ident:
raise exceptions.AuthorizationFailure(
_('Authentication method required (e.g. password)'))
mutual_exclusion = [bool(self.domain_id or self.domain_name),
bool(self.project_id or self.project_name),
bool(self.trust_id)]
if sum(mutual_exclusion) > 1:
raise exceptions.AuthorizationFailure(
_('Authentication cannot be scoped to multiple targets. Pick '
'one of: project, domain or trust'))
if self.domain_id:
body['auth']['scope'] = {'domain': {'id': self.domain_id}}
elif self.domain_name:
body['auth']['scope'] = {'domain': {'name': self.domain_name}}
elif self.project_id:
body['auth']['scope'] = {'project': {'id': self.project_id}}
elif self.project_name:
scope = body['auth']['scope'] = {'project': {}}
scope['project']['name'] = self.project_name
if self.project_domain_id:
scope['project']['domain'] = {'id': self.project_domain_id}
elif self.project_domain_name:
scope['project']['domain'] = {'name': self.project_domain_name}
elif self.trust_id:
body['auth']['scope'] = {'OS-TRUST:trust': {'id': self.trust_id}}
# NOTE(jamielennox): we add nocatalog here rather than in token_url
# directly as some federation plugins require the base token_url
token_url = self.token_url
if not self.include_catalog:
token_url += '?nocatalog'
_logger.debug('Making authentication request to %s', token_url)
resp = session.post(token_url, json=body, headers=headers,
authenticated=False, log=False, **rkwargs)
try:
resp_data = resp.json()['token']
except (KeyError, ValueError):
raise exceptions.InvalidResponse(response=resp)
return access.AccessInfoV3(resp.headers['X-Subject-Token'],
**resp_data)
@classmethod
def get_options(cls):
options = super(Auth, cls).get_options()
options.extend([
cfg.StrOpt('domain-id', help='Domain ID to scope to'),
cfg.StrOpt('domain-name', help='Domain name to scope to'),
cfg.StrOpt('project-id', help='Project ID to scope to'),
cfg.StrOpt('project-name', help='Project name to scope to'),
cfg.StrOpt('project-domain-id',
help='Domain ID containing project'),
cfg.StrOpt('project-domain-name',
help='Domain name containing project'),
cfg.StrOpt('trust-id', help='Trust ID'),
])
return options
@six.add_metaclass(abc.ABCMeta)
class AuthMethod(object):
"""One part of a V3 Authentication strategy.
V3 Tokens allow multiple methods to be presented when authentication
against the server. Each one of these methods is implemented by an
AuthMethod.
Note: When implementing an AuthMethod use the method_parameters
and do not use positional arguments. Otherwise they can't be picked up by
the factory method and don't work as well with AuthConstructors.
"""
_method_parameters = []
def __init__(self, **kwargs):
for param in self._method_parameters:
setattr(self, param, kwargs.pop(param, None))
if kwargs:
msg = _("Unexpected Attributes: %s") % ", ".join(kwargs.keys())
raise AttributeError(msg)
@classmethod
def _extract_kwargs(cls, kwargs):
"""Remove parameters related to this method from other kwargs."""
return dict([(p, kwargs.pop(p, None))
for p in cls._method_parameters])
@abc.abstractmethod
def get_auth_data(self, session, auth, headers, **kwargs):
"""Return the authentication section of an auth plugin.
:param session: The communication session.
:type session: keystoneclient.session.Session
:param Auth auth: The auth plugin calling the method.
:param dict headers: The headers that will be sent with the auth
request if a plugin needs to add to them.
:return: The identifier of this plugin and a dict of authentication
data for the auth type.
:rtype: tuple(string, dict)
"""
@six.add_metaclass(abc.ABCMeta)
class AuthConstructor(Auth):
"""AuthConstructor is a means of creating an Auth Plugin that contains
only one authentication method. This is generally the required usage.
An AuthConstructor creates an AuthMethod based on the method's
arguments and the auth_method_class defined by the plugin. It then
creates the auth plugin with only that authentication method.
"""
_auth_method_class = None
def __init__(self, auth_url, *args, **kwargs):
method_kwargs = self._auth_method_class._extract_kwargs(kwargs)
method = self._auth_method_class(*args, **method_kwargs)
super(AuthConstructor, self).__init__(auth_url, [method], **kwargs)
class PasswordMethod(AuthMethod):
"""Construct a User/Password based authentication method.
:param string password: Password for authentication.
:param string username: Username for authentication.
:param string user_id: User ID for authentication.
:param string user_domain_id: User's domain ID for authentication.
:param string user_domain_name: User's domain name for authentication.
"""
_method_parameters = ['user_id',
'username',
'user_domain_id',
'user_domain_name',
'password']
def get_auth_data(self, session, auth, headers, **kwargs):
user = {'password': self.password}
if self.user_id:
user['id'] = self.user_id
elif self.username:
user['name'] = self.username
if self.user_domain_id:
user['domain'] = {'id': self.user_domain_id}
elif self.user_domain_name:
user['domain'] = {'name': self.user_domain_name}
return 'password', {'user': user}
class Password(AuthConstructor):
"""A plugin for authenticating with a username and password.
:param string auth_url: Identity service endpoint for authentication.
:param string password: Password for authentication.
:param string username: Username for authentication.
:param string user_id: User ID for authentication.
:param string user_domain_id: User's domain ID for authentication.
:param string user_domain_name: User's domain name for authentication.
:param string trust_id: Trust ID for trust scoping.
:param string domain_id: Domain ID for domain scoping.
:param string domain_name: Domain name for domain scoping.
:param string project_id: Project ID for project scoping.
:param string project_name: Project name for project scoping.
:param string project_domain_id: Project's domain ID for project.
:param string project_domain_name: Project's domain name for project.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
"""
_auth_method_class = PasswordMethod
@classmethod
def get_options(cls):
options = super(Password, cls).get_options()
options.extend([
cfg.StrOpt('user-id', help='User ID'),
cfg.StrOpt('user-name', dest='username', help='Username',
deprecated_name='username'),
cfg.StrOpt('user-domain-id', help="User's domain id"),
cfg.StrOpt('user-domain-name', help="User's domain name"),
cfg.StrOpt('password', secret=True, help="User's password"),
])
return options
class TokenMethod(AuthMethod):
"""Construct an Auth plugin to fetch a token from a token.
:param string token: Token for authentication.
"""
_method_parameters = ['token']
def get_auth_data(self, session, auth, headers, **kwargs):
headers['X-Auth-Token'] = self.token
return 'token', {'id': self.token}
class Token(AuthConstructor):
"""A plugin for authenticating with an existing Token.
:param string auth_url: Identity service endpoint for authentication.
:param string token: Token for authentication.
:param string trust_id: Trust ID for trust scoping.
:param string domain_id: Domain ID for domain scoping.
:param string domain_name: Domain name for domain scoping.
:param string project_id: Project ID for project scoping.
:param string project_name: Project name for project scoping.
:param string project_domain_id: Project's domain ID for project.
:param string project_domain_name: Project's domain name for project.
:param bool reauthenticate: Allow fetching a new token if the current one
is going to expire. (optional) default True
"""
_auth_method_class = TokenMethod
def __init__(self, auth_url, token, **kwargs):
super(Token, self).__init__(auth_url, token=token, **kwargs)
@classmethod
def get_options(cls):
options = super(Token, cls).get_options()
options.extend([
cfg.StrOpt('token',
secret=True,
help='Token to authenticate with'),
])
return options
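# Editor's note: hedged usage sketch, not part of the original module (kept as
# comments to avoid importing keystoneclient.session here). Endpoint and
# credential values are illustrative only.
#
#     from keystoneclient import session
#     from keystoneclient.auth.identity import v3
#
#     auth = v3.Password(auth_url='https://keystone.example.com:5000/v3',
#                        username='demo', password='secret',
#                        user_domain_name='Default',
#                        project_name='demo', project_domain_name='Default')
#     sess = session.Session(auth=auth)
#     token = sess.get_token()  # drives Auth.get_auth_ref() defined above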
|
apache-2.0
| -5,012,167,486,128,561,000 | 38.391566 | 79 | 0.61829 | false |
TheGoldLab/Lab-Matlab-Control
|
modularTasks/utilities/readPupilLabsData.py
|
1
|
1388
|
import sys
import scipy.io as scpy
import numpy as np
import msgpack
from file_methods import *
# Python script to read Pupil Labs eye data, extract the desired information and then
# create a .mat structure where rows are samples (times) and columns are:
# 1. timestamp
# 2. gaze x (normalized)
# 3. gaze y (normalized)
# 4. confidence
# 5. pupil 0 diameter
# 6. pupil 1 diameter
# input:
# sys.argv[1]: the filepath to the recording directory containing the data file
# sys.argv[2]: the topic/name of the pldata file to load (passed to load_pldata_file)
# sys.argv[3]: the desired name of the newly created .mat structure
# Use pupil-labs function to load data
data = load_pldata_file(sys.argv[1], sys.argv[2])
# Make matrix with samples as rows, columns as below
raw_data = np.zeros((len(data.data),6),dtype=np.object)
for q in range(len(data.data)):
raw_data[q][0] = data.data[q]['timestamp']
raw_data[q][1] = data.data[q]['norm_pos'][0]
raw_data[q][2] = data.data[q]['norm_pos'][1]
raw_data[q][3] = data.data[q]['confidence']
try:
raw_data[q][4] = data.data[q]['base_data'][0]['diameter']
raw_data[q][5] = data.data[q]['base_data'][1]['diameter']
except IndexError:
if data.data[q]['base_data'][0]['topic'] == 'pupil.0':
raw_data[q][4] = data.data[q]['base_data'][0]['diameter']
raw_data[q][5] = -1
else:
raw_data[q][4] = -1
raw_data[q][5] = data.data[q]['base_data'][0]['diameter']
# save in temporary file
scpy.savemat(sys.argv[3] +'.mat', {sys.argv[3]:raw_data})
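# Editor's note: example invocation (paths and names are illustrative). The
# recording directory must contain <name>.pldata as written by Pupil Capture:
#
#     python readPupilLabsData.py /path/to/recording gaze gaze_data
#
# which loads gaze.pldata and writes gaze_data.mat with one row per sample and
# the six columns described at the top of this file.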
|
apache-2.0
| -4,823,258,720,058,868,000 | 32.071429 | 128 | 0.608069 | false |
jgeewax/googlepersonfinder
|
app/photo.py
|
1
|
1131
|
#!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for retrieving uploaded photos for display."""
from model import *
from utils import *
import prefix
class Photo(Handler):
def get(self):
if not self.params.id:
return self.error(404, 'No photo id was specified.')
photo = db.get(self.params.id)
if not photo:
return self.error(404, 'There is no photo for the specified id.')
self.response.headers['Content-Type'] = "image/png"
self.response.out.write(photo.bin_data)
if __name__ == '__main__':
run([('/photo', Photo)], debug=False)
|
apache-2.0
| 5,566,294,216,070,481,000 | 32.264706 | 74 | 0.71176 | false |
AdrianGaudebert/elmo
|
lib/auth/backends.py
|
1
|
9276
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ldap
from ldap.filter import filter_format
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.auth.backends import RemoteUserBackend
from django.core.validators import email_re
from django.utils.hashcompat import md5_constructor
from django.utils.encoding import force_unicode, smart_str
import os
HERE = os.path.abspath(os.path.dirname(__file__))
# List all ldap errors that are our fault (not the client's)
AUTHENTICATION_SERVER_ERRORS = (ldap.SERVER_DOWN,)
GROUP_MAPPINGS = {
# Django name: LDAP name(s),
'Localizers': 'scm_l10n',
'build': ('buildteam', 'shipit'),
}
def flatten_group_names(values):
"""
Take something like this:
['a', ('b', 'c'), 'd', ['e', 'f']]
and return this:
['a', 'b', 'c', 'd', 'e', 'f']
"""
group_names = []
if isinstance(values, basestring):
return [values]
for value in values:
if isinstance(value, basestring):
group_names.append(value)
else:
# tuple or list
group_names += value
return group_names
class MozLdapBackend(RemoteUserBackend):
    """Creates the connection to the server, and binds anonymously."""
host = ""
dn = ""
password = ""
certfile = os.path.join(HERE, "cacert.pem")
ldo = None
def __init__(self):
# Note, any exceptions that happen here will be swallowed by Django's
# core handler for middleware classes. Ugly truth :)
self.host = settings.LDAP_HOST
self.dn = settings.LDAP_DN
self.password = settings.LDAP_PASSWORD
self.localizers = None
self.ldo = None
#
# This is the path we take here:
# *) Try to find the user locally
# *) If the user exists, authenticate him locally
# *) If authentication is granted return his object
# *) If not, try to authenticate against LDAP
# *) If authentication is granted create/update his local account and
# return the *local* one
#
# Important note:
# We don't store LDAP password locally, so LDAP accounts will
# never be authenticated locally
def authenticate(self, username=None, password=None):
try: # Let's see if we have such user
if email_re.match(username):
local_user = User.objects.get(email=username)
else:
local_user = User.objects.get(username=username)
if local_user.has_usable_password():
if local_user.check_password(password):
return local_user
else:
return
else:
return self._authenticate_ldap(username, password, local_user)
except User.DoesNotExist:
return self._authenticate_ldap(username, password)
@staticmethod
def make_search_filter(data, any_parameter=False):
params = []
for key, value in data.items():
if not isinstance(value, (list, tuple)):
value = [value]
for v in value:
params.append(filter_format('(%s=%s)', (key, v)))
search_filter = ''.join(params)
if len(params) > 1:
if any_parameter:
search_filter = '(|%s)' % search_filter
else:
search_filter = '(&%s)' % search_filter
return search_filter
def initialize(self):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.certfile)
self.ldo = ldap.initialize(self.host)
self.ldo.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
def connect(self):
self.initialize()
# open a connection using the bind user
self.ldo.simple_bind_s(self.dn, self.password)
def disconnect(self):
self.ldo.unbind_s()
def _authenticate_ldap(self, mail, password, user=None):
self.connect()
# Because the mail and password is taken in request.POST it's
# unicode strings, we have to convert it to a byte strings
# before sending.
# However, we want to do this as late as possible.
# first, figure out the uid
search_filter = self.make_search_filter(dict(mail=mail))
try:
# get the uid (first and foremost) but also pick up the other
# essential attributes which we'll need later on.
results = self.ldo.search_s(
"dc=mozilla",
ldap.SCOPE_SUBTREE,
smart_str(search_filter),
['uid', 'givenName', 'sn', 'mail']
)
if not results:
# that means there is no user with this email address
return
uid, result = results[0]
# search by groups
group_names = flatten_group_names(GROUP_MAPPINGS.values())
search_filter1 = self.make_search_filter(
dict(cn=group_names),
any_parameter=True
)
# When searching by group you need to be more delicate with how you
# search.
            # This is a pattern that :jabba helped me find.
search_filter2 = self.make_search_filter({
'memberUID': [uid, mail],
'member': ['mail=%s,o=com,dc=mozilla' % mail,
'mail=%s,o=org,dc=mozilla' % mail,
                           'mail=%s,o=net,dc=mozilla' % mail],
}, any_parameter=True)
# combine the group part with the mail part
search_filter = '(&%s%s)' % (search_filter1, search_filter2)
group_results = self.ldo.search_s(
"ou=groups,dc=mozilla",
ldap.SCOPE_SUBTREE,
smart_str(search_filter),
['cn']
)
groups = []
for __, each in group_results:
for names in each.values():
groups.extend(names)
finally:
self.disconnect()
# Now we know everything we need to know about the user but lastly we
# need to check if their password is correct
self.initialize()
try:
self.ldo.simple_bind_s(smart_str(uid), smart_str(password))
except ldap.INVALID_CREDENTIALS: # Bad password, credentials are bad.
return
except ldap.UNWILLING_TO_PERFORM: # Bad password, credentials are bad.
return
else:
self.ldo.unbind_s()
first_name = result['givenName'][0]
last_name = result['sn'][0]
email = result['mail'][0]
first_name = force_unicode(first_name)
last_name = force_unicode(last_name)
# final wrapper that returns the user
return self._update_local_user(
user,
mail,
first_name,
last_name,
email,
in_groups=groups
)
def _update_local_user(self, user, username, first_name, last_name, email,
in_groups=None):
if in_groups is None:
in_groups = []
# Because the username field on model User is capped to 30
# characters we need to assign a butchered username here.
# It's not a problem because the user can be found by email
# anyway.
# 30 is the default max length of the username field for
# django.contrib.auth.models.User
if not user:
django_username = username
if email_re.match(django_username):
if isinstance(username, unicode):
# md5 chokes on non-ascii characters
django_username = username.encode('ascii', 'ignore')
django_username = (md5_constructor(django_username)
.hexdigest()[:30])
user = User(username=django_username,
first_name=first_name,
last_name=last_name,
email=email)
user.set_unusable_password()
user.save()
else:
changed = False
if user.first_name != first_name:
user.first_name = first_name
changed = True
if user.last_name != last_name:
user.last_name = last_name
changed = True
if user.email != email:
user.email = email
changed = True
if changed:
user.save()
for django_name, ldap_names in GROUP_MAPPINGS.items():
ldap_names = set(flatten_group_names(ldap_names))
if ldap_names & set(in_groups):
# make sure the user is in this django group
if not user.groups.filter(name=django_name).exists():
user.groups.add(Group.objects.get(name=django_name))
else:
user.groups.remove(Group.objects.get(name=django_name))
return user
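# Minimal wiring sketch (the dotted backend path below is an assumption;
# point it at wherever this backend class actually lives in the project):
#
#   AUTHENTICATION_BACKENDS = (
#       'myproject.auth.ldap_backend.LDAPBackend',
#   )
#
#   from django.contrib.auth import authenticate
#   user = authenticate(username='jdoe@example.com', password='secret')
#   # -> a local Django User on success, None otherwise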
|
mpl-2.0
| -8,468,056,727,729,221,000 | 34.953488 | 79 | 0.559077 | false |
ambyte/Vertaler
|
src/modules/httprequest.py
|
1
|
1619
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (c) 2011 Sergey Gulyaev <astraway@gmail.com>
#
# This file is part of Vertaler.
#
# Vertaler is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Vertaler is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
# ----------------------------------------------------------------------------
from src.modules.settings import config
from src.packages import requests
class HttpRequest():
def __init__(self):
if config.useProxy:
proxy = config.proxyAddress+":"+ config.proxyPort
user = config.proxyLogin
password = config.proxyPassword
self.proxies={'proxy':proxy,'user':user,'password':password}
else:
self.proxies=None
def http_request(self,url,method='POST',params=None,data=None,headers=None):
response = requests.request(method,url,params=params,data=data,headers=headers,timeout=5,proxies=self.proxies)
return response.content
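# Minimal usage sketch (the URL is a placeholder and the Vertaler config is
# assumed to be available); only runs when this module is executed directly:
if __name__ == '__main__':
    client = HttpRequest()
    print(client.http_request('http://example.com', method='GET'))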
|
gpl-2.0
| -4,815,225,284,546,109,000 | 41.631579 | 118 | 0.64546 | false |
Railk/T3S
|
Typescript.py
|
1
|
23768
|
# coding=utf8
import sublime
import sublime_plugin
from queue import Queue
from threading import Thread
from subprocess import Popen, PIPE
import subprocess
import os
import json
import re
import sys
# --------------------------------------- CONSTANT -------------------------------------- #
dirname = os.path.dirname(__file__)
if os.name == 'nt':
ICONS_PATH = ".."+os.path.join(dirname.split('Packages')[1], 'icons', 'bright-illegal')
else:
ICONS_PATH = "Packages"+os.path.join(dirname.split('Packages')[1], 'icons', 'bright-illegal.png')
TSS_PATH = os.path.join(dirname,'bin','tss.js')
COMPLETION_LIST = []
ROOT_FILES = []
PROCESSES = []
ERRORS = {}
# -------------------------------------- UTILITIES -------------------------------------- #
def is_ts(view):
return view.file_name() and view.file_name().endswith('.ts')
def is_dts(view):
return view.file_name() and view.file_name().endswith('.d.ts')
def get_lines(view):
(line,col) = view.rowcol(view.size())
return line
def get_content(view):
return view.substr(sublime.Region(0, view.size()))
js_id_re = re.compile(u'^[_$a-zA-Z\u00FF-\uFFFF][_$a-zA-Z0-9\u00FF-\uFFFF]*')
def is_member_completion(line):
def partial_completion():
sp = line.split(".")
if len(sp) > 1:
return js_id_re.match(sp[-1]) is not None
return False
return line.endswith(".") or partial_completion()
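# Illustrative behaviour of is_member_completion() (examples, not exhaustive):
#   is_member_completion('foo.')    -> True   (trailing dot)
#   is_member_completion('foo.ba')  -> True   (partial member after a dot)
#   is_member_completion('foo')     -> False  (no member access)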
# ----------------------------------------- TSS ---------------------------------------- #
class Tss(object):
interface = False
threads = []
queues = {}
processes = {}
prefixes = {
'method': u'○',
'property': u'●',
'class':u'◆',
'interface':u'◇',
'keyword':u'∆',
'variable': u'∨',
'public':u'[pub]',
'private':u'[priv]'
}
data = {
'string':u'"string"',
'boolean':u'false',
'Object':u'{"key":"value"}',
'{}':u'{"key":"value"}',
'any':'"any"',
'any[]':'"[]"',
'HTMLElement':'"HTMLElement"',
'Function':'function(){}',
'number':'0.0'
}
# GET PROCESS
def get_process(self,view):
filename = view.file_name();
if filename in self.processes:
return self.processes[filename]
return None
# START PROCESS
def start(self,view,filename,added):
if filename in self.processes:
if added != None and added not in self.processes:
self.processes[added] = self.processes[filename]
self.queues[added] = self.queues[filename]
self.update(view)
return
self.processes[filename] = None
self.queues[filename] = {'stdin':Queue(),'stdout':Queue()}
if added != None: self.queues[added] = self.queues[filename]
thread = TssInit(filename,self.queues[filename]['stdin'],self.queues[filename]['stdout'])
self.add_thread(thread)
self.handle_threads(view,filename,added)
# RELOAD PROCESS
def reload(self,view):
process = self.get_process(view)
if process == None:
return
sublime.active_window().run_command('save_all')
process.stdin.write(bytes('reload\n','UTF-8'))
print(process.stdout.readline().decode('UTF-8'))
# GET INDEXED FILES
def files(self,view):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('files\n','UTF-8'));
print(process.stdout.readline().decode('UTF-8'))
# KILL PROCESS
def kill(self):
del ROOT_FILES[:]
del COMPLETION_LIST[:]
self.threads= []
ERRORS.clear()
self.processes.clear()
self.queues.clear()
for process in PROCESSES:
process.stdin.write(bytes('quit\n','UTF-8'))
process.kill()
del PROCESSES[:]
sublime.status_message('typescript projects closed')
# DUMP FILE
def dump(self,view,output):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('dump {0} {1}\n'.format(output,view.file_name().replace('\\','/')),'UTF-8'))
print(process.stdout.readline().decode('UTF-8'))
# TYPE
def type(self,view,line,col):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('type {0} {1} {2}\n'.format(str(line+1),str(col+1),view.file_name().replace('\\','/')),'UTF-8'))
return json.loads(process.stdout.readline().decode('UTF-8'))
# DEFINITION
def definition(self,view,line,col):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('definition {0} {1} {2}\n'.format(str(line+1),str(col+1),view.file_name().replace('\\','/')),'UTF-8'))
return json.loads(process.stdout.readline().decode('UTF-8'))
# REFERENCES
def references(self,view,line,col):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('references {0} {1} {2}\n'.format(str(line+1),str(col+1),view.file_name().replace('\\','/')),'UTF-8'))
print(process.stdout.readline().decode('UTF-8'))
# STRUCTURE
def structure(self,view):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('structure {0}\n'.format(view.file_name().replace('\\','/')),'UTF-8'))
return json.loads(process.stdout.readline().decode('UTF-8'))
# ASK FOR COMPLETIONS
def complete(self,view,line,col,member):
process = self.get_process(view)
if process == None:
return
process.stdin.write(bytes('completions {0} {1} {2} {3}\n'.format(member,str(line+1),str(col+1),view.file_name().replace('\\','/')),'UTF-8'))
data = process.stdout.readline().decode('UTF-8')
try:
entries = json.loads(data)['entries']
except:
print('completion json error : ',data)
entries = []
self.prepare_completions_list(entries)
# UPDATE FILE
def update(self,view):
process = self.get_process(view)
if process == None:
return
(lineCount, col) = view.rowcol(view.size())
content = view.substr(sublime.Region(0, view.size()))
process.stdin.write(bytes('update nocheck {0} {1}\n'.format(str(lineCount+1),view.file_name().replace('\\','/')),'UTF-8'))
process.stdin.write(bytes(content+'\n','UTF-8'))
process.stdout.readline().decode('UTF-8')
# GET ERRORS
def errors(self,view):
if self.get_process(view) == None:
return
filename = view.file_name()
(lineCount, col) = view.rowcol(view.size())
content = view.substr(sublime.Region(0, view.size()))
self.queues[filename]['stdin'].put(bytes('update nocheck {0} {1}\n'.format(str(lineCount+1),filename.replace('\\','/')),'UTF-8'))
self.queues[filename]['stdin'].put(bytes(content+'\n','UTF-8'))
self.queues[filename]['stdin'].put(bytes('showErrors\n','UTF-8'))
def get_panel_errors(self,view):
process = self.get_process(view)
if process == None:
return
filename = view.file_name()
(lineCount, col) = view.rowcol(view.size())
content = view.substr(sublime.Region(0, view.size()))
process.stdin.write(bytes('update nocheck {0} {1}\n'.format(str(lineCount+1),filename.replace('\\','/')),'UTF-8'))
process.stdin.write(bytes(content+'\n','UTF-8'))
process.stdout.readline().decode('UTF-8')
process.stdin.write(bytes('showErrors\n','UTF-8'))
return json.loads(process.stdout.readline().decode('UTF-8'))
# ADD THREADS
def add_thread(self,thread):
self.threads.append(thread)
thread.daemon = True
thread.start()
#HANDLE THREADS
def handle_threads(self,view,filename,added, i=0, dir=1):
next_threads = []
for thread in self.threads:
if thread.is_alive():
next_threads.append(thread)
continue
ROOT_FILES.append(view)
self.processes[filename] = thread.result
if added != None: self.processes[added] = self.processes[filename]
self.threads = next_threads
if len(self.threads):
before = i % 8
after = (7) - before
if not after:
dir = -1
if not before:
dir = 1
i += dir
sublime.status_message(' Typescript is initializing [%s=%s]' % \
(' ' * before, ' ' * after))
sublime.set_timeout(lambda: self.handle_threads(view,filename,added,i,dir), 100)
return
sublime.status_message('')
self.errors(sublime.active_window().active_view())
# COMPLETIONS LIST
def prepare_completions_list(self,entries):
del COMPLETION_LIST[:]
for entry in entries:
if self.interface and entry['kind'] != 'interface': continue
key = self.get_completions_list_key(entry)
value = self.get_completions_list_value(entry)
COMPLETION_LIST.append((key,value))
COMPLETION_LIST.sort()
def get_completions_list_key(self,entry):
kindModifiers = self.prefixes[entry['kindModifiers']] if entry['kindModifiers'] in self.prefixes else ""
kind = self.prefixes[entry['kind']] if entry['kind'] in self.prefixes else ""
return kindModifiers+' '+kind+' '+str(entry['name'])+' '+str(entry['type'])
def get_completions_list_value(self,entry):
match = re.match('\(([a-zA-Z :,?\{\}\[\]]*)\):',str(entry['type']))
result = []
if match:
variables = match.group(1).split(',')
count = 1
for variable in variables:
splits = variable.split(':')
if len(splits) > 1:
split = splits[1].replace(' ','')
data = self.data[split] if split in self.data else ""
data = '${'+str(count)+':'+data+'}'
result.append(data)
count = count+1
else:
result.append('')
return entry['name']+'('+','.join(result)+');'
else:
return entry['name']
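	# Illustrative example (assumed completion entry) of the snippet built by
	# get_completions_list_value():
	#   entry = {'name': 'greet', 'type': '(name: string, age: number): void'}
	#   -> 'greet(${1:"string"},${2:0.0});'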
# ERRORS
def show_errors(self,view,errors):
try:
errors = json.loads(errors)
self.highlight_errors(view,errors)
except:
print('show_errors json error')
def highlight_errors(self,view,errors) :
char_regions = []
filename = view.file_name()
ERRORS[filename] = {}
for e in errors :
if e['file'].replace('/',os.sep).lower() == filename.lower():
start_line = e['start']['line']
end_line = e['end']['line']
left = e['start']['character']
right = e['end']['character']
a = view.text_point(start_line-1,left-1)
b = view.text_point(end_line-1,right-1)
char_regions.append( sublime.Region(a,b))
ERRORS[filename][(a,b)] = e['text']
view.add_regions('typescript-error' , char_regions , 'invalid' , ICONS_PATH)
def set_error_status(self,view):
error = self.get_error_at(view.sel()[0].begin(),view.file_name())
if error != None:
sublime.status_message(error)
else:
sublime.status_message('')
def get_error_at(self,pos,filename):
if filename in ERRORS:
for (l, h), error in ERRORS[filename].items():
if pos >= l and pos <= h:
return error
return None
# ----------------------------------------- TSS THREADs ---------------------------------------- #
class TssInit(Thread):
def __init__(self, filename, stdin_queue, stdout_queue):
self.filename = filename
self.stdin_queue = stdin_queue
self.stdout_queue = stdout_queue
self.result = None
self.settings = sublime.load_settings('Typescript.sublime-settings')
Thread.__init__(self)
def run(self):
kwargs = {}
cmd = 'tss'
if os.name == 'nt':
errorlog = open(os.devnull, 'w')
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
kwargs = {'stderr':errorlog, 'startupinfo':startupinfo}
cmd = 'tss.cmd'
print('typescript initializing')
if self.settings.get('local_tss'):
if sys.platform == "darwin":
self.result = Popen(['/usr/local/bin/node', TSS_PATH ,self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
p = Popen(['/usr/local/bin/node', TSS_PATH, self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
else:
self.result = Popen(['node', TSS_PATH, self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
p = Popen(['node', TSS_PATH, self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
else:
if sys.platform == "darwin":
self.result = Popen(['/usr/local/bin/node', '/usr/local/lib/node_modules/tss/bin/tss.js' ,self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
p = Popen(['/usr/local/bin/node', '/usr/local/lib/node_modules/tss/bin/tss.js', self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
else:
self.result = Popen([cmd, self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
p = Popen([cmd, self.filename], stdin=PIPE, stdout=PIPE, **kwargs)
PROCESSES.append(self.result)
PROCESSES.append(p)
self.result.stdout.readline().decode('UTF-8')
p.stdout.readline().decode('UTF-8')
tssWriter = TssWriter(p.stdin,self.stdin_queue)
tssWriter.daemon = True
tssWriter.start()
tssReader = TssReader(p.stdout,self.stdout_queue)
tssReader.daemon = True
tssReader.start()
class TssWriter(Thread):
def __init__(self,stdin,queue):
self.stdin = stdin
self.queue = queue
Thread.__init__(self)
def run(self):
for item in iter(self.queue.get, None):
self.stdin.write(item)
self.stdin.close()
class TssReader(Thread):
def __init__(self,stdout,queue):
self.stdout = stdout
self.queue = queue
Thread.__init__(self)
def run(self):
for line in iter(self.stdout.readline, b''):
line = line.decode('UTF-8')
if line.startswith('"updated') or line.startswith('"added'):
continue
else:
TSS.show_errors(sublime.active_window().active_view(),line)
self.stdout.close()
# --------------------------------------- COMMANDS -------------------------------------- #
# RELOAD PROJECT
class TypescriptReloadProject(sublime_plugin.TextCommand):
def run(self, edit, characters=None):
sublime.status_message('reloading project')
TSS.reload(self.view)
# SHOW INFOS
class TypescriptType(sublime_plugin.TextCommand):
prefixes = {
'method': u'○',
'property': u'●',
'class':u'♦',
'interface':u'◊',
'keyword':u'∆',
'constructor':u'■'
}
def run(self, edit, characters=None):
pos = self.view.sel()[0].begin()
(line, col) = self.view.rowcol(pos)
types = TSS.type(self.view,line,col)
if types == None: return
if 'kind' not in types: return
kind = self.prefixes[types['kind']] if types['kind'] in self.prefixes else ""
if types['docComment'] != '':
liste = types['docComment'].split('\n')+[kind+' '+types['fullSymbolName']+' '+types['type']]
else :
liste = [kind+' '+types['fullSymbolName']+' '+types['type']]
self.view.show_popup_menu(liste,None)
# GO TO DEFINITION
class TypescriptDefinition(sublime_plugin.TextCommand):
def run(self, edit, characters=None):
pos = self.view.sel()[0].begin()
(line, col) = self.view.rowcol(pos)
definition = TSS.definition(self.view,line,col)
if definition == None: return
if 'file' not in definition: return
view = sublime.active_window().open_file(definition['file'])
self.open_view(view,definition)
def open_view(self,view,definition):
if view.is_loading():
sublime.set_timeout(lambda: self.open_view(view,definition), 100)
return
else:
start_line = definition['min']['line']
end_line = definition['lim']['line']
left = definition['min']['character']
right = definition['lim']['character']
a = view.text_point(start_line-1,left-1)
b = view.text_point(end_line-1,right-1)
region = sublime.Region(a,b)
sublime.active_window().focus_view(view)
view.show_at_center(region)
view.add_regions('typescript-definition', [region], 'comment', 'dot', sublime.DRAW_NO_FILL)
class TypescriptReferences(sublime_plugin.TextCommand):
def run(self, edit, characters=None):
pos = self.view.sel()[0].begin()
(line, col) = self.view.rowcol(pos)
TSS.references(self.view,line,col)
# NAVIGATE IN FILE
class TypescriptStructure(sublime_plugin.TextCommand):
prefixes = {
'method': u'○',
'property': u'●',
'class':u'♦',
'interface':u'◊',
'keyword':u'∆',
'constructor':u'■'
}
def run(self, edit, characters=None):
self.regions = []
liste = []
members = TSS.structure(self.view)
try:
for member in members:
start_line = member['min']['line']
end_line = member['lim']['line']
left = member['min']['character']
right = member['lim']['character']
a = self.view.text_point(start_line-1,left-1)
b = self.view.text_point(end_line-1,right-1)
self.regions.append(sublime.Region(a,b))
kind = self.prefixes[member['loc']['kind']] if member['loc']['kind'] in self.prefixes else ""
container_kind = self.prefixes[member['loc']['containerKind']] if member['loc']['containerKind'] in self.prefixes else ""
liste.append([kind+' '+member['loc']['name']+' '+container_kind+' '+member['loc']['containerName'],member['loc']['kindModifiers']+' '+member['loc']['kind']])
sublime.active_window().show_quick_panel(liste,self.on_done)
except (Exception) as member:
sublime.message_dialog("File navigation : plugin not yet intialize please retry after initialisation")
def on_done(self,index):
if index == -1: return
view = sublime.active_window().active_view()
view.show_at_center(self.regions[index])
view.add_regions('typescript-definition', [self.regions[index]], 'comment', 'dot', sublime.DRAW_NO_FILL)
# CLOSE ALL PROJECTS
class TypescriptKill(sublime_plugin.TextCommand):
def run(self, edit, characters=None):
TSS.kill()
# OPEN ERROR PANEL
class TypescriptErrorPanel(sublime_plugin.TextCommand):
def run(self, edit, characters=None):
views = []
liste = []
errors = TSS.get_panel_errors(self.view)
try:
for e in errors:
views.append(sublime.active_window().open_file(e['file'], sublime.TRANSIENT))
if len(views) == 0:
liste.append('no errors')
sublime.active_window().show_quick_panel(liste,self.on_done)
else:
self.open_panel(views,errors)
except (Exception) as e:
sublime.message_dialog("error panel : plugin not yet intialize please retry after initialisation")
def open_panel(self,views,errors,i=0,dir=1):
# LOADING
if self.has_loading_views(views):
before = i % 8
after = (7) - before
if not after:
dir = -1
if not before:
dir = 1
i += dir
sublime.status_message(' Typescript Error panel is loading [%s=%s]' % \
(' ' * before, ' ' * after))
sublime.set_timeout(lambda: self.open_panel(views,errors,i,dir), 100)
return
# FINISHED LOADING
sublime.status_message('')
# OPEN PANEL
self.files = []
self.regions = []
self.views = []
liste = []
count=0
for e in errors:
segments = e['file'].split('/')
last = len(segments)-1
filename = segments[last]
view = views[count]
start_line = e['start']['line']
end_line = e['end']['line']
left = e['start']['character']
right = e['end']['character']
a = view.text_point(start_line-1,left-1)
b = view.text_point(end_line-1,right-1)
file_info = filename + " Line " + str(start_line) + " - "
title = self.error_text(e)
description = file_info + view.substr(view.full_line(a)).strip()
liste.append([title, description])
self.regions.append( sublime.Region(a,b))
self.files.append(e['file'])
count = count+1
sublime.active_window().show_quick_panel(liste,self.on_done)
def has_loading_views(self,views):
for view in views:
if view.is_loading():
return True
return False
def error_text(self,error):
text = error['text']
text = re.sub(r'^.*?:\s*', '', text)
return text
def on_done(self,index):
if index == -1: return
view = sublime.active_window().open_file(self.files[index])
self.open_view(view,self.regions[index])
def open_view(self,view,region):
if view.is_loading():
sublime.set_timeout(lambda: self.open_view(view,region), 100)
return
else:
sublime.active_window().focus_view(view)
view.show(region)
# AUTO COMPLETION
class TypescriptComplete(sublime_plugin.TextCommand):
def run(self, edit, characters):
for region in self.view.sel():
self.view.insert(edit, region.end(), characters)
TSS.update(self.view)
TSS.interface = (characters != '.' and self.view.substr(self.view.sel()[0].begin()-1) == ':')
self.view.run_command('auto_complete',{
'disable_auto_insert': True,
'api_completions_only': True,
			'next_completion_if_showing': True
})
# --------------------------------------- EVENT LISTENERS -------------------------------------- #
class TypescriptEventListener(sublime_plugin.EventListener):
pending = 0
settings = None
def on_activated_async(self,view):
self.init_view(view)
def on_clone_async(self,view):
self.init_view(view)
def init_view(self,view):
self.settings = sublime.load_settings('Typescript.sublime-settings')
init(view)
TSS.errors(view)
def on_post_save_async(self,view):
if not is_ts(view):
return
TSS.update(view)
TSS.errors(view)
def on_selection_modified_async(self, view):
if not is_ts(view):
return
view.erase_regions('typescript-definition')
TSS.set_error_status(view)
def on_modified_async(self,view):
if view.is_loading(): return
if not is_ts(view):
return
TSS.update(view)
self.pending = self.pending + 1
if self.settings == None:
self.settings = sublime.load_settings('Typescript.sublime-settings')
if not self.settings.get('error_on_save_only'):
sublime.set_timeout_async(lambda:self.handle_timeout(view),180)
def handle_timeout(self,view):
self.pending = self.pending -1
if self.pending == 0:
TSS.errors(view)
def on_query_completions(self, view, prefix, locations):
if is_ts(view):
pos = view.sel()[0].begin()
(line, col) = view.rowcol(pos)
is_member = str(is_member_completion(view.substr(sublime.Region(view.line(pos-1).a, pos)))).lower()
TSS.complete(view,line,col,is_member)
return (COMPLETION_LIST, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)
def on_query_context(self, view, key, operator, operand, match_all):
if key == "typescript":
view = sublime.active_window().active_view()
return is_ts(view)
# ---------------------------------------- INITIALISATION --------------------------------------- #
TSS = Tss()
def init(view):
if not is_ts(view): return
filename = view.file_name()
view.settings().set('auto_complete',False)
view.settings().set('extensions',['ts'])
if is_dts(view):
update_dts(filename)
return
root = get_root()
added = None
if root != None:
if root != filename: added = filename
filename = root
TSS.start(view,filename,added)
def update_dts(filename):
if filename.endswith('lib.d.ts'):
return
for root_file in ROOT_FILES:
TSS.start(root_file,root_file.file_name(),filename)
def get_root():
project_settings = sublime.active_window().active_view().settings().get('typescript')
current_folder = os.path.dirname(sublime.active_window().active_view().file_name())
top_folder = get_top_folder(current_folder)
top_folder_segments = top_folder.split(os.sep)
# WITH PROJECT SETTINGS TYPESCRIP DEFINED
if(project_settings != None):
for root in project_settings:
root_path = os.sep.join(top_folder_segments[:len(top_folder_segments)-1]+root.replace('\\','/').split('/'))
root_dir = os.path.dirname(root_path)
if current_folder.lower().startswith(root_dir.lower()):
return root_path
return None
# SUBLIME TS ?
else:
segments = current_folder.split(os.sep)
segments[0] = top_folder.split(os.sep)[0]
length = len(segments)
		segment_range = reversed(range(0, length + 1))
for index in segment_range:
folder = os.sep.join(segments[:index])
config_file = os.path.join(folder,'.sublimets')
config_data = get_data(config_file)
if config_data != None:
return os.path.join(folder,config_data['root'])
return None
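# Illustrative project configuration read by get_root() (names and paths are
# placeholders):
#   - in the .sublime-project settings:  "typescript": ["myapp/main.ts"]
#   - or a '.sublimets' file in the project folder:  {"root": "main.ts"}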
def get_top_folder(current_folder):
top_folder = None
open_folders = sublime.active_window().folders()
for folder in open_folders:
if current_folder.lower().startswith(folder.lower()):
top_folder = folder
break
if top_folder != None:
return top_folder
return current_folder
def get_data(file):
if os.path.isfile(file):
try:
f = open(file,'r').read()
return json.loads(f)
except IOError:
pass
return None
# ---------------------------------------- PLUGIN LOADED --------------------------------------- #
def plugin_loaded():
sublime.set_timeout(lambda:init(sublime.active_window().active_view()), 300)
|
mit
| 4,896,523,224,958,104,000 | 25.310421 | 161 | 0.64491 | false |
fsquillace/pycious
|
tests/rc.py
|
1
|
2263
|
#!/usr/bin/python
from __future__ import division
from pycious.widgets.system import BatteryTextWidget, DateTextWidget, CPUGraphWidget
from pycious.widgets.web import MailTextWidget, GrssTextWidget
from pycious.api.timer import Timer
from pycious.api.widget import ImageBoxWidget
if __name__ == "__main__":
# DEBUG (remember to backup your own rc.lua):
# $ cp rc.lua ~/.config/awesome/
# $ PYTHONPATH=.. python -O rc.py
    # Retrieve all available widgets defined in rc.lua.
    # You MUST define the widget in rc.lua before using it here.
    #
f = open('credentials')
username = f.readline()[:-1]
password = f.readline()[:-1]
battery_widget = BatteryTextWidget("battery_widget")
clock_widget = DateTextWidget("clock_widget")
image_widget = ImageBoxWidget("image_widget")
mail_widget = MailTextWidget("mail_widget", username, password)
grss_widget = GrssTextWidget("grss_widget", username, password)
cpu_widget = CPUGraphWidget('cpu_widget')
################### MAIL WIDGET ########################
mail_timer = Timer(5)
mail_timer.add_signal("mail", mail_widget)
mail_timer.start()
########################################################
################### GRSS WIDGET ########################
grss_timer = Timer(7)
grss_timer.add_signal("mail", grss_widget)
grss_timer.start()
########################################################
################### DATE WIDGET ########################
clock_timer = Timer(60)
clock_timer.add_signal("date", clock_widget)
clock_timer.start()
########################################################
################### BATTERY WIDGET #####################
battery_timer = Timer(11)
battery_timer.add_signal("battery", battery_widget)
battery_timer.start()
########################################################
################### CPU GRAPH WIDGET ###################
cpu_widget.set_width(50)
cpu_widget.set_background_color("#494B4F")
cpu_widget.set_color("#FF5656")
cpu_widget.set_gradient_colors({ "#FF5656", "#88A175", "#AECF96" })
cpu_timer = Timer(1)
cpu_timer.add_signal('cpu', cpu_widget)
cpu_timer.start()
########################################################
|
gpl-2.0
| 4,042,621,580,200,080,400 | 32.294118 | 84 | 0.526734 | false |
barneygale/barneymc
|
barneymc/protocol/packet.py
|
1
|
1741
|
from time import gmtime, strftime
from data_types import *
from packet_structs import *
import packet_extensions
class Packet:
def __init__(self, **kargs):
self.ident = kargs.get('ident', 0)
self.direction = kargs.get('direction', CLIENT_TO_SERVER)
self.data = kargs.get('data', {})
def clone(self):
return Packet(ident = self.ident, direction = self.direction, data = dict(self.data))
def decode(self, bbuff):
#Ident
self.ident = unpack(bbuff, 'ubyte')
#Payload
for data_type, name in structs[self.ident][self.direction]:
self.data[name] = unpack(bbuff, data_type)
#Extension
if self.ident in packet_extensions.extensions:
packet_extensions.extensions[self.ident].decode_extra(self, bbuff)
def encode(self):
#Ident
output = pack('ubyte', self.ident)
#Extension
if self.ident in packet_extensions.extensions:
append = packet_extensions.extensions[self.ident].encode_extra(self)
else:
append = ''
#Payload
for data_type, name in structs[self.ident][self.direction]:
output += pack(data_type, self.data[name])
return output + append
def __repr__(self):
if self.direction == TO_SERVER: s = ">>>"
else: s = "<<<"
format = "[%s] %s 0x%02x: %-"+str(max([len(i) for i in names.values()])+1)+"s%s"
return format % (strftime("%H:%M:%S", gmtime()), s, self.ident, names[self.ident], str(self.data))
def read_packet(bbuff, direction):
p = Packet(direction=direction)
p.decode(bbuff)
return p
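# Minimal round-trip sketch (the ident value and data field names are
# assumptions; the authoritative layouts live in packet_structs.structs):
#
#   pkt = Packet(ident=0x00, direction=CLIENT_TO_SERVER, data={'field': 0})
#   wire = pkt.encode()   # bytes ready to be written to the socket
#   # read_packet(bbuff, CLIENT_TO_SERVER) performs the reverse, given a
#   # byte-buffer object as provided by data_types.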
|
mit
| -6,908,231,330,771,204,000 | 31.240741 | 106 | 0.569787 | false |
openaid-IATI/OIPA
|
OIPA/api/codelist/views.py
|
1
|
4510
|
from django.apps import apps
from django.core.exceptions import FieldError
from rest_framework.exceptions import NotFound
from rest_framework.filters import OrderingFilter
from rest_framework_extensions.cache.mixins import CacheResponseMixin
from api.codelist.filters import AllDjangoFilterBackend
from api.codelist.serializers import (
CodelistItemSerializer, CodelistMetaSerializer
)
from api.generics.views import DynamicListView
from geodata.models import Region
from iati_synchroniser.models import Codelist
class CodelistMetaList(CacheResponseMixin, DynamicListView):
"""
Returns a list of IATI codelists stored in OIPA.
## Result details
Each result item contains full information about codelist including URI to
codelist items.
URI is constructed as follows: `/api/codelists/{codelistname}/`
"""
queryset = Codelist.objects.all().order_by('name')
serializer_class = CodelistMetaSerializer
fields = ('name', 'items')
pagination_class = None
@classmethod
def get_queryset(cls):
return Codelist.objects.exclude(
name__in=['EarmarkingCategory', 'Region', 'Country']
)
class CodelistItemList(CacheResponseMixin, DynamicListView):
"""
Returns a list of IATI codelist values stored in OIPA.
## request parameters
- `code` (*optional*): Comma separated list of codes on the codelist.
    - `vocabulary` (*optional*): Comma separated list of vocabulary codes to filter on.
- `category` (*optional*): Comma separated list of categories (if
applicable for the codelist).
## Ordering
API request may include `ordering` parameter. This parameter controls the
order in which results are returned.
Results can be ordered by:
- `name`
The user may also specify reverse orderings by prefixing the field name
with '-', like so: `-name`
## Result details
Each item contains all information on the codelist items being shown.
"""
queryset = Region.objects.none()
filter_backends = (AllDjangoFilterBackend, OrderingFilter, )
fields = ('code', 'name')
codelistAppMap = {
'Country': 'geodata',
'Region': 'geodata',
}
pagination_class = None
model_name_maps = {
'CRSAddOtherFlags': 'OtherFlags',
'IATIOrganisationIdentifier': 'OrganisationIdentifier'
}
@classmethod
def model_name_camel(cls, name):
names = name.split('-')
if len(names) > 1:
name = names[0] + names[1].capitalize()
return name
def get_app_label(self, model_name):
if 'Vocabulary' in model_name:
return 'iati_vocabulary'
return self.codelistAppMap.get(model_name, 'iati_codelists')
def get_queryset(self):
model_name = self.kwargs.get('codelist', None)
if not model_name:
return self.queryset
model_name = self.model_name_camel(model_name)
app_label = self.get_app_label(model_name)
model_name = self.model_name_maps.get(model_name, model_name)
try:
model_cls = apps.get_model(app_label, model_name)
except LookupError:
raise NotFound("Codelist not found")
if model_cls.__name__ == 'Sector':
queryset = model_cls.objects.filter(vocabulary__isnull=False)
else:
queryset = model_cls.objects.all()
vocabulary = self.request.query_params.get('vocabulary', None)
if vocabulary is not None:
try:
queryset = queryset.filter(vocabulary_id=vocabulary)
except FieldError:
pass
for f in model_cls._meta.get_fields():
if f.many_to_one and f.related_model:
queryset = queryset.select_related(f.name)
return queryset
def get_serializer_class(self):
cms = CodelistItemSerializer
# dummy, for some reason this method is called multiple times, first
# time without a request class.
cms.Meta.model = Region
if hasattr(self, 'request'):
model_name = self.kwargs.get('codelist', None)
if not model_name:
return cms
# model_name = self.capitalize(model_name)
model_name = self.model_name_maps.get(model_name, model_name)
model_name = self.model_name_camel(model_name)
app_label = self.get_app_label(model_name)
cms.Meta.model = apps.get_model(app_label, model_name)
return cms
|
agpl-3.0
| -3,173,719,476,157,136,400 | 29.680272 | 78 | 0.64745 | false |
sfu-fas/coursys
|
oldcode/management/commands/crim_import.py
|
1
|
5753
|
from django.core.management.base import BaseCommand
from django.db import transaction
from django.core.files import File
from optparse import make_option
import csv
import datetime
import os.path
import mimetypes
from advisornotes.models import AdvisorNote
from coredata.models import Person, Unit
from coredata.queries import add_person
from courselib.text import normalize_newlines
class Command(BaseCommand):
help = 'Import CSV advising data from CRIM.'
args = '<unit_slug> <advisor_userid> <csv_data> <file_base>'
option_list = BaseCommand.option_list + (
make_option('-n', '--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Don\'t actually modify anything.'),
)
def get_filepath(self, fm_filename):
if not fm_filename:
return None
filename = os.path.split(fm_filename)[1]
filepath = os.path.join(self.file_base, filename)
if os.path.isfile(filepath):
return filepath
else:
if self.verbosity > 0:
print("Missing file %s." % (filename,))
def get_advisornote(self, key, person, advisor, created, delete_old_file=False, offset=0):
"""
get_or_create for this usage
"""
created = created + datetime.timedelta(minutes=offset)
# look for previously-imported version of this note, so we're roughly idempotent
oldnotes = AdvisorNote.objects.filter(student=person, advisor=advisor, created_at=created, unit=self.unit)
oldnotes = [n for n in oldnotes if 'import_key' in n.config and n.config['import_key'] == key]
if oldnotes:
note = oldnotes[0]
if delete_old_file and note.file_attachment and os.path.isfile(note.file_attachment.path):
# let file be recreated below
os.remove(note.file_attachment.path)
note.file_attachment = None
note.file_mediatype = None
else:
note = AdvisorNote(student=person, advisor=advisor, created_at=created, unit=self.unit)
note.config['import_key'] = key
note.config['src'] = 'crim_import'
return note, bool(oldnotes)
def attach_file(self, note, filepath):
"""
Use this filepath as the attachment for this note.
"""
with File(open(filepath, 'rb')) as fh:
base = os.path.split(filepath)[1]
if self.commit:
note.file_attachment.save(base, fh)
mediatype = mimetypes.guess_type(filepath)[0]
note.file_mediatype = mediatype
def import_note(self, advisor, fn, i, row):
emplid = row['Student ID']
date_str = row['Date Modified']
notes = normalize_newlines(row['Notes'])
files = [
row.get('Transcript', None),
row.get('Files', None),
row.get('Files2', None),
row.get('Files3', None),
]
files = list(map(self.get_filepath, files))
files = list(filter(bool, files))
# fix mis-typed emplids we found
# Lindsay
if emplid == '960022098':
emplid = '963022098'
elif emplid == '30108409':
emplid = '301078409'
elif emplid == '30115964':
emplid = '301115964'
elif emplid == '30117882':
emplid = '301178882'
# Michael Sean
elif emplid == '30105659':
emplid = '301040985' # ?
# Dijana
elif emplid == '30120965':
emplid = '301202965'
if not emplid or emplid == '0':
if self.verbosity > 0:
print('No emplid on row %i' % (i+2))
return
p = add_person(emplid, commit=self.commit)
if not p:
if self.verbosity > 0:
print("Can't find person on row %i (emplid %s)" % (i+2, emplid))
return
if self.verbosity > 1:
print("Importing %s with %i file(s)." % (emplid, len(files)))
try:
date = datetime.datetime.strptime(date_str, '%m-%d-%Y').date()
except ValueError:
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
created = datetime.datetime.combine(date, datetime.time(hour=12, minute=0))
key = '%s-%i' % (fn, i)
note, _ = self.get_advisornote(key, p, advisor, created, delete_old_file=self.commit)
if files:
path = files[0]
self.attach_file(note, path)
for j, path in enumerate(files[1:]):
# these get stashed in accompanying notes
k = key + '-auxfile-' + str(i)
n, _ = self.get_advisornote(k, p, advisor, created, delete_old_file=self.commit, offset=(j+1))
n.text = '[Additional file for previous note.]'
self.attach_file(n, path)
if self.commit:
n.save()
note.text = notes
if self.commit:
note.save()
def import_notes(self, unit_slug, advisor_userid, inputfile, file_base):
self.unit = Unit.objects.get(slug=unit_slug)
self.file_base = file_base
advisor = Person.objects.get(userid=advisor_userid)
with open(inputfile, 'rb') as fh:
data = csv.DictReader(fh)
fn = os.path.split(inputfile)[1]
for i, row in enumerate(data):
with transaction.atomic():
self.import_note(advisor, fn, i, row)
def handle(self, *args, **options):
self.verbosity = int(options['verbosity'])
self.commit = not options['dry_run']
self.import_notes(args[0], args[1], args[2], args[3])
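# Example invocation (unit slug, userid and paths are placeholders):
#
#   python manage.py crim_import crim advisor1 crim_notes.csv /data/crim_files --dry-run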
|
gpl-3.0
| 9,127,078,747,302,222,000 | 34.085366 | 114 | 0.566661 | false |
aligoren/pyalgo
|
evolutionary_algo.py
|
1
|
1389
|
from string import letters
from random import choice, random
target = list("METHINKS IT IS LIKE A WEASEL")
charset = letters + ' '
parent = [choice(charset) for _ in range(len(target))]
minmutaterate = .09
C = range(100)
perfectfitness = float(len(target))
def fitness(trial):
'Sum of matching chars by position'
return sum(t==h for t,h in zip(trial, target))
def mutaterate():
'Less mutation the closer the fit of the parent'
return 1-((perfectfitness - fitness(parent)) / perfectfitness * (1 - minmutaterate))
def mutate(parent, rate):
return [(ch if random() <= rate else choice(charset)) for ch in parent]
def que():
'(from the favourite saying of Manuel in Fawlty Towers)'
print ("#%-4i, fitness: %4.1f%%, '%s'" %
(iterations, fitness(parent)*100./perfectfitness, ''.join(parent)))
def mate(a, b):
place = 0
if choice(xrange(10)) < 7:
place = choice(xrange(len(target)))
else:
return a, b
return a, b, a[:place] + b[place:], b[:place] + a[place:]
iterations = 0
center = len(C)/2
while parent != target:
rate = mutaterate()
iterations += 1
if iterations % 100 == 0: que()
copies = [ mutate(parent, rate) for _ in C ] + [parent]
parent1 = max(copies[:center], key=fitness)
parent2 = max(copies[center:], key=fitness)
parent = max(mate(parent1, parent2), key=fitness)
que()
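# Illustrative progress output (exact values differ on every run):
#   #100 , fitness: 57.1%, '...partially matched candidate...'
#   #200 , fitness: 89.3%, '...closer candidate...'
#   #231 , fitness: 100.0%, 'METHINKS IT IS LIKE A WEASEL'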
|
mit
| 556,866,754,575,191,800 | 28.553191 | 88 | 0.637869 | false |
openstack/python-glanceclient
|
glanceclient/tests/unit/test_progressbar.py
|
1
|
3006
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import sys
import requests
import testtools
from glanceclient.common import progressbar
from glanceclient.common import utils
from glanceclient.tests import utils as test_utils
class TestProgressBarWrapper(testtools.TestCase):
def test_iter_iterator_display_progress_bar(self):
size = 100
# create fake response object to return request-id with iterator
resp = requests.Response()
resp.headers['x-openstack-request-id'] = 'req-1234'
iterator_with_len = utils.IterableWithLength(iter('X' * 100), size)
requestid_proxy = utils.RequestIdProxy((iterator_with_len, resp))
saved_stdout = sys.stdout
try:
sys.stdout = output = test_utils.FakeTTYStdout()
# Consume iterator.
data = list(progressbar.VerboseIteratorWrapper(requestid_proxy,
size))
self.assertEqual(['X'] * 100, data)
self.assertEqual(
'[%s>] 100%%\n' % ('=' * 29),
output.getvalue()
)
finally:
sys.stdout = saved_stdout
def test_iter_file_display_progress_bar(self):
size = 98304
file_obj = io.StringIO('X' * size)
saved_stdout = sys.stdout
try:
sys.stdout = output = test_utils.FakeTTYStdout()
file_obj = progressbar.VerboseFileWrapper(file_obj, size)
chunksize = 1024
chunk = file_obj.read(chunksize)
while chunk:
chunk = file_obj.read(chunksize)
self.assertEqual(
'[%s>] 100%%\n' % ('=' * 29),
output.getvalue()
)
finally:
sys.stdout = saved_stdout
def test_iter_file_no_tty(self):
size = 98304
file_obj = io.StringIO('X' * size)
saved_stdout = sys.stdout
try:
sys.stdout = output = test_utils.FakeNoTTYStdout()
file_obj = progressbar.VerboseFileWrapper(file_obj, size)
chunksize = 1024
chunk = file_obj.read(chunksize)
while chunk:
chunk = file_obj.read(chunksize)
# If stdout is not a tty progress bar should do nothing.
self.assertEqual('', output.getvalue())
finally:
sys.stdout = saved_stdout
|
apache-2.0
| -22,959,252,073,428,704 | 35.658537 | 78 | 0.597804 | false |
aykut/django-oscar
|
oscar/apps/product/reviews/abstract_models.py
|
1
|
5476
|
from django.db import models
from django.utils.translation import gettext as _
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.conf import settings
from django.db.models import Sum, Count
from oscar.apps.product.reviews.managers import (ApprovedReviewsManager, RecentReviewsManager,
TopScoredReviewsManager, TopVotedReviewsManager)
class AbstractProductReview(models.Model):
"""
Superclass ProductReview. Some key aspects have been implemented from the original spec.
* Each product can have reviews attached to it. Each review has a title, a body and a score from 1-5.
* Signed in users can always submit reviews, anonymous users can only submit reviews if a setting
OSCAR_ALLOW_ANON_REVIEWS is set to true - it should default to false.
* If anon users can submit reviews, then we require their name, email address and an (optional) URL.
* By default, reviews must be approved before they are live.
However, if a setting OSCAR_MODERATE_REVIEWS is set to false, then they don't need moderation.
* Each review should have a permalink, ie it has its own page.
* Each reviews can be voted up or down by other users
* Only signed in users can vote
* A user can only vote once on each product once
"""
# Note we keep the review even if the product is deleted
product = models.ForeignKey('product.Item', related_name='reviews', null=True, on_delete=models.SET_NULL)
SCORE_CHOICES = tuple([(x, x) for x in range(0, 6)])
score = models.SmallIntegerField(_("Score"), choices=SCORE_CHOICES)
title = models.CharField(_("Title"), max_length=255)
body = models.TextField(_("Body"))
# User information. We include fields to handle anonymous users
user = models.ForeignKey('auth.User', related_name='reviews', null=True, blank=True)
name = models.CharField(_("Name"), max_length=255, null=True, blank=True)
email = models.EmailField(_("Email"), null=True, blank=True)
homepage = models.URLField(_("URL"), null=True, blank=True)
FOR_MODERATION, APPROVED, REJECTED = range(0, 3)
STATUS_CHOICES = (
(FOR_MODERATION, _("Requires moderation")),
(APPROVED, _("Approved")),
(REJECTED, _("Rejected")),
)
default_status = FOR_MODERATION if settings.OSCAR_MODERATE_REVIEWS else APPROVED
status = models.SmallIntegerField(_("Status"), choices=STATUS_CHOICES, default=default_status)
# Denormalised vote totals
total_votes = models.IntegerField(_("Total Votes"), default=0) # upvotes + down votes
delta_votes = models.IntegerField(_("Delta Votes"), default=0, db_index=True) # upvotes - down votes
date_created = models.DateTimeField(auto_now_add=True)
# Managers
objects = models.Manager()
approved = ApprovedReviewsManager()
class Meta:
abstract = True
ordering = ['-delta_votes']
unique_together = (('product', 'user'),)
@models.permalink
def get_absolute_url(self):
return ('products:reviews-detail', (), {
'item_slug': self.product.slug,
'item_pk': self.product.id,
'pk': self.id})
def __unicode__(self):
return self.title
def save(self, *args, **kwargs):
if not self.user and not (self.name and self.email):
raise ValidationError("Anonymous review must have a name and an email")
if not self.title:
raise ValidationError("Reviews must have a title")
if self.score is None:
raise ValidationError("Reviews must have a score")
super(AbstractProductReview, self).save(*args, **kwargs)
def has_votes(self):
return self.total_votes > 0
def num_up_votes(self):
"""Returns the total up votes"""
return int((self.total_votes + self.delta_votes) / 2)
def num_down_votes(self):
"""Returns the total down votes"""
return int((self.total_votes - self.delta_votes) / 2)
def update_totals(self):
"""Updates total and delta votes"""
result = self.votes.aggregate(score=Sum('delta'),total_votes=Count('id'))
self.total_votes = result['total_votes'] or 0
self.delta_votes = result['score'] or 0
self.save()
def get_reviewer_name(self):
if self.user:
return self.user.username
else:
return self.name
class AbstractVote(models.Model):
"""
Records user ratings as yes/no vote.
* Only signed-in users can vote.
* Each user can vote only once.
"""
review = models.ForeignKey('reviews.ProductReview', related_name='votes')
user = models.ForeignKey('auth.User', related_name='review_votes')
UP, DOWN = 1, -1
VOTE_CHOICES = (
(UP, _("Up")),
(DOWN, _("Down"))
)
delta = models.SmallIntegerField(choices=VOTE_CHOICES)
date_created = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
ordering = ['-date_created']
unique_together = (('user', 'review'),)
def __unicode__(self):
return u"%s vote for %s" % (self.delta, self.review)
def save(self, *args, **kwargs):
u"""
Validates model and raises error if validation fails
"""
self.review.update_totals()
super(AbstractVote, self).save(*args, **kwargs)
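# Minimal sketch (assumed project layout) of the concrete models an
# application would declare on top of these abstract bases:
#
#   class ProductReview(AbstractProductReview):
#       pass
#
#   class Vote(AbstractVote):
#       pass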
|
bsd-3-clause
| -6,377,473,219,612,442,000 | 38.395683 | 109 | 0.643718 | false |
mozilla/kitchensinkserver
|
kitchensink/device/models.py
|
1
|
1115
|
"""
device.models
-------------
"""
import logging
from django.db import models
from kitchensink.base.models import BaseModel
from kitchensink.device.managers import MakeManager
_log = logging.getLogger('kss.%s' % __name__)
class Make(BaseModel):
""" Which company made the device (i.e. LG)
"""
objects = MakeManager()
# BaseModel uses it to provide api urls
resource_name = 'make'
#:
name = models.CharField(max_length=100, unique=True)
#:
slug = models.SlugField(max_length=100)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
def natural_key(self):
return self.slug
class Device(BaseModel):
""" What is the model of the device (i.e. make:LG, model:Nexus4)
"""
# BaseModel uses it to provide api urls
resource_name = 'device'
#:
make = models.ForeignKey(Make)
#:
model = models.CharField(max_length=100)
class Meta:
ordering = ('model',)
unique_together = ('make', 'model')
def __unicode__(self):
return self.make.name + ' ' + self.model
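# Illustrative ORM usage (values are placeholders):
#   lg = Make.objects.create(name='LG', slug='lg')
#   Device.objects.create(make=lg, model='Nexus 4')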
|
bsd-3-clause
| 6,497,167,644,692,732,000 | 20.037736 | 68 | 0.610762 | false |
dwavesystems/dimod
|
dimod/sampleset.py
|
1
|
63014
|
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import itertools
import json
import numbers
import collections.abc as abc
from collections import namedtuple
import numpy as np
from numpy.lib import recfunctions
from warnings import warn
from dimod.decorators import lockable_method
from dimod.exceptions import WriteableError
from dimod.serialization.format import Formatter
from dimod.serialization.utils import (pack_samples as _pack_samples,
unpack_samples,
serialize_ndarray,
deserialize_ndarray,
serialize_ndarrays,
deserialize_ndarrays)
from dimod.utilities import LockableDict
from dimod.variables import Variables, iter_deserialize_variables
from dimod.vartypes import as_vartype, Vartype, DISCRETE
from dimod.views.samples import SampleView, SamplesArray
__all__ = ['append_data_vectors', 'append_variables', 'as_samples', 'concatenate', 'SampleSet']
def append_data_vectors(sampleset, **vectors):
"""Create a new :obj:`.SampleSet` with additional fields in
:attr:`SampleSet.record`.
Args:
sampleset (:obj:`.SampleSet`):
:obj:`.SampleSet` to build from.
**vectors (list):
Per-sample data to be appended to :attr:`SampleSet.record`. Each
keyword is a new field name and each keyword parameter should be a
list of scalar values or numpy arrays (lists and tuples will be
converted to numpy arrays).
Returns:
:obj:`.SampleSet`: SampleSet
Examples:
The following example appends a field of lists to :attr:`SampleSet.record`.
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [-1, 1]], energy=[-1.4, -1.4], vartype='SPIN')
>>> print(sampleset)
0 1 energy num_oc.
0 -1 +1 -1.4 1
1 -1 +1 -1.4 1
['SPIN', 2 rows, 2 samples, 2 variables]
>>> sampleset = dimod.append_data_vectors(sampleset, new=[[0, 1], [1, 2]])
>>> print(sampleset)
0 1 energy num_oc. new
0 -1 +1 -1.4 1 [0 1]
1 -1 +1 -1.4 1 [1 2]
['SPIN', 2 rows, 2 samples, 2 variables]
>>> print(sampleset.record.dtype)
(numpy.record, [('sample', 'i1', (2,)), ('energy', '<f8'), ('num_occurrences', '<i8'), ('new', '<i8', (2,))])
"""
record = sampleset.record
for name, vector in vectors.items():
if len(vector) != len(record.energy):
raise ValueError("Length of vector {} must be equal to number of samples.".format(name))
try:
vector = np.asarray(vector)
if vector.ndim == 1:
record = recfunctions.append_fields(record, name, vector, usemask=False, asrecarray=True)
else:
# np's append_fields cannot append a vector with a shape that
# doesn't match the base array's, so appending non-scalar data
# requires a workaround
dtype = np.dtype([(name, vector[0].dtype, vector[0].shape)])
new_arr = recfunctions.unstructured_to_structured(vector, dtype=dtype)
record = recfunctions.merge_arrays((record, new_arr), flatten=True, asrecarray=True)
except (TypeError, AttributeError):
raise ValueError("Field value type not supported.")
return SampleSet(record, sampleset.variables, sampleset.info, sampleset.vartype)
def append_variables(sampleset, samples_like, sort_labels=True):
"""Create a new :obj:`.SampleSet` with the given variables and values.
Not defined for empty sample sets. If `sample_like` is a
:obj:`.SampleSet`, its data vectors and info are ignored.
Args:
sampleset (:obj:`.SampleSet`):
:obj:`.SampleSet` to build from.
samples_like:
Samples to add to the sample set. Either a single
sample or identical in length to the sample set.
'samples_like' is an extension of NumPy's array_like_.
See :func:`.as_samples`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
Returns:
:obj:`.SampleSet`: New sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = dimod.append_variables(sampleset, {'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sample set to the previous example. Note
that the energies remain unchanged.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = dimod.append_variables(sampleset, another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables]
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html
"""
samples, labels = as_samples(samples_like)
num_samples = len(sampleset)
# we don't handle multiple values
if samples.shape[0] == num_samples:
# we don't need to do anything, it's already the correct shape
pass
elif samples.shape[0] == 1 and num_samples:
samples = np.repeat(samples, num_samples, axis=0)
else:
msg = ("mismatched shape. The samples to append should either be "
"a single sample or should match the length of the sample "
"set. Empty sample sets cannot be appended to.")
raise ValueError(msg)
# append requires the new variables to be unique
variables = sampleset.variables
if any(v in variables for v in labels):
msg = "Appended samples cannot contain variables in sample set"
raise ValueError(msg)
new_variables = list(variables) + labels
new_samples = np.hstack((sampleset.record.sample, samples))
return type(sampleset).from_samples((new_samples, new_variables),
sampleset.vartype,
info=copy.deepcopy(sampleset.info), # make a copy
sort_labels=sort_labels,
**sampleset.data_vectors)
def as_samples(samples_like, dtype=None, copy=False, order='C'):
"""Convert a samples_like object to a NumPy array and list of labels.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like_ structure. See examples below.
dtype (data-type, optional):
dtype for the returned samples array. If not provided, it is either
derived from `samples_like`, if that object has a dtype, or set to
the smallest dtype that can hold the given values.
copy (bool, optional, default=False):
If true, then samples_like is guaranteed to be copied, otherwise
it is only copied if necessary.
order ({'K', 'A', 'C', 'F'}, optional, default='C'):
Specify the memory layout of the array. See :func:`numpy.array`.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Samples.
list: Variable labels
Examples:
The following examples convert a variety of samples_like objects:
NumPy arrays
>>> import numpy as np
...
>>> dimod.as_samples(np.ones(5, dtype='int8'))
(array([[1, 1, 1, 1, 1]], dtype=int8), [0, 1, 2, 3, 4])
>>> dimod.as_samples(np.zeros((5, 2), dtype='int8'))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), [0, 1])
Lists
>>> dimod.as_samples([-1, +1, -1])
(array([[-1, 1, -1]], dtype=int8), [0, 1, 2])
>>> dimod.as_samples([[-1], [+1], [-1]])
(array([[-1],
[ 1],
[-1]], dtype=int8), [0])
Dicts
>>> dimod.as_samples({'a': 0, 'b': 1, 'c': 0}) # doctest: +SKIP
(array([[0, 1, 0]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples([{'a': -1, 'b': +1}, {'a': 1, 'b': 1}]) # doctest: +SKIP
(array([[-1, 1],
[ 1, 1]], dtype=int8), ['a', 'b'])
A 2-tuple containing an array_like object and a list of labels
>>> dimod.as_samples(([-1, +1, -1], ['a', 'b', 'c']))
(array([[-1, 1, -1]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples((np.zeros((5, 2), dtype='int8'), ['in', 'out']))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), ['in', 'out'])
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html
"""
if isinstance(samples_like, SampleSet):
# we implicitely support this by handling an iterable of mapping but
# it is much faster to just do this here.
labels = list(samples_like.variables)
if dtype is None:
return samples_like.record.sample, labels
else:
return samples_like.record.sample.astype(dtype), labels
if isinstance(samples_like, tuple) and len(samples_like) == 2:
samples_like, labels = samples_like
if not isinstance(labels, list) and labels is not None:
labels = list(labels)
else:
labels = None
if isinstance(samples_like, abc.Iterator):
# if we don't check this case we can get unexpected behaviour where an
# iterator can be depleted
raise TypeError('samples_like cannot be an iterator')
if isinstance(samples_like, abc.Mapping):
return as_samples(([samples_like], labels), dtype=dtype)
if (isinstance(samples_like, list) and samples_like and
isinstance(samples_like[0], numbers.Number)):
# this is not actually necessary but it speeds up the
# samples_like = [1, 0, 1,...] case significantly
return as_samples(([samples_like], labels), dtype=dtype)
if not isinstance(samples_like, np.ndarray):
if any(isinstance(sample, abc.Mapping) for sample in samples_like):
# go through samples-like, turning the dicts into lists
samples_like, old = list(samples_like), samples_like
if labels is None:
first = samples_like[0]
if isinstance(first, abc.Mapping):
labels = list(first)
else:
labels = list(range(len(first)))
for idx, sample in enumerate(old):
if isinstance(sample, abc.Mapping):
try:
samples_like[idx] = [sample[v] for v in labels]
except KeyError:
raise ValueError("samples_like and labels do not match")
if dtype is None:
if not hasattr(samples_like, 'dtype'):
            # we want to use the smallest dtype available; no copying is done
            # yet, but we do build a new array here so the min/max check below
            # is fast
samples_like = np.asarray(samples_like)
max_ = max(-samples_like.min(initial=0),
+samples_like.max(initial=0))
if max_ <= np.iinfo(np.int8).max:
dtype = np.int8
elif max_ <= np.iinfo(np.int16).max:
dtype = np.int16
            elif max_ <= np.iinfo(np.int32).max:
                dtype = np.int32
            elif max_ <= np.iinfo(np.int64).max:
                dtype = np.int64
else:
raise RuntimeError
else:
dtype = samples_like.dtype
# samples-like should now be array-like
arr = np.array(samples_like, dtype=dtype, copy=copy, order=order)
if arr.ndim > 2:
raise ValueError("expected samples_like to be <= 2 dimensions")
if arr.ndim < 2:
if arr.size:
arr = np.atleast_2d(arr)
elif labels: # is not None and len > 0
arr = arr.reshape((0, len(labels)))
else:
arr = arr.reshape((0, 0))
# ok we're basically done, just need to check against the labels
if labels is None:
return arr, list(range(arr.shape[1]))
elif len(labels) != arr.shape[1]:
raise ValueError("samples_like and labels dimensions do not match")
else:
return arr, labels
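# Illustrative note (added, not part of the original source): because the
# smallest integer dtype that can hold the values is chosen, plain Python
# lists are typically returned as int8 arrays, e.g.
#
#     >>> arr, labels = dimod.as_samples([0, 150, 0])
#     >>> arr.dtype == np.int16   # 150 does not fit into int8
#     True
#     >>> labels
#     [0, 1, 2]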
def concatenate(samplesets, defaults=None):
"""Combine sample sets.
Args:
        samplesets (iterable[:obj:`.SampleSet`]):
Iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8)
"""
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError("samplesets must contain at least one SampleSet")
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
# dev note: I was able to get ~2x performance boost when trying to
# implement the same functionality here by hand (I didn't know that
# this function existed then). However I think it is better to use
# numpy's function and rely on their testing etc. If however this becomes
# a performance bottleneck in the future, it might be worth changing.
record = recfunctions.stack_arrays(records, defaults=defaults,
asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype)
def _iter_records(samplesets, vartype, variables):
# coerce each record into the correct vartype and variable-order
for samples in samplesets:
# coerce vartype
if samples.vartype is not vartype:
samples = samples.change_vartype(vartype, inplace=False)
if samples.variables != variables:
new_record = samples.record.copy()
order = [samples.variables.index(v) for v in variables]
new_record.sample = samples.record.sample[:, order]
yield new_record
else:
# order matches so we're done
yield samples.record
def infer_vartype(samples_like):
"""Infer the vartype of the given samples-like.
Args:
        samples_like (samples_like):
            A collection of samples. 'samples_like' is an extension of NumPy's
            array_like_. See :func:`.as_samples`.
Returns:
The :class:`.Vartype`, or None in the case that it is ambiguous.
"""
if isinstance(samples_like, SampleSet):
return samples_like.vartype
samples, _ = as_samples(samples_like)
ones_mask = (samples == 1)
if ones_mask.all():
# either empty or all 1s, in either case ambiguous
return None
if (ones_mask ^ (samples == 0)).all():
return Vartype.BINARY
if (ones_mask ^ (samples == -1)).all():
return Vartype.SPIN
raise ValueError("given samples_like is of an unknown vartype")
class SampleSet(abc.Iterable, abc.Sized):
"""Samples and any other data returned by dimod samplers.
Args:
        record (:obj:`numpy.recarray`):
A NumPy record array. Must have 'sample', 'energy' and 'num_occurrences' as fields.
The 'sample' field should be a 2D NumPy array where each row is a sample and each
column represents the value of a variable.
variables (iterable):
An iterable of variable labels, corresponding to columns in `record.samples`.
info (dict):
Information about the :class:`SampleSet` as a whole, formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
* :class:`.ExtendedVartype.DISCRETE`, ``'DISCRETE'``
Examples:
This example creates a SampleSet out of a samples_like object (a NumPy array).
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.ones(5, dtype='int8'),
... 'BINARY', 0)
>>> sampleset.variables
Variables([0, 1, 2, 3, 4])
"""
_REQUIRED_FIELDS = ['sample', 'energy', 'num_occurrences']
###############################################################################################
# Construction
###############################################################################################
def __init__(self, record, variables, info, vartype):
vartype = as_vartype(vartype, extended=True)
# make sure that record is a numpy recarray and that it has the expected fields
if not isinstance(record, np.recarray):
raise TypeError("input record must be a numpy recarray")
elif not set(self._REQUIRED_FIELDS).issubset(record.dtype.fields):
raise ValueError("input record must have {}, {} and {} as fields".format(*self._REQUIRED_FIELDS))
self._record = record
num_samples, num_variables = record.sample.shape
self._variables = variables = Variables(variables)
if len(variables) != num_variables:
msg = ("mismatch between number of variables in record.sample ({}) "
"and labels ({})").format(num_variables, len(variables))
raise ValueError(msg)
self._info = LockableDict(info)
# vartype is checked by vartype_argument decorator
self._vartype = vartype
@classmethod
def from_samples(cls, samples_like, vartype, energy, info=None,
num_occurrences=None, aggregate_samples=False,
sort_labels=True, **vectors):
"""Build a :class:`SampleSet` from raw samples.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like_.
See :func:`.as_samples`.
vartype (:class:`.Vartype`/str/set):
Variable type for the :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
* :class:`.ExtendedVartype.DISCRETE`, ``'DISCRETE'``
energy (array_like):
Vector of energies.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If True, all samples in returned :obj:`.SampleSet` are unique,
with `num_occurrences` accounting for any duplicate samples in
`samples_like`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
This example creates a SampleSet out of a samples_like object (a dict).
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(
... dimod.as_samples({'a': 0, 'b': 1, 'c': 0}), 'BINARY', 0)
>>> sampleset.variables
Variables(['a', 'b', 'c'])
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
"""
if aggregate_samples:
return cls.from_samples(samples_like, vartype, energy,
info=info, num_occurrences=num_occurrences,
aggregate_samples=False,
**vectors).aggregate()
# get the samples, variable labels
samples, variables = as_samples(samples_like)
if sort_labels and variables: # need something to sort
try:
reindex, new_variables = zip(*sorted(enumerate(variables),
key=lambda tup: tup[1]))
except TypeError:
# unlike types are not sortable in python3, so we do nothing
pass
else:
if new_variables != variables:
# avoid the copy if possible
samples = samples[:, reindex]
variables = new_variables
num_samples, num_variables = samples.shape
energy = np.asarray(energy)
# num_occurrences
if num_occurrences is None:
num_occurrences = np.ones(num_samples, dtype=int)
else:
num_occurrences = np.asarray(num_occurrences)
# now construct the record
datatypes = [('sample', samples.dtype, (num_variables,)),
('energy', energy.dtype),
('num_occurrences', num_occurrences.dtype)]
for key, vector in vectors.items():
vectors[key] = vector = np.asarray(vector)
datatypes.append((key, vector.dtype, vector.shape[1:]))
record = np.rec.array(np.zeros(num_samples, dtype=datatypes))
record['sample'] = samples
record['energy'] = energy
record['num_occurrences'] = num_occurrences
for key, vector in vectors.items():
record[key] = vector
if info is None:
info = {}
return cls(record, variables, info, vartype)
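    # Illustrative note (added, not part of the original source): extra keyword
    # vectors become per-sample fields of the record; the field name
    # `is_feasible` below is hypothetical.
    #
    #     >>> ss = dimod.SampleSet.from_samples([[0, 1], [1, 1]], 'BINARY',
    #     ...                                   energy=[0, 1],
    #     ...                                   is_feasible=[True, False])
    #     >>> ss.data_vectors['is_feasible']
    #     array([ True, False])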
# todo: this works with DQM/BinaryPolynomial, should change the name and/or
# update the docs.
@classmethod
def from_samples_bqm(cls, samples_like, bqm, **kwargs):
"""Build a sample set from raw samples and a binary quadratic model.
        The binary quadratic model is used to calculate energies and to set
        the :class:`.Vartype` of the returned sample set.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If True, all samples in returned :obj:`.SampleSet` are unique,
with `num_occurrences` accounting for any duplicate samples in
`samples_like`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> sampleset = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm)
"""
# more performant to do this once, here rather than again in bqm.energies
# and in cls.from_samples
samples_like = as_samples(samples_like)
energies = bqm.energies(samples_like)
return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs)
@classmethod
def from_future(cls, future, result_hook=None):
"""Construct a :class:`SampleSet` referencing the result of a future computation.
Args:
future (object):
Object that contains or will contain the information needed to construct a
:class:`SampleSet`. If `future` has a :meth:`~concurrent.futures.Future.done` method,
this determines the value returned by :meth:`.SampleSet.done`.
result_hook (callable, optional):
A function that is called to resolve the future. Must accept the future and return
a :obj:`.SampleSet`. If not provided, set to
.. code-block:: python
def result_hook(future):
return future.result()
Returns:
:obj:`.SampleSet`
Notes:
The future is resolved on the first read of any of the :class:`SampleSet` properties.
Examples:
Run a dimod sampler on a single thread and load the returned future into :class:`SampleSet`.
>>> from concurrent.futures import ThreadPoolExecutor
...
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> with ThreadPoolExecutor(max_workers=1) as executor:
... future = executor.submit(dimod.ExactSolver().sample, bqm)
... sampleset = dimod.SampleSet.from_future(future)
>>> sampleset.first.energy # doctest: +SKIP
"""
obj = cls.__new__(cls)
obj._future = future
if result_hook is None:
def result_hook(future):
return future.result()
elif not callable(result_hook):
raise TypeError("expected result_hook to be callable")
obj._result_hook = result_hook
return obj
###############################################################################################
# Special Methods
###############################################################################################
def __len__(self):
"""The number of rows in record."""
return self.record.__len__()
def __iter__(self):
"""Iterate over the samples, low energy to high."""
# need to make it an iterator rather than just an iterable
return iter(self.samples(sorted_by='energy'))
def __eq__(self, other):
"""SampleSet equality."""
if not isinstance(other, SampleSet):
return False
if self.vartype != other.vartype or self.info != other.info:
return False
# check that all the fields match in record, order doesn't matter
if self.record.dtype.fields.keys() != other.record.dtype.fields.keys():
return False
for field in self.record.dtype.fields:
if field == 'sample':
continue
if not (self.record[field] == other.record[field]).all():
return False
# now check the actual samples.
if self.variables == other.variables:
return (self.record.sample == other.record.sample).all()
try:
other_idx = [other.variables.index(v) for v in self.variables]
except ValueError:
# mismatched variables
return False
return (self.record.sample == other.record.sample[:, other_idx]).all()
def __getstate__(self):
# Ensure that any futures are resolved before pickling.
self.resolve()
# we'd prefer to do super().__getstate__ but unfortunately that's not
# present, so instead we recreate the (documented) behaviour
return self.__dict__
def __repr__(self):
return "{}({!r}, {}, {}, {!r})".format(self.__class__.__name__,
self.record,
self.variables,
self.info,
self.vartype.name)
def __str__(self):
return Formatter().format(self)
###############################################################################################
# Properties
###############################################################################################
@property
def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
            >>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
            ...                                          energy=[-1, 1])
            >>> sampleset.data_vectors['energy']
            array([-1,  1])
            Note that this is equivalent to, and less performant than:
            >>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
            ...                                          energy=[-1, 1])
            >>> sampleset.record['energy']
            array([-1,  1])
"""
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'}
@property
def first(self):
"""Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)
"""
try:
return next(self.data(sorted_by='energy', name='Sample'))
except StopIteration:
raise ValueError('{} is empty'.format(self.__class__.__name__))
@property
def info(self):
"""Dict of information about the :class:`SampleSet` as a whole.
Examples:
This example shows the type of information that might be returned by
a dimod sampler by submitting a BQM that sets a value on a D-Wave
system's first listed coupler.
>>> from dwave.system import DWaveSampler # doctest: +SKIP
>>> sampler = DWaveSampler() # doctest: +SKIP
>>> bqm = dimod.BQM({}, {sampler.edgelist[0]: -1}, 0, dimod.SPIN) # doctest: +SKIP
>>> sampler.sample(bqm).info # doctest: +SKIP
{'timing': {'qpu_sampling_time': 315,
'qpu_anneal_time_per_sample': 20,
'qpu_readout_time_per_sample': 274,
# Snipped above response for brevity
"""
self.resolve()
return self._info
@property
def record(self):
""":obj:`numpy.recarray` containing the samples, energies, number of occurences, and other sample data.
Examples:
>>> sampler = dimod.ExactSolver()
>>> sampleset = sampler.sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1.0})
>>> sampleset.record.sample # doctest: +SKIP
array([[-1, -1],
[ 1, -1],
[ 1, 1],
[-1, 1]], dtype=int8)
>>> len(sampleset.record.energy)
4
"""
self.resolve()
return self._record
@property
def variables(self):
""":class:`~.variables.Variables` of variable labels.
Corresponds to columns of the sample field of :attr:`.SampleSet.record`.
"""
self.resolve()
return self._variables
@property
def vartype(self):
""":class:`.Vartype` of the samples."""
self.resolve()
return self._vartype
@property
def is_writeable(self):
return getattr(self, '_writeable', True)
@is_writeable.setter
def is_writeable(self, b):
b = bool(b) # cast
self._writeable = b
self.record.flags.writeable = b
self.info.is_writeable = b
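    # Added note (not part of the original source): setting is_writeable =
    # False also clears the writeable flag on the underlying record array and
    # locks `info`, so mutating operations that check this flag (e.g.
    # change_vartype below, or methods decorated with @lockable_method) are
    # expected to raise.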
###############################################################################################
# Views
###############################################################################################
def done(self):
"""Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.first.energy
-1.0
"""
return (not hasattr(self, '_future')) or (not hasattr(self._future, 'done')) or self._future.done()
def samples(self, n=None, sorted_by='energy'):
"""Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples(): # doctest: +SKIP
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8)
"""
if n is not None:
return self.samples(sorted_by=sorted_by)[:n]
if sorted_by is None:
samples = self.record.sample
else:
order = np.argsort(self.record[sorted_by])
samples = self.record.sample[order]
return SamplesArray(samples, self.variables)
def data(self, fields=None, sorted_by='energy', name='Sample', reverse=False,
sample_dict_cast=True, index=False):
"""Iterate over the data in the :class:`SampleSet`.
Args:
fields (list, optional, default=None):
If specified, only these fields are included in the yielded tuples.
The special field name 'sample' can be used to view the samples.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None, the samples are yielded
in record order.
name (str/None, optional, default='Sample'):
Name of the yielded namedtuples or None to yield regular tuples.
reverse (bool, optional, default=False):
If True, yield in reverse order.
sample_dict_cast (bool, optional, default=True):
                If True (the default), samples are cast to dicts rather than
                returned as :class:`.SampleView` objects, which uses more
                memory. Set to False to reduce the memory load.
index (bool, optional, default=False):
If True, `datum.idx` gives the corresponding index of the
:attr:`.SampleSet.record`.
Yields:
namedtuple/tuple: The data in the :class:`SampleSet`, in the order specified by the input
`fields`.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> for datum in sampleset.data(fields=['sample', 'energy']): # doctest: +SKIP
... print(datum)
Sample(sample={'a': -1, 'b': -1}, energy=-1.5)
Sample(sample={'a': 1, 'b': -1}, energy=-0.5)
Sample(sample={'a': 1, 'b': 1}, energy=-0.5)
Sample(sample={'a': -1, 'b': 1}, energy=2.5)
>>> for energy, in sampleset.data(fields=['energy'], sorted_by='energy'):
... print(energy)
...
-1.5
-0.5
-0.5
2.5
>>> print(next(sampleset.data(fields=['energy'], name='ExactSolverSample')))
ExactSolverSample(energy=-1.5)
"""
record = self.record
if fields is None:
# make sure that sample, energy is first
fields = self._REQUIRED_FIELDS + [field for field in record.dtype.fields
if field not in self._REQUIRED_FIELDS]
if index:
fields.append('idx')
if sorted_by is None:
order = np.arange(len(self))
elif index:
# we want a stable sort but it can be slower
order = np.argsort(record[sorted_by], kind='stable')
else:
order = np.argsort(record[sorted_by])
if reverse:
order = np.flip(order)
if name is None:
# yielding a tuple
def _pack(values):
return tuple(values)
else:
# yielding a named tuple
SampleTuple = namedtuple(name, fields)
def _pack(values):
return SampleTuple(*values)
def _values(idx):
for field in fields:
if field == 'sample':
sample = SampleView(record.sample[idx, :], self.variables)
if sample_dict_cast:
sample = dict(sample)
yield sample
elif field == 'idx':
yield idx
else:
yield record[field][idx]
for idx in order:
yield _pack(_values(idx))
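    # Illustrative note (added, not part of the original source): with
    # index=True each yielded tuple also carries the row index into .record,
    # e.g.
    #
    #     >>> next(sampleset.data(fields=['energy'], index=True))  # doctest: +SKIP
    #     Sample(energy=-1.5, idx=0)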
###############################################################################################
# Methods
###############################################################################################
def copy(self):
"""Create a shallow copy."""
return self.__class__(self.record.copy(),
self.variables, # a new one is made in all cases
self.info.copy(),
self.vartype)
def change_vartype(self, vartype, energy_offset=0.0, inplace=True):
"""Return the :class:`SampleSet` with the given vartype.
Args:
vartype (:class:`.Vartype`/str/set):
Variable type to use for the new :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
            energy_offset (number, optional, default=0.0):
Constant value applied to the 'energy' field of :attr:`SampleSet.record`.
inplace (bool, optional, default=True):
If True, the instantiated :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:obj:`.SampleSet`: SampleSet with changed vartype. If `inplace` is True, returns itself.
Notes:
This function is non-blocking unless `inplace==True`, in which case
the sample set is resolved.
Examples:
This example creates a binary copy of a spin-valued :class:`SampleSet`.
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> sampleset_binary = sampleset.change_vartype(dimod.BINARY, energy_offset=1.0, inplace=False)
>>> sampleset_binary.vartype is dimod.BINARY
True
>>> sampleset_binary.first.sample
{'a': 0, 'b': 0}
"""
if not inplace:
return self.copy().change_vartype(vartype, energy_offset, inplace=True)
if not self.done():
def hook(sampleset):
sampleset.resolve()
return sampleset.change_vartype(vartype, energy_offset)
return self.from_future(self, hook)
if not self.is_writeable:
raise WriteableError("SampleSet is not writeable")
vartype = as_vartype(vartype, extended=True) # cast to correct vartype
if energy_offset:
self.record.energy = self.record.energy + energy_offset
if vartype is self.vartype:
return self # we're done!
if vartype is Vartype.SPIN and self.vartype is Vartype.BINARY:
self.record.sample = 2 * self.record.sample - 1
self._vartype = vartype
elif vartype is Vartype.BINARY and self.vartype is Vartype.SPIN:
self.record.sample = (self.record.sample + 1) // 2
self._vartype = vartype
else:
raise ValueError("Cannot convert from {} to {}".format(self.vartype, vartype))
return self
@lockable_method
def relabel_variables(self, mapping, inplace=True):
"""Relabel the variables of a :class:`SampleSet` according to the specified mapping.
Args:
mapping (dict):
Mapping from current variable labels to new, as a dict. If incomplete mapping is
specified, unmapped variables keep their current labels.
inplace (bool, optional, default=True):
If True, the current :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:class:`.SampleSet`: SampleSet with relabeled variables. If `inplace` is True, returns
itself.
Notes:
This function is non-blocking unless `inplace==True`, in which case
the sample set is resolved.
Examples:
This example creates a relabeled copy of a :class:`SampleSet`.
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> new_sampleset = sampleset.relabel_variables({'a': 0, 'b': 1}, inplace=False)
>>> new_sampleset.variables
Variables([0, 1])
"""
if not inplace:
return self.copy().relabel_variables(mapping, inplace=True)
if not self.done():
def hook(sampleset):
sampleset.resolve()
return sampleset.relabel_variables(mapping, inplace=True)
return self.from_future(self, hook)
self.variables._relabel(mapping)
return self
def resolve(self):
"""Ensure that the sampleset is resolved if constructed from a future.
"""
# if it doesn't have the attribute then it is already resolved
if hasattr(self, '_future'):
samples = self._result_hook(self._future)
self.__init__(samples.record, samples.variables, samples.info, samples.vartype)
del self._future
del self._result_hook
def aggregate(self):
"""Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
Examples:
            This example aggregates a sample set with two identical samples
out of three.
>>> sampleset = dimod.SampleSet.from_samples([[0, 0, 1], [0, 0, 1],
... [1, 1, 1]],
... dimod.BINARY,
... [0, 0, 1])
>>> print(sampleset)
0 1 2 energy num_oc.
0 0 0 1 0 1
1 0 0 1 0 1
2 1 1 1 1 1
['BINARY', 3 rows, 3 samples, 3 variables]
>>> print(sampleset.aggregate())
0 1 2 energy num_oc.
0 0 0 1 0 2
1 1 1 1 1 1
['BINARY', 2 rows, 3 samples, 3 variables]
"""
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
# and on the inverse
revorder = np.empty(len(order), dtype=order.dtype)
revorder[order] = np.arange(len(order))
inverse = revorder[inverse]
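        # Tiny illustration (added comment, not part of the original source):
        # for rows [b, a, b] with a < b, np.unique gives indices=[1, 0] and
        # inverse=[1, 0, 1]; argsort([1, 0]) = [1, 0] restores first-seen
        # order, so indices becomes [0, 1] and the remapped inverse is
        # [0, 1, 0] -- row b keeps slot 0 and row a keeps slot 1.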
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
def append_variables(self, samples_like, sort_labels=True):
"""Deprecated in favor of `dimod.append_variables`."""
warn("SampleSet.append_variables is deprecated; please use "
"`dimod.append_variables` instead.", DeprecationWarning)
return append_variables(self, samples_like, sort_labels)
def lowest(self, rtol=1.e-5, atol=1.e-8):
"""Return a sample set containing the lowest-energy samples.
A sample is included if its energy is within tolerance of the lowest
energy in the sample set. The following equation is used to determine
if two values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
See :func:`numpy.isclose` for additional details and caveats.
Args:
rtol (float, optional, default=1.e-5):
The relative tolerance (see above).
atol (float, optional, default=1.e-8):
The absolute tolerance (see above).
Returns:
            :obj:`.SampleSet`: A new sample set containing only the samples
            whose energies are within the configured tolerances of the lowest
            energy in the current sample set.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': .001},
... {('a', 'b'): -1})
>>> print(sampleset.lowest())
a b energy num_oc.
0 -1 -1 -1.001 1
['SPIN', 1 rows, 1 samples, 2 variables]
>>> print(sampleset.lowest(atol=.1))
a b energy num_oc.
0 -1 -1 -1.001 1
1 +1 +1 -0.999 1
['SPIN', 2 rows, 2 samples, 2 variables]
Note:
"Lowest energy" is the lowest energy in the sample set. This is not
always the "ground energy" which is the lowest energy possible
for a binary quadratic model.
"""
if len(self) == 0:
# empty so all are lowest
return self.copy()
record = self.record
# want all the rows within tolerance of the minimal energy
close = np.isclose(record.energy,
np.min(record.energy),
rtol=rtol, atol=atol)
record = record[close]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
def truncate(self, n, sorted_by='energy'):
"""Create a new sample set with up to n rows.
Args:
n (int):
                Maximum number of rows in the returned sample set; any rows
                beyond this limit are dropped.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
truncating. Note that this sort order is maintained in the
returned sample set.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.ones((5, 5)), dimod.SPIN, energy=5)
>>> print(sampleset)
0 1 2 3 4 energy num_oc.
0 +1 +1 +1 +1 +1 5 1
1 +1 +1 +1 +1 +1 5 1
2 +1 +1 +1 +1 +1 5 1
3 +1 +1 +1 +1 +1 5 1
4 +1 +1 +1 +1 +1 5 1
['SPIN', 5 rows, 5 samples, 5 variables]
>>> print(sampleset.truncate(2))
0 1 2 3 4 energy num_oc.
0 +1 +1 +1 +1 +1 5 1
1 +1 +1 +1 +1 +1 5 1
['SPIN', 2 rows, 2 samples, 5 variables]
See:
:meth:`SampleSet.slice`
"""
return self.slice(n, sorted_by=sorted_by)
def slice(self, *slice_args, **kwargs):
"""Create a new sample set with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned sample set.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)),
... dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
The above example's first 3 samples by energy == truncate(3):
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
The last 3 samples by energy:
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
Every second sample in between, skipping top and bottom 3:
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables]
"""
# handle `sorted_by` kwarg with a default value in a python2-compatible way
sorted_by = kwargs.pop('sorted_by', 'energy')
if kwargs:
# be strict about allowed kwargs: throw the same error as python3 would
raise TypeError('slice got an unexpected '
'keyword argument {!r}'.format(kwargs.popitem()[0]))
# follow Python's slice syntax
if slice_args:
selector = slice(*slice_args)
else:
selector = slice(None)
if sorted_by is None:
record = self.record[selector]
else:
sort_indices = np.argsort(self.record[sorted_by])
record = self.record[sort_indices[selector]]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
###############################################################################################
# Serialization
###############################################################################################
def to_serializable(self, use_bytes=False, bytes_type=bytes,
pack_samples=True):
"""Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation of the biases as bytes is used.
bytes_type (class, optional, default=bytes):
If `use_bytes` is True, this class is used to wrap the bytes
objects in the serialization. Useful for Python 2 using BSON
encoding, which does not accept the raw `bytes` type;
`bson.Binary` can be used instead.
pack_samples (bool, optional, default=True):
                Pack the samples using 1 bit per value. Samples are never
packed when :attr:`SampleSet.vartype` is
`~ExtendedVartype.DISCRETE`.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable`
"""
schema_version = "3.1.0"
# developer note: numpy's record array stores the samples, energies,
# num_occ. etc as a struct array. If we dumped that array directly to
# bytes we could avoid a copy when undoing the serialization. However,
# we want to pack the samples, so that means that we're storing the
# arrays individually.
vectors = {name: serialize_ndarray(data, use_bytes=use_bytes,
bytes_type=bytes_type)
for name, data in self.data_vectors.items()}
# we never pack DISCRETE samplesets
pack_samples = pack_samples and self.vartype is not DISCRETE
if pack_samples:
# we could just do self.record.sample > 0 for all of these, but to
# save on the copy if we are already binary and bool/integer we
# check and just pass through in that case
samples = self.record.sample
if (self.vartype is Vartype.BINARY and
(np.issubdtype(samples.dtype, np.integer) or
np.issubdtype(samples.dtype, np.bool_))):
packed = _pack_samples(samples)
else:
packed = _pack_samples(samples > 0)
sample_data = serialize_ndarray(packed,
use_bytes=use_bytes,
bytes_type=bytes_type)
else:
sample_data = serialize_ndarray(self.record.sample,
use_bytes=use_bytes,
bytes_type=bytes_type)
return {
# metadata
"type": type(self).__name__,
"version": {"sampleset_schema": schema_version},
# samples
"num_variables": len(self.variables),
"num_rows": len(self),
"sample_data": sample_data,
"sample_type": self.record.sample.dtype.name,
"sample_packed": bool(pack_samples), # 3.1.0+, default=True
# vectors
"vectors": vectors,
# other
"variable_labels": self.variables.to_serializable(),
"variable_type": self.vartype.name,
"info": serialize_ndarrays(self.info, use_bytes=use_bytes,
bytes_type=bytes_type),
}
def _asdict(self):
# support simplejson encoding
return self.to_serializable()
@classmethod
def from_serializable(cls, obj):
"""Deserialize a :class:`SampleSet`.
Args:
obj (dict):
A :class:`SampleSet` serialized by :meth:`~.SampleSet.to_serializable`.
Returns:
:obj:`.SampleSet`
Examples:
This example encodes and decodes using JSON.
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
>>> new_samples = dimod.SampleSet.from_serializable(json.loads(s))
See also:
:meth:`~.SampleSet.to_serializable`
"""
version = obj["version"]["sampleset_schema"]
if version < "3.0.0":
raise ValueError("No longer supported serialization format")
# assume we're working with v3
# other data
vartype = str(obj['variable_type']) # cast to str for python2
num_variables = obj['num_variables']
variables = list(iter_deserialize_variables(obj['variable_labels']))
info = deserialize_ndarrays(obj['info'])
# vectors
vectors = {name: deserialize_ndarray(data)
for name, data in obj['vectors'].items()}
sample = deserialize_ndarray(obj['sample_data'])
if obj.get('sample_packed', True): # 3.1.0
sample = unpack_samples(sample,
n=num_variables,
dtype=obj['sample_type'])
if vartype == 'SPIN':
sample *= 2
sample -= 1
return cls.from_samples((sample, variables), vartype, info=info,
**vectors)
###############################################################################################
# Export to dataframe
###############################################################################################
def to_pandas_dataframe(self, sample_column=False):
"""Convert a sample set to a Pandas DataFrame
Args:
sample_column(bool, optional, default=False): If True, samples are
represented as a column of type dict.
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1
"""
import pandas as pd
if sample_column:
df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
else:
# work directly with the record, it's much faster
df = pd.DataFrame(self.record.sample, columns=self.variables)
for field in sorted(self.record.dtype.fields): # sort for consistency
if field == 'sample':
continue
df.loc[:, field] = self.record[field]
return df
| apache-2.0 | 2,160,505,098,050,896,100 | 36.86899 | 132 | 0.521233 | false |
| upconsulting/IsisCB | isiscb/isisdata/migrations/0076_auto_20180601_1948.py | 1 | 7720 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-06-01 19:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('isisdata', '0075_authorityvalue_name'),
]
operations = [
migrations.CreateModel(
name='AuthorityCollection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('description', models.TextField(blank=True)),
],
),
migrations.AlterField(
model_name='aarelation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='acrelation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='attribute',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='authority',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='authoritytracking',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='ccrelation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='citation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='dataset',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalacrelation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalattribute',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalauthority',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalauthoritytracking',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalccrelation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalcitation',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicallinkeddata',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicalperson',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='historicaltracking',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='linkeddata',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AlterField(
model_name='tracking',
name='record_status_value',
field=models.CharField(blank=True, choices=[(b'Active', b'Active'), (b'Duplicate', b'Delete'), (b'Redirect', b'Redirect'), (b'Inactive', b'Inactive')], default=b'Active', max_length=255, null=True),
),
migrations.AddField(
model_name='authoritycollection',
name='authorities',
field=models.ManyToManyField(related_name='in_collections', to='isisdata.Authority'),
),
migrations.AddField(
model_name='authoritycollection',
name='createdBy',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='authority_collections', to=settings.AUTH_USER_MODEL),
),
]
| mit | 1,103,525,495,658,646,800 | 57.484848 | 210 | 0.605829 | false |
| jobscry/vz-wiki | vz_wiki/admin.py | 1 | 1672 |
from django.contrib import admin
from models import WikiPage, Revision, Comparison
def latest_revision_display(obj):
latest_revision = obj.latest_revision()
if latest_revision is None:
return '0'
return u'%s' % latest_revision.number
latest_revision_display.short_description = 'Version'
def who_checked_out(obj):
return obj.who_checked_out()
who_checked_out.short_description = 'Checked out by'
def make_editable(modelAdmin, request, queryset):
queryset.update(is_editable=True)
make_editable.short_description = 'Make the selected pages editable'
def make_not_editable(modelAdmin, request, queryset):
queryset.update(is_editable=False)
make_not_editable.short_description = 'Make the selected pages not editable'
def make_checked_in(modelAdmin, request, queryset):
queryset.update(is_checked_out=False)
make_checked_in.short_description = 'Check in the selected pages'
def make_checked_out(modelAdmin, request, queryset):
queryset.update(is_checked_out=True)
make_checked_out.short_description = 'Check out the selected pages'
class RevisionInline(admin.StackedInline):
model = Revision
extra = 1
class WikiPageAdmin(admin.ModelAdmin):
actions = [make_editable, make_not_editable, make_checked_in,
make_checked_out]
inlines = [RevisionInline]
prepopulated_fields = {'slug': ('title', )}
list_display = ('title', latest_revision_display, 'is_editable',
'is_checked_out', who_checked_out, 'created_on', 'edited_on')
list_filter = ('is_editable', 'is_checked_out')
admin.site.register(WikiPage, WikiPageAdmin)
admin.site.register(Revision)
admin.site.register(Comparison)
| mit | -6,895,925,225,505,879,000 | 29.962963 | 76 | 0.73445 | false |
| xSAVIKx/PRESENT-cipher | gmac/util/galue_fields.py | 1 | 3561 |
# Author: Joao H de A Franco (jhafranco@acm.org)
#
# Description: Binary finite field multiplication in Python 3
#
# Date: 2012-02-16
#
# License: Attribution-NonCommercial-ShareAlike 3.0 Unported
# (CC BY-NC-SA 3.0)
# ===========================================================
from functools import reduce
# constants used in the multGF2 function
mask1 = mask2 = polyred = None
def setGF2(degree, irPoly):
"""Define parameters of binary finite field GF(2^m)/g(x)
- degree: extension degree of binary field
- irPoly: coefficients of irreducible polynomial g(x)
"""
global mask1, mask2, polyred
mask1 = mask2 = 1 << degree
mask2 -= 1
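    # Added comment (not part of the original source): irPoly may be given
    # either as a coefficient (bit) list, e.g. [1, 0, 1, 1] for x^3 + x + 1,
    # or as a list of exponents, e.g. [571, 10, 5, 2, 1]. When every entry is
    # 0 or 1 the sum cannot exceed the length, which appears to be how the two
    # forms are told apart below; the leading x^degree term is dropped because
    # only the low-degree remainder is needed for reduction.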
if sum(irPoly) <= len(irPoly):
polyred = reduce(lambda x, y: (x << 1) + y, irPoly[1:])
else:
polyred = poly2Int(irPoly[1:])
def multGF2(p1, p2):
"""Multiply two polynomials in GF(2^m)/g(x)"""
p = 0
while p2:
if p2 & 1:
p ^= p1
p1 <<= 1
if p1 & mask1:
p1 ^= polyred
p2 >>= 1
return p & mask2
# =============================================================================
# Auxiliary formatting functions
# =============================================================================
def int2Poly(bInt):
"""Convert a "big" integer into a "high-degree" polynomial"""
exp = 0
poly = []
while bInt:
if bInt & 1:
poly.append(exp)
exp += 1
bInt >>= 1
return poly[::-1]
def poly2Int(hdPoly):
"""Convert a "high-degree" polynomial into a "big" integer"""
bigInt = 0
for exp in hdPoly:
bigInt += 1 << exp
return bigInt
def i2P(sInt):
"""Convert a "small" integer into a "low-degree" polynomial"""
res = [(sInt >> i) & 1 for i in reversed(range(sInt.bit_length()))]
if len(res) == 0:
res.append(0)
return res
def p2I(ldPoly):
"""Convert a "low-degree" polynomial into a "small" integer"""
return reduce(lambda x, y: (x << 1) + y, ldPoly)
def ldMultGF2(p1, p2):
"""Multiply two "low-degree" polynomials in GF(2^n)/g(x)"""
return multGF2(p2I(p1), p2I(p2))
def hdMultGF2(p1, p2):
"""Multiply two "high-degree" polynomials in GF(2^n)/g(x)"""
return multGF2(poly2Int(p1), poly2Int(p2))
if __name__ == "__main__":
# Define binary field GF(2^3)/x^3 + x + 1
setGF2(3, [1, 0, 1, 1])
# Alternative way to define GF(2^3)/x^3 + x + 1
setGF2(3, i2P(0b1011))
# Check if (x + 1)(x^2 + 1) == x^2
assert ldMultGF2([1, 1], [1, 0, 1]) == p2I([1, 0, 0])
# Check if (x^2 + x + 1)(x^2 + 1) == x^2 + x
assert ldMultGF2([1, 1, 1], [1, 0, 1]) == p2I([1, 1, 0])
# Define binary field GF(2^8)/x^8 + x^4 + x^3 + x + 1
setGF2(8, [1, 0, 0, 0, 1, 1, 0, 1, 1])
# Alternative way to define GF(2^8)/x^8 + x^4 + x^3 + x + 1
setGF2(8, i2P(0b100011011))
# Check if (x)(x^7 + x^2 + x + 1) == x^4 + x^2 + 1
assert ldMultGF2([1, 0], [1, 0, 0, 0, 0, 1, 1, 1]) == p2I([1, 0, 1, 0, 1])
# Check if (x + 1)(x^6 + x^5 + x^3 + x^2 + x) == x^7 + x^5 + x^4 + x
assert ldMultGF2([1, 1], [1, 1, 0, 1, 1, 1, 0]) == p2I([1, 0, 1, 1, 0, 0, 1, 0])
# Define binary field GF(2^571)/x^571 + x^10 + x^5 + x^2 + x
setGF2(571, [571, 10, 5, 2, 1])
# Calculate the product of two polynomials in GF(2^571)/x^571 + x^10 + x^5 + x^2 + x,
# x^518 + x^447 + x^320 + x^209 + x^119 + x + 1 and x^287 + x^145 + x^82 + + x^44
print(int2Poly(hdMultGF2([518, 447, 320, 209, 119, 1, 0], [287, 145, 82, 44])))
| apache-2.0 | 7,770,864,948,732,986,000 | 28.92437 | 89 | 0.505757 | false |
| penguintutor/networking-quiz | src/quizapp.py | 1 | 12224 |
# This uses the dev branch of guizero which needs to be linked to the appropriate
# directory - in future this will use the normal production version of guizero
from guizero.build.lib.guizero import App, Text, PushButton, info, MenuBar, Picture, yesno
import quizdetails
# For testing the gui without the arduino comment out the quizarduino entry and replace with quizarduinodev
import quizarduino
#import quizarduinodev as quizarduino
import quizstrings
import time
from tkinter import filedialog
class QuizApp():
## These values are hardcoded here in this version
quiz_filename = "quizzes/quiz1.json"
serial_port = '/dev/ttyACM0'
def __init__ (self, app):
self.app = app
# Load Strings for pages
self.strings = quizstrings.QuizStrings()
self.strings.load()
# Questions are held in QuizDetails
self.quiz = quizdetails.QuizDetails()
# Setup serial connection to arduino
self.arduino = quizarduino.QuizArduino(self.serial_port)
self.arduino.connect()
# send blue to indicate startup
self.arduino.send_recv ([3,3,3,3,3,3])
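        # Added comment (not part of the original source): each call to
        # send_recv() takes a list of six per-question LED states; from the
        # usage below, 0 appears to mean off, 1 marks a correct answer, 2 an
        # incorrect one and 3 (blue) highlights the current question or a
        # busy state. The exact colours for 1 and 2 are an assumption
        # inferred from end_quiz().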
def open_quiz_file(self):
filename = filedialog.askopenfilename(initialdir = "quizzes/",title = "Select file",filetypes = (("Quiz files","*.json"),("all files","*.*")))
# If valid filename then update
if (filename):
self.quiz_filename = filename
self.load_quiz()
self.home()
# Updates screen to a different page
# Updates button labels, but not their functions
def upd_page(self, page_name):
page_strings = self.strings.getPage(page_name)
self.text_title.value = self.strings.getTitle()
self.text_question_title.value = page_strings["title"]
self.text_question_details_1.value = page_strings["details"][0]
self.text_question_details_2.value = page_strings["details"][1]
self.text_question_details_3.value = page_strings["details"][2]
self.text_question_details_4.value = page_strings["details"][3]
self.text_question_details_5.value = page_strings["details"][4]
self.text_question_details_6.value = page_strings["details"][5]
self.text_question_option_1.value = page_strings["options"][0]
self.text_question_option_2.value = page_strings["options"][1]
self.text_question_option_3.value = page_strings["options"][2]
self.text_question_option_4.value = page_strings["options"][3]
self.image_question.value = "images/"+page_strings["image"]
self.left_button.text = page_strings["left_button"]
self.right_button.text = page_strings["right_button"]
# Set home page with appropriate values
def home(self):
self.upd_page("home")
# update buttons
# left button does nothing (returns here)
self.left_button.change_command(self.home)
self.right_button.change_command(self.start_quiz)
# Reset quiz position to 0
# Updates buttons on gui to reflect first and last buttons
# Also highlights appropriate port for question
def upd_buttons(self):
if self.quiz.isFirst():
self.left_button.text="Return"
self.left_button.change_command(self.home)
else:
self.left_button.text="<< Previous"
self.left_button.change_command(self.prev_question)
if self.quiz.isLast():
self.right_button.text="End Quiz"
self.right_button.change_command(self.end_quiz)
else:
self.right_button.text="Next >>"
self.right_button.change_command(self.next_question)
# Light up the current question
status_leds = [0,0,0,0,0,0]
status_leds[self.quiz.getQuestionNum()] = 3
self.arduino.send_recv(status_leds)
# Load quiz from disk
def load_quiz(self):
self.quiz.load(self.quiz_filename)
pass
# Start the quiz
def start_quiz(self):
self.load_quiz()
self.text_title.value = self.quiz.getTitle()
self.upd_question()
self.upd_buttons()
#print ("Start Quiz - Q "+str(self.quiz.getQuestionNum()))
# Update display of question
def upd_question(self):
#print ("Show question - Q "+str(self.quiz.getQuestionNum()))
this_question = self.quiz.getQuestion()
self.text_question_title.value = this_question.getTitle()
details = this_question.getDetails()
self.text_question_details_1.value = details[0]
self.text_question_details_2.value = details[1]
self.text_question_details_3.value = details[2]
self.text_question_details_4.value = details[3]
self.text_question_details_5.value = details[4]
self.text_question_details_6.value = details[5]
options = this_question.getOptions()
self.text_question_option_1.value = options[0]
self.text_question_option_2.value = options[1]
self.text_question_option_3.value = options[2]
self.text_question_option_4.value = options[3]
self.image_question.value = "images/"+this_question.getImage()
# Move to prev question
def prev_question(self):
self.quiz.prevQuestion()
self.upd_question()
self.upd_buttons()
# Move to next question
def next_question(self):
#print ("Nex Q - was "+str(self.quiz.getQuestionNum()))
self.quiz.nextQuestion()
self.upd_question()
self.upd_buttons()
#print ("Nex Q - now "+str(self.quiz.getQuestionNum()))
# Allows to restart and retry same quiz
def review(self):
# Reset to question 1 and restart
self.quiz.setQuestionNum(0)
self.upd_question()
self.upd_buttons()
# End quiz
def end_quiz(self):
# Check with user they really want to end
mark_quiz = yesno("Exam completed", "Have you answered all the questions?")
if (mark_quiz == False):
return
# Set all leds blue to indicate marking and get status
status_leds = [3,3,3,3,3,3]
given_answers = self.arduino.send_recv(status_leds)
score = 0
# compare given_answers with correct answers
details = []
for i in range (0,6):
# get the question
this_question = self.quiz.getQuestion(i)
# compare whether answer correct
#print ("Question "+str(i)+" given answer "+str(given_answers[i])+" correct answer "+str(this_question.getAnswer()))
if (given_answers[i] == this_question.getAnswer()):
# correct answer
score += 1
details.append(this_question.getTitle()+ " is correct, Answer = "+ this_question.getAnswerLetter())
status_leds[i] = 1
else:
details.append(this_question.getTitle()+ " is incorrect, Correct answer = "+ this_question.getAnswerLetter())
status_leds[i] = 2
self.text_question_title.value = "Results"
self.text_question_details_1.value = details[0]
self.text_question_details_2.value = details[1]
self.text_question_details_3.value = details[2]
self.text_question_details_4.value = details[3]
self.text_question_details_5.value = details[4]
self.text_question_details_6.value = details[5]
# Set eval based on score
if (score < 2) :
eval_string = "Your network is NOT working"
eval_image = "poor.gif"
elif (score > 4) :
eval_string = "High speed network"
eval_image = "good.gif"
else:
eval_string = "Network performance acceptable"
eval_image = "average.gif"
# Show score and updated image
self.text_question_option_1.value = ""
self.text_question_option_2.value = "Score "+str(score)+" out of 6"
self.text_question_option_3.value = ""
self.text_question_option_4.value = eval_string
self.image_question.value = "images/"+eval_image
# Update LEDs with status
self.arduino.send_recv(status_leds)
# Set back button "Review" - goes back to first question to allow retry
self.left_button.text="Review"
self.left_button.change_command(self.review)
# Set right button to home to restart process
self.right_button.text="Home"
self.right_button.change_command(self.home)
# Open a new quiz
def file_open(self):
        ## TODO: load a different quiz
        self.open_quiz_file()
# exit the self.app
def file_exit(self):
self.app.destroy()
# About
def help_about(self):
info("About Quiz", "Created by Stewart Watkiss\nhttp://www.penguintutor.com")
def setup_gui(self):
menubar = MenuBar(self.app,
toplevel=["File", "Help"],
options=[
[ ["Open",self.file_open],["Exit", self.file_exit] ] ,
[ ["About", self.help_about] ]
])
# Text / buttons are created without any details and are then updated
# based on the quizstrings.json file
        # This is done prior to app.display() so the user will not see flicker etc.
# column 0 and row 0 are used for dummy images for spacing
# cols 1 to 5 used for actual display
# dimensions shown to the right are minimum (using image)
padding0_0 = Picture(self.app, image="layout/0_0.gif", grid=[0,0]) # 1 pixel
padding1_0 = Picture(self.app, image="layout/1_0.gif", grid=[1,0]) # 100 pixel
padding2_0 = Picture(self.app, image="layout/2_0.gif", grid=[2,0]) # 550 pixel
padding2_0 = Picture(self.app, image="layout/3_0.gif", grid=[3,0]) # 100 pixel
padding3_0 = Picture(self.app, image="layout/4_0.gif", grid=[4,0]) # 100 pixel
padding0_2 = Picture(self.app, image="layout/0_2.gif", grid=[0,2]) # 100 pixel
        padding0_13 = Picture(self.app, image="layout/0_13.gif", grid=[0,13]) # 100 pixel
self.text_title = Text(self.app, text="", size=30, grid=[2,1,2,1])
image_logo = Picture(self.app, image="images/logo.gif", grid=[4,1,2,1])
self.text_question_title = Text(self.app, text="", align="left", size=25, grid=[1,2,2,1])
self.text_question_details_1 = Text(self.app, text="", align="left", size=18, grid=[1,3,3,1])
self.text_question_details_2 = Text(self.app, text="", align="left", size=18, grid=[1,4,2,1])
self.text_question_details_3 = Text(self.app, text="", align="left", size=18, grid=[1,5,2,1])
self.text_question_details_4 = Text(self.app, text="", align="left", size=18, grid=[1,6,2,1])
self.text_question_details_5 = Text(self.app, text="", align="left", size=18, grid=[1,7,2,1])
self.text_question_details_6 = Text(self.app, text="", align="left", size=18, grid=[1,8,2,1])
self.text_question_option_1 = Text(self.app, text="", align="left", size=18, grid=[1,9,2,1])
self.text_question_option_2 = Text(self.app, text="", align="left", size=18, grid=[1,10,2,1])
self.text_question_option_3 = Text(self.app, text="", align="left", size=18, grid=[1,11,2,1])
self.text_question_option_4 = Text(self.app, text="", align="left", size=18, grid=[1,12,2,1])
self.image_question = Picture(self.app, image="images/quiz.gif", grid=[3,3,3,9])
self.left_button = PushButton(self.app, text="", command=self.prev_question, grid=[1,13])
self.right_button = PushButton(self.app, text="", command=self.start_quiz, grid=[5,13])
self.home()
self.app.display()
|
gpl-3.0
| 7,559,067,779,343,305,000 | 39.343234 | 151 | 0.590396 | false |
cellular-nanoscience/pyotic
|
pyoti/modification/modification.py
|
1
|
26859
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:22:31 2016
@author: Tobias Jachowski
"""
import collections.abc
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta, abstractmethod
from .. import gui
from .. import helpers as hp
from .. import traces as tc
from ..evaluate import signal as sn
from ..graph import GraphMember
from ..picklable import InteractiveAttributes
class GraphicalMod(object):
"""
This class's subclasses should implement `_figure()` and `_update_fig()`,
which return and update a matplotlib figure, respectively. The figure can
be accessed by `self.figure`.
Parameters
----------
    modification : Modification
        The `Modification` whose parameters are represented and adjusted by
        the figure.
"""
def __init__(self, modification=None, **kwargs):
# Register the modification which should be graphically adjusted
self.modification = modification
# Initialize figure to None, which effectively disables
        # `self.update_fig()` and Co. and prevents them from throwing an error
self._fig = None
def _set_plot_params(self, plot_params=None):
if plot_params is None:
plot_params = {}
gui.set_plot_params(plot_params=plot_params)
def display(self, plot_params=None):
self.init_fig(plot_params=plot_params)
def init_fig(self, show=True, plot_params=None):
"""
        This method calls `self._figure()` to create an interactive figure and
        interact with the user to determine the parameters necessary to
        calculate the modification (see `self._recalculate()`), and
        `self._close_fig()` to release all references to the actors of the
        figure.
        `self._figure()` and `self._close_fig()` should be (over)written by
        subclasses.
"""
# Only create a figure, if the function `self._figure()` is implemented
if not hasattr(self, '_figure'):
return
# close the figure
# nbagg backend needs to have the figure closed and recreated
# whenever the code of the cell displaying the figure is executed.
# A simple update of the figure would let it disappear. Even a
# self.figure.show() wouldn't work anymore.
        # For other backends this just means a bit of extra calculation.
# Therefore, close the figure first before replotting it.
self.close_fig()
# set default plot parameters, can be recalled / overwritten in
# `self._figure()`
self._set_plot_params(plot_params=plot_params)
# create the figure
self.figure = self._figure()
# update the figure
self.update_fig()
# show the figure
if show:
self.figure.show()
def update(self, **kwargs):
self.update_fig(**kwargs)
def update_fig(self, **kwargs):
if self._fig is not None:
self._update_fig(**kwargs)
self._figure_canvas_draw()
def _update_fig(self, **kwargs):
pass
def close_fig(self):
if self._fig is not None:
self._pre_close_fig()
self._close_fig()
self._post_close_fig()
def _pre_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _close_fig(self):
# force redraw of the figure
self._figure_canvas_draw()
# close the figure
plt.close(self.figure)
# release memory
self.figure = None
def _post_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _figure_canvas_draw(self):
        # Some matplotlib backends will throw an error when trying to draw the
        # canvas. Simply ignoring the error that could happen here prevents the
        # figure from not being closed, i.e. from being left open and blocking
        # the next figure from being drawn. Even though the "except: pass"
        # clause is considered bad, here the worst thing that could happen is
        # that the figure produced by the matplotlib backend upon closing is
        # not updated. Therefore, "except: pass" should be considered an
        # acceptable workaround for this case.
try:
# redraw the figure, before closing it
self.figure.canvas.draw()
except:
pass
@property
def figure(self):
"""
The matplotlib figure that represents and/or adjusts the parameters of
`self.modification`.
"""
# Automatically initialize a figure
if self._fig is None:
self.init_fig(show=False)
# Return a previously initialized figure
return self._fig
@figure.setter
def figure(self, figure):
self._fig = figure
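# Illustrative sketch (not part of the original module): a minimal GraphicalMod
# subclass. The trace name 'psdX' and the plotting details are assumptions.
#
#   class OffsetGraphicalMod(GraphicalMod):
#       def _figure(self):
#           fig, ax = plt.subplots()
#           data = self.modification._get_data_based(traces=['psdX'],
#                                                     decimate=True)
#           self._line, = ax.plot(data[:, 0])
#           return fig
#
#       def _update_fig(self, **kwargs):
#           data = self.modification._get_data_based(traces=['psdX'],
#                                                     decimate=True)
#           self._line.set_ydata(data[:, 0])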
class Modification(GraphMember, metaclass=ABCMeta):
"""
Modification is an abstract class, that implements methods to modify the
data of a `View` (`view_apply`) and adjust the parameters which control the
behaviour of the modifications applied.
Whenever one of the parameters needed to calculate the modification is
changed, the view, this modification is applied to, is informed.
    `self.set_changed()` has to be called upon any change of the modification
    that influences the behaviour of `self.modify()`. In essence, these are all
    parameters that are used to determine the modification. Therefore, it
    should be called by all setters of the parameters/attributes.
Every subclass of Modification has to implement a constructor method
`self.__init__(self, **kwargs)`, which calls the superclasses' constructor
and sets the traces, the modification is applied to with the keyword
parameter `traces_apply`. An example could be:
super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
"""
# set a graphical modification, which will, per default, do nothing
GRAPHICALMOD = GraphicalMod
def __init__(self, traces_apply=None, view_apply=None, view_based=None,
automatic_switch=False, datapoints=-1, **kwargs):
# Call the constructor of the superclass `GraphMember` and set the
# maximum allowed number of parents (`view_based`) and childs
# (`view_apply`) to one.
super().__init__(max_children=1, max_parents=1, **kwargs)
# A `Modification` has to be applied to a `View`!
if view_apply is None:
raise TypeError("Modification missing required positional argument"
" `view_apply`.")
# Set the view, from where the parameters for the modification are
# calculated from
if view_based is not None:
self.view_based = view_based
# Set the view, whose data is going to be modified
self.view_apply = view_apply
# Set the traces, which are modified by this `Modification`
self.traces_apply = traces_apply
# Initialize InteractiveAttributes object, which will hold all the
# parameters that the user should interact with.
self.iattributes = InteractiveAttributes()
# A checkbox to switch on/off the automatic determination of the
# parameters that are used to calculate the modification in the method
# `self.recalculate()`. The attribute `self.automatic` is checked in
# the method `self.recalculate()`. If `automatic` is True, the
# parameters are recalculated, otherwise the parameters are left
# unchanged. Whenever `automatic` is changed (by the user or
# automatically), `self.evaluate()` is called.
if automatic_switch:
self.add_iattribute('automatic', description='Automatic mode',
value=True, unset_automatic=False,
set_changed=False,
callback_functions=[self.evaluate])
# A checkbox to de-/activate this `Modification`. This attribute gets
# evaluated by `self.modify()`. If the `Modification` is active, it
# modifies data, otherwise not, i.e. modify() returns modified or
# unmodified original data, respectively.
desc = "".join((self.__class__.__name__, " active"))
self.add_iattribute('active', description=desc, value=True,
unset_automatic=False)
# Datapoints is used to calculate and/or present modification. The
# attribute `datapoints` is used to calculate a decimating factor and
# speed up the calculations and/or plot commands.
if datapoints > 0:
desc = "Datapoints to calculate/visualize modification"
self.add_iattribute('datapoints', description=desc,
value=datapoints, unset_automatic=False)
# Add a Button to manually call the method `self.evaluate()`.
self.add_iattribute('evaluate', description='Evaluate',
unset_automatic=False, set_changed=False,
callback_functions=[self.evaluate])
def add_iattribute(self, key, description=None, value=None,
unset_automatic=True, set_changed=True,
callback_functions=None, **kwargs):
"""
        Add an interactive attribute (widget).
        If the widget is registered with `unset_automatic=True` (the default),
        any change of the widget switches off the automatic mode. Pass
        `unset_automatic=False` to keep the automatic mode untouched.
        If the widget is registered with `set_changed=True` (the default), any
        change of the widget triggers a call of `self.set_changed()`.
"""
if callback_functions is None:
callback_functions = []
if unset_automatic:
callback_functions.append(self._unset_automatic)
if set_changed:
callback_functions.append(self.set_changed)
self.iattributes.add(key, description=description, value=value,
callback_functions=callback_functions, **kwargs)
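    # Illustrative example (hypothetical attribute name and value, not part of
    # the original module):
    #   self.add_iattribute('offset', description='Offset', value=0.0)
    # creates a widget whose changes unset the automatic mode and trigger
    # self.set_changed().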
def _unset_automatic(self, leave_automatic=False, **kwargs):
"""
Add the logic for the automatic checkbox. If the value of an attribute
is changed and the attribute was created with `unset_automatic=True`,
deactivate the automatic mode (see `self.add_iattribute()`). To
temporarily leave the automatic mode status untouched when changing the
value of an attribute, i.e. not unset the automatic mode, set the value
of the attribute with the keyword argument `leave_automatic=True`
(see method `self.iattributes.set_value()`)
"""
if not leave_automatic:
self.iattributes.set_value('automatic', False, callback=False)
def evaluate(self):
"""
Implement the (re)calculation for the values necessary to calculate the
modification in the subclass and call recalculate() of the superclass
(this class).
"""
if self.updated:
# This method makes sure the modification is calculated with the
# current values of the View this modification is based on. It is
# called by self.modify().
            # When a View requests data, it calls modify(), which in turn calls
            # recalculate(). recalculate(), if necessary, calls
            # get_data_modified() from the View it is based on, which again
            # triggers a call of modify() and a subsequent recalculate() of all
            # modifications associated with this View.
# Modification need update, because view, this mod is based on,
# was changed.
# self._view_based.evaluate()is not needed, it is called via:
# recalculate() -> get_data_based() -> _view_based.get_data() ->
# get_modified_data() -> super().evaluate()
return
# Recalculate and print info of recalculated values if in automatic
# mode
if self.recalculate():
self.print_info()
# Update figure after recalculation has taken place
self.graphicalmod.update()
def recalculate(self):
# Check if recalculation of parameters is necessary
if self.updated:
return False
# Check the attribute self.automatic, whether the parameters needed for
# the calculation of the modification should be determined
# automatically or not. If values are set manually, no recalculation is
# necessary, and `self` is therefore up to date.
if not self.automatic:
self.updated = True
return True
# Recalculate the parameters, inform the view this `Modification`
# is applied to about the change, and set `self` to be updated.
self._recalculate()
self.set_changed(updated=True)
return True
def _recalculate(self):
"""
This method should be overwritten by subclasses and perform the
recalculation necessary to determine the parameters used by this
Modification to modify the data in `self._modify()`.
"""
pass
def print_info(self):
print("Values for Modification of class %s:"
% self.__class__.__name__)
if not self.automatic:
print(" Parameters set manually!")
for key, widget in self.iattributes._widgets.items():
if hasattr(widget, 'value'):
if isinstance(widget.value, float):
print(" %s: %.5f" % (widget.description, widget.value))
                if isinstance(widget.value, collections.abc.Iterable):
print(" %s: %s" % (widget.description, widget.value))
self._print_info()
def _print_info(self):
"""
        This method should be overwritten by subclasses which want to print
        extra info in addition to the info of the calculated parameters.
"""
pass
def modify(self, data, samples, traces_idx):
"""
Modifies data and returns the modified array.
Parameters
----------
data : 2D numpy.ndarray of type float
`data` holds the data to be modified
samples : index array or slice
`samples` is the index of the samples that was used to get the
`data`
        traces_idx : index array or slice
            `traces_idx` is the index of the traces that was used to get the
            `data`
"""
# Modification is active.
if self.active:
# Check if traces contained in data are modified by this
# modification.
data_traces = self.view_apply.idx_to_traces(traces_idx)
mod_traces = self.traces_apply
# Calculate the indices of traces contained in data and
# modification. First, calculate indices of modification traces.
mod_index = hp.overlap_index(mod_traces, data_traces)
if len(mod_index) > 0:
# At least one trace exists in both data and modification.
# Therefore, the data needs to be modified...
mod_index = hp.slicify(mod_index)
# Calculate indices of traces of the data in such a way that
# `data[:, data_index]` indexes the same traces as
# `self.traces_apply[mod_index]`
data_index = np.array([data_traces.index(trace)
for trace
in np.array(mod_traces)[mod_index]])
data_index = hp.slicify(data_index)
# Trigger a recalculation of the parameters for the
# modification (if necessary) before modifying the data.
self.evaluate()
# Modify and return the modified data
return self._modify(data=data,
samples=samples,
data_traces=data_traces,
data_index=data_index,
mod_index=mod_index)
# Return unmodified data
return data
@abstractmethod
def _modify(self, data, samples, data_traces, data_index, mod_index):
"""
Is called by self.modify() whenever data is requested and needs to be
modified.
Parameters
----------
data : 2D numpy.array()
Contains the data, indexed by samples and data_traces
samples : slice or 1D numpy.array()
Is the index of the samples contained in data, which was
given/asked by the user/process who called _get_data().
data_traces : list of str
Contains a list of traces (str) existent in data, which
was given/asked by the user/process who called _get_data().
data_index : slice or 1D numpy.array()
data[:, data_index] gives the data, which is modified by
this modification
mod_index : slice or 1D numpy.array()
np.array(self.traces_apply)[mod_index] gives the traces,
which are existent in data and also modified by this modfication.
Returns
-------
2D numpy.array()
The modified data.
"""
# modify data here, like so:
# data[:,data_index] -= modification[:,mod_index]
return data
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, value):
"""
        Gets set to True after all `Views` this `Modification` is based on
        have been updated and after this `Modification` has been recalculated.
        This is automatically taken care of by `self.evaluate()` ->
        `self.recalculate()`.
        Gets called by a `View` this `Modification` is based on whenever the
        `View` (or a `Modification` of the `View`) has been changed. It
        automatically informs its own `View` that there was a change by
        calling `self.set_changed()`.
"""
self._updated = value
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# If a change of an ancestor View or a MultiRegion was triggered by an
# index_shift, the modification needs to recalculate itself, i.e.
        # the modification will alter its changing behaviour. Because an
# index_shift change is only transmitted to `level=1`, inform the
# descendants of the change itself. A change of descendants is ignored.
if index_shift is not None and not calledfromself and ancestor:
self.set_changed(includeself=False)
# Update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _get_data(self, based=True, samples=None, traces=None, window=False,
decimate=False, copy=True):
if based:
view = self.view_based
else:
view = self.view_apply
if not isinstance(window, bool) and isinstance(window, int):
window = window
elif window:
window = self.decimate
else:
window = 1
if not isinstance(decimate, bool) and isinstance(decimate, int):
decimate = decimate
elif decimate:
decimate = self.decimate
else:
decimate = 1
if not based:
old_active = self.iattributes.active
self.iattributes.set_value('active', False, callback=False)
data = view.get_data(traces=traces, samples=samples,
moving_filter='mean', window=window,
decimate=decimate, copy=copy)
if not based:
self.iattributes.set_value('active', old_active, callback=False)
return data
def _get_data_based(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
        decimate is False by default. If decimate is True, it is only used if
        samples is set to None (step information in samples takes precedence
        over decimate).
"""
return self._get_data(based=True, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def _get_data_apply(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
        Get data of view_apply with all modifications applied, except self.
        This is achieved by temporarily deactivating this modification: the
        'active' attribute is set without triggering callbacks, so
        self.set_changed() does not fire (see self._get_data()).
        decimate is False by default. If decimate is True, it is only used if
        samples is set to None (step information in samples takes precedence
        over decimate).
"""
return self._get_data(based=False, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def calculate_bin_means(self, data=None, traces=None, bins=None,
datapoints_per_bin=None, sorttrace=0):
"""
Calculates binned means based on the data to be fitted. The binned
means are usually used by data fitting routines.
Parameters
----------
data : 2D numpy.ndarray of type float, optional
Defaults to `self._get_data_based(traces=traces, decimate=True)`.
traces : str or list of str, optional
Defaults to `self.traces_apply`.
bins : int, optional
Number of bins that contain the datapoints to be averaged. If
possible, it defaults to (`self.iattributes.datapoints` /
`datapoints_per_bin`), otherwise bins defaults to
(`self.view_based.datapoints` / `datapoints_per_bin`).
datapoints_per_bin : int, optional
Average number of datapoints to be averaged in one bin. Defaults to
25.
sorttrace : int, optional
Trace (column) of `data` that acts as sorting index upon binning
for the rest of the data. Defaults to the first trace of the data.
Returns
-------
1D numpy.ndarray of type float
The averaged bin values.
float
The size of one bin.
"""
# Bin data and average bins to prevent arbitrary weighting of bins with
# more datapoints
if bins is None:
bins = self._bins(datapoints_per_bin=datapoints_per_bin)
# get the traces to retrieve data from
if traces is None:
traces = self.traces_apply
# get the data to bin
if data is None:
data = self._get_data_based(traces=traces, decimate=True)
# create the bins based on one trace of the data
minimum = np.min(data[:, sorttrace])
maximum = np.max(data[:, sorttrace])
edges = np.linspace(minimum, maximum, bins + 1)
# Get the indices of the bins to which each value in input array
# belongs.
bin_idx = np.digitize(data[:, sorttrace], edges)
# Find which points are on the rightmost edge.
on_edge = data[:, sorttrace] == edges[-1]
# Shift these points one bin to the left.
bin_idx[on_edge] -= 1
# fill the bins with the means of the data contained in each bin
bin_means = np.array([data[bin_idx == i].mean(axis=0)
for i in range(1, bins + 1)
if np.any(bin_idx == i)])
bin_width = edges[1] - edges[0]
return bin_means, bin_width
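    # Illustrative use of calculate_bin_means() (hypothetical traces, not part
    # of the original module):
    #   bin_means, bin_width = self.calculate_bin_means(
    #       traces=['positionX', 'psdX'], datapoints_per_bin=50)
    #   # bin_means[:, 0] holds the averaged positionX values per bin and
    #   # bin_means[:, 1] the corresponding averaged psdX values.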
def _bins(self, datapoints_per_bin=None):
# On average 25 datapoints per bin
datapoints_per_bin = datapoints_per_bin or 25
if 'datapoints' in self.iattributes:
bins = self.iattributes.datapoints / datapoints_per_bin
else:
bins = self.view_based.datapoints / datapoints_per_bin
bins = max(1, int(np.round(bins)))
return bins
_NAME = {
'position': ['positionX', 'positionY'],
'psd': ['psdX', 'psdY'],
'axis': ['X', 'Y']
}
def _excited(self, traces=None):
traces = traces or ['positionX', 'positionY']
data = self._get_data_based(traces=traces, copy=False)
return sn.get_excited_signal(data)
def interact(self):
self.recalculate()
self.iattributes.display()
self.graphicalmod.display()
@property
def graphicalmod(self):
# ZODB volatile
if not hasattr(self, '_v_graphicalmod'):
self._v_graphicalmod \
= self.__class__.GRAPHICALMOD(modification=self)
return self._v_graphicalmod
@property
def active(self):
active = False
if 'active' in self.iattributes:
active = self.iattributes.active
return active
@active.setter
def active(self, active=True):
if 'active' in self.iattributes:
self.iattributes.active = active
@property
def automatic(self):
# Does the modification automatically calculate its parameters
automatic = True
if 'automatic' in self.iattributes:
automatic = self.iattributes.automatic
return automatic
@property
def datapoints(self):
if 'datapoints' in self.iattributes:
return self.iattributes.datapoints
else:
return self.view_based.datapoints
@property
def decimate(self):
if 'datapoints' in self.iattributes:
return max(1, int(np.round(self.view_based.datapoints
/ self.datapoints)))
else:
return 1
@property
def view_based(self):
return self.parent
@property
def view_apply(self):
return self.child
@view_based.setter
def view_based(self, view):
self.set_parent(view)
@view_apply.setter
def view_apply(self, view):
self.set_child(view)
def lia(self, trace):
"""
Return the local index of trace in traces_apply
"""
return self.traces_apply.index(trace)
@property
def traces_apply(self):
# return a copy to protect local copy
return self._traces_apply.copy()
@traces_apply.setter
def traces_apply(self, traces):
if traces is None:
traces_apply = []
else:
traces_apply = tc.normalize(traces)
self._traces_apply = traces_apply
|
apache-2.0
| -1,852,494,407,888,080,000 | 37.757576 | 82 | 0.601102 | false |
dougbenjamin/panda-harvester
|
pandaharvester/harvestermisc/apfmon.py
|
1
|
16445
|
"""
API described here: http://apfmon.lancs.ac.uk/help
"""
import requests
import json
import time
import traceback
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester import panda_pkg_info
from pandaharvester.harvestermisc import generic_utils
from pandaharvester.harvestercore.work_spec import WorkSpec
from pandaharvester.harvestermisc.info_utils import PandaQueuesDict
_base_logger = core_utils.setup_logger('apfmon')
NO_CE = 'noCE'
def apfmon_active(method):
    # Decorator: run the wrapped Apfmon method only when APFMon reporting is
    # enabled (reads the instance's name-mangled private flag).
    def active_method(self, *args, **kwargs):
        if getattr(self, '_Apfmon__active', False):
            return method(self, *args, **kwargs)
    return active_method
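# Illustrative usage of the decorator above (a sketch, not applied in this
# file):
#   class Apfmon(object):
#       @apfmon_active
#       def create_factory(self):
#           ...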
def clean_ce(ce):
return ce.split('.')[0].split('://')[-1]
class Apfmon(object):
def __init__(self, queue_config_mapper):
try:
self.__active = harvester_config.apfmon.active
except:
self.__active = False
try:
self.__worker_timeout = harvester_config.apfmon.worker_timeout
except:
self.__worker_timeout = 0.5
try:
self.__worker_update_timeout = harvester_config.apfmon.worker_timeout
except:
self.__worker_update_timeout = 0.2
try:
self.__label_timeout = harvester_config.apfmon.worker_timeout
except:
self.__label_timeout = 1
# TODO: make proper exception handling and defaults
try:
self.harvester_id = harvester_config.master.harvester_id
except:
self.harvester_id = 'DUMMY'
try:
self.base_url = harvester_config.apfmon.base_url
except:
self.base_url = 'http://apfmon.lancs.ac.uk/api'
self.queue_config_mapper = queue_config_mapper
def create_factory(self):
"""
Creates or updates a harvester instance to APF Mon. Should be done at startup of the instance.
"""
start_time = time.time()
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='create_factory')
if not self.__active:
tmp_log.debug('APFMon reporting not enabled')
return
try:
tmp_log.debug('start')
url = '{0}/factories/{1}'.format(self.base_url, self.harvester_id)
f = {'url': 'url_to_logs',
'email': 'atlas-adc-harvester-central-support@cern.ch',
'version': panda_pkg_info.release_version}
payload = json.dumps(f)
r = requests.put(url, data=payload, timeout=self.__label_timeout)
tmp_log.debug('registration ended with {0} {1}'.format(r.status_code, r.text))
end_time = time.time()
tmp_log.debug('done (took {0})'.format(end_time - start_time))
except:
tmp_log.error('Excepted with: {0}'.format(traceback.format_exc()))
def create_labels(self):
"""
Creates or updates a collection of labels (=panda queue+CE)
"""
start_time = time.time()
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='create_labels')
if not self.__active:
tmp_log.debug('APFMon reporting not enabled')
return
try:
tmp_log.debug('start')
url = '{0}/labels'.format(self.base_url)
# get the active queues from the config mapper
all_sites = self.queue_config_mapper.get_active_queues().keys()
panda_queues_dict = PandaQueuesDict()
# publish the active queues to APF mon in shards
for sites in generic_utils.create_shards(all_sites, 20):
labels = []
for site in sites:
try:
site_info = panda_queues_dict.get(site, dict())
if not site_info:
tmp_log.warning('No site info for {0}'.format(site))
continue
# when no CEs associated to a queue, e.g. P1, HPCs, etc. Try to see if there is something
# in local configuration, otherwise set it to a dummy value
try:
ce = self.queue_config_mapper.queueConfig[site].submitter['ceEndpoint']
queues = [{'ce_endpoint': ce}]
except KeyError:
if site_info['queues']:
queues = site_info['queues']
else:
queues = [{'ce_endpoint': NO_CE}]
for queue in queues:
try:
ce = clean_ce(queue['ce_endpoint'])
except:
ce = ''
try:
ce_queue_id = queue['ce_queue_id']
except KeyError:
ce_queue_id = 0
labels.append({'name': '{0}-{1}'.format(site, ce),
'wmsqueue': site,
'ce_queue_id': ce_queue_id,
'factory': self.harvester_id})
except:
tmp_log.error('Excepted for site {0} with: {1}'.format(site, traceback.format_exc()))
continue
payload = json.dumps(labels)
r = requests.put(url, data=payload, timeout=self.__label_timeout)
tmp_log.debug('label creation for {0} ended with {1} {2}'.format(sites, r.status_code, r.text))
end_time = time.time()
tmp_log.debug('done (took {0})'.format(end_time - start_time))
except:
tmp_log.error('Excepted with: {0}'.format(traceback.format_exc()))
def massage_label_data(self, data):
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='massage_label_data')
if not data:
return data
try:
any = data['ANY']
agg = {}
for rtype in data:
if rtype == 'ANY':
continue
else:
for value in data[rtype]:
agg.setdefault(value, 0)
agg[value] += data[rtype][value]
if agg:
data['ANY'] = agg
else:
data['ANY'] = any
tmp_log.debug('Massaged to data: {0}'.format(data))
except Exception:
tmp_log.debug('Exception in data: {0}'.format(data))
return data
def update_label(self, site, msg, data):
"""
Updates a label (=panda queue+CE)
"""
start_time = time.time()
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='update_label')
if not self.__active:
tmp_log.debug('APFMon reporting not enabled')
return
try:
tmp_log.debug('start')
data = self.massage_label_data(data)
# get the active queues from the config mapper
all_sites = self.queue_config_mapper.get_active_queues().keys()
panda_queues_dict = PandaQueuesDict()
site_info = panda_queues_dict.get(site, dict())
if not site_info:
tmp_log.warning('No site info for {0}'.format(site))
return
# when no CEs associated to a queue, e.g. P1, HPCs, etc. Try to see if there is something
# in local configuration, otherwise set it to a dummy value
try:
ce = self.queue_config_mapper.queueConfig[site].submitter['ceEndpoint']
queues = [{'ce_endpoint': ce}]
except KeyError:
if site_info['queues']:
queues = site_info['queues']
else:
queues = [{'ce_endpoint': NO_CE}]
for queue in queues:
try:
try:
ce = clean_ce(queue['ce_endpoint'])
except:
ce = ''
label_data = {'status': msg, 'data': data}
label = '{0}-{1}'.format(site, ce)
label_id = '{0}:{1}'.format(self.harvester_id, label)
url = '{0}/labels/{1}'.format(self.base_url, label_id)
r = requests.post(url, data=json.dumps(label_data), timeout=self.__label_timeout)
tmp_log.debug('label update for {0} ended with {1} {2}'.format(label, r.status_code, r.text))
except:
tmp_log.error('Excepted for site {0} with: {1}'.format(label, traceback.format_exc()))
end_time = time.time()
tmp_log.debug('done (took {0})'.format(end_time - start_time))
except:
tmp_log.error('Excepted with: {0}'.format(traceback.format_exc()))
def create_workers(self, worker_spec_list):
"""
Creates a worker
"""
start_time = time.time()
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='create_workers')
if not self.__active:
tmp_log.debug('APFMon reporting not enabled')
return
try:
tmp_log.debug('start')
url = '{0}/jobs'.format(self.base_url)
for worker_spec_shard in generic_utils.create_shards(worker_spec_list, 20):
apfmon_workers = []
for worker_spec in worker_spec_shard:
batch_id = worker_spec.batchID
worker_id = worker_spec.workerID
if not batch_id:
tmp_log.debug('no batchID found for workerID {0}... skipping'.format(worker_id))
continue
factory = self.harvester_id
computingsite = worker_spec.computingSite
try:
ce = clean_ce(worker_spec.computingElement)
except AttributeError:
tmp_log.debug('no CE found for workerID {0} batchID {1}'.format(worker_id, batch_id))
ce = NO_CE
# extract the log URLs
stdout_url = ''
stderr_url = ''
log_url = ''
jdl_url = ''
work_attribs = worker_spec.workAttributes
if work_attribs:
if 'stdOut' in work_attribs:
stdout_url = work_attribs['stdOut']
jdl_url = '{0}.jdl'.format(stdout_url[:-4])
if 'stdErr' in work_attribs:
stderr_url = work_attribs['stdErr']
if 'batchLog' in work_attribs:
log_url = work_attribs['batchLog']
apfmon_worker = {'cid': batch_id,
'factory': factory,
'label': '{0}-{1}'.format(computingsite, ce),
'jdlurl': jdl_url,
'stdouturl': stdout_url,
'stderrurl': stderr_url,
'logurl': log_url
}
tmp_log.debug('packed worker: {0}'.format(apfmon_worker))
apfmon_workers.append(apfmon_worker)
payload = json.dumps(apfmon_workers)
try:
r = requests.put(url, data=payload, timeout=self.__worker_timeout)
tmp_log.debug('worker creation for {0} ended with {1} {2}'.format(apfmon_workers, r.status_code, r.text))
except:
tmp_log.debug(
                        'worker creation for {0} failed with: {1}'.format(apfmon_workers, traceback.format_exc()))
end_time = time.time()
tmp_log.debug('done (took {0})'.format(end_time - start_time))
except:
tmp_log.error('Excepted with: {0}'.format(traceback.format_exc()))
def convert_status(self, harvester_status):
"""
        Convert harvester status to APFMon status
        :param harvester_status: the harvester worker status
        :return: the corresponding apfmon status string
"""
if harvester_status == 'submitted':
return 'created'
if harvester_status in ['running', 'idle']:
return 'running'
if harvester_status in ['missed', 'failed', 'cancelled']:
return 'fault'
if harvester_status == 'finished':
return 'done'
def update_worker(self, worker_spec, worker_status):
"""
Updates the state of a worker. This can also be done directly from the wrapper, assuming there is outbound
connectivity on the worker node
"""
start_time = time.time()
tmp_log = core_utils.make_logger(_base_logger, 'harvester_id={0}'.format(self.harvester_id),
method_name='update_worker')
if not self.__active:
tmp_log.debug('APFMon reporting not enabled')
return
try:
tmp_log.debug('start')
batch_id = worker_spec.batchID
factory = self.harvester_id
url = '{0}/jobs/{1}:{2}'.format(self.base_url, factory, batch_id)
apfmon_status = self.convert_status(worker_status)
apfmon_worker = {}
apfmon_worker['state'] = apfmon_status
# For final states include panda id's if available (push mode only)
if apfmon_status in ('fault', 'done') and hasattr(worker_spec, 'pandaid_list') and worker_spec.pandaid_list:
apfmon_worker['ids'] = ','.join(str(x) for x in worker_spec.pandaid_list)
tmp_log.debug('updating worker {0}: {1}'.format(batch_id, apfmon_worker))
r = requests.post(url, data=apfmon_worker, timeout=self.__worker_update_timeout)
tmp_log.debug('worker update for {0} ended with {1} {2}'.format(batch_id, r.status_code, r.text))
end_time = time.time()
tmp_log.debug('done (took {0})'.format(end_time - start_time))
except:
tmp_log.error('Excepted with: {0}'.format(traceback.format_exc()))
if __name__== "__main__":
"""
Quick tests
"""
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
queue_config_mapper = QueueConfigMapper()
apfmon = Apfmon(queue_config_mapper)
apfmon.create_factory()
apfmon.create_labels()
worker_a = WorkSpec()
worker_a.batchID = 1
worker_a.computingSite = 'CERN-PROD-DEV_UCORE'
worker_a.computingElement = 'bla1'
worker_a.workAttributes = {"batchLog": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.log", "stdErr": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.err", "stdOut": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.out"}
worker_a.pandaid_list = [1234, 5678]
worker_b = WorkSpec()
worker_b.batchID = 2
worker_b.computingSite = 'CERN-PROD-DEV_UCORE'
worker_b.computingElement = 'bla2'
worker_b.workAttributes = {"batchLog": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.log", "stdErr": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.err", "stdOut": "https://aipanda024.cern.ch/condor_logs/18-07-19_09/grid.9659.0.out"}
workers = [worker_a, worker_b]
apfmon.create_workers(workers)
worker_a.status = 'running'
worker_b.status = 'running'
    for worker in workers:
        apfmon.update_worker(worker, worker.status)
worker_a.status = 'finished'
worker_b.status = 'failed'
    for worker in workers:
        apfmon.update_worker(worker, worker.status)
|
apache-2.0
| 1,477,390,547,732,146,700 | 38.24821 | 272 | 0.515172 | false |
willingc/gentledocs
|
config/settings/common.py
|
1
|
8186
|
# -*- coding: utf-8 -*-
"""
Django settings for Gentle Docs project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('gentledocs')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'gentledocs.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'gentledocs.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Carol Willing""", 'carolcode@willingconsulting.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///gentledocs"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
|
bsd-3-clause
| 7,349,928,663,983,266,000 | 34.903509 | 98 | 0.607256 | false |
kipe/miplant
|
miplant/miplant.py
|
1
|
4030
|
# -*- encoding: utf-8 -*-
from bluepy import btle
import logging
from math import isnan
class MiPlant(object):
def __init__(self, address, interface_index=0):
'''
Initializes the MiPlant -object.
Parameters
----------
address : string
The MAC-address of the device.
interface_index : string
The bluetooth device index to use for reading, defaults to 0.
'''
self.address = address
self.interface_index = interface_index
self._log = logging.getLogger('MiPlant')
self._battery = 0
self._firmware = ''
self._temperature = float('nan')
self._light = float('nan')
self._moisture = float('nan')
self._conductivity = float('nan')
def __repr__(self):
return "<MiPlant '%s'>" % (self.address)
def read(self):
'''
        Read the sensor with bluepy.
Returns
-------
success : boolean
True if the data was successfully received, otherwise False.
'''
try:
peripheral = btle.Peripheral(self.address, iface=self.interface_index)
peripheral.writeCharacteristic(0x33, bytearray([0xA0, 0x1F]), withResponse=True)
received_bytes = bytearray(peripheral.readCharacteristic(0x38))
self._battery = received_bytes[0]
self._firmware = ''.join([chr(x) for x in received_bytes[1:7]])
received_bytes = bytearray(peripheral.readCharacteristic(0x35))
self._temperature = float(received_bytes[1] * 256 + received_bytes[0]) / 10
self._light = received_bytes[4] * 256 + received_bytes[3]
self._moisture = received_bytes[7]
self._conductivity = received_bytes[9] * 256 + received_bytes[8]
return True
except:
return False
@property
def battery(self):
        ''' Get battery level in percent (?), calls read() if not read yet. '''
if self._battery == 0:
self.read()
return self._battery
@property
def firmware(self):
        ''' Get firmware as a string, calls read() if not read yet. '''
if self._firmware == '':
self.read()
return self._firmware
@property
def temperature(self):
        ''' Get temperature value in degrees Celsius, calls read() if not read yet. '''
if isnan(self._temperature):
self.read()
return self._temperature
@property
def light(self):
        ''' Get light level in lux, calls read() if not read yet. '''
if isnan(self._light):
self.read()
return self._light
@property
def moisture(self):
        ''' Get moisture level in percent, calls read() if not read yet. '''
if isnan(self._moisture):
self.read()
return self._moisture
@property
def conductivity(self):
        ''' Get conductivity in µS/cm, calls read() if not read yet. '''
if isnan(self._conductivity):
self.read()
return self._conductivity
@staticmethod
def discover(interface_index=0, timeout=2):
'''
Discover devices.
Only does basic checking by comparing name of the device to the default name.
Parameters
----------
        interface_index : int
            The bluetooth device index to use for discovery, defaults to 0.
timeout : int
Timeout for searching the devices, defaults to 2.
Returns
-------
devices : list of MiPlant -objects
A list of MiPlant -objects corresponding to the devices found.
'''
return [
MiPlant(device.addr, interface_index=interface_index)
for device in btle.Scanner(interface_index).scan(timeout)
            if device.addr.startswith('c4:7c') and len([1 for x in device.getScanData() if x[1] == 'Complete Local Name' and x[2] == 'Flower mate']) != 0
]
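# Illustrative usage (hypothetical MAC address; requires bluepy and a
# compatible sensor in range):
#   plant = MiPlant('c4:7c:00:00:00:00', interface_index=0)
#   if plant.read():
#       print(plant.temperature, plant.moisture, plant.light,
#             plant.conductivity)
#   # or scan for nearby sensors:
#   for device in MiPlant.discover():
#       print(device.address)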
|
mit
| -1,960,080,027,998,492,200 | 30.724409 | 155 | 0.573591 | false |
monikagrabowska/osf.io
|
kinto/kinto/core/errors.py
|
1
|
7950
|
import six
from pyramid import httpexceptions
from enum import Enum
from kinto.core.logs import logger
from kinto.core.utils import json, reapply_cors, encode_header
class ERRORS(Enum):
"""Predefined errors as specified by the API.
+-------------+-------+------------------------------------------------+
| Status code | Errno | Description |
+=============+=======+================================================+
| 401 | 104 | Missing Authorization Token |
+-------------+-------+------------------------------------------------+
| 401 | 105 | Invalid Authorization Token |
+-------------+-------+------------------------------------------------+
| 400 | 106 | request body was not valid JSON |
+-------------+-------+------------------------------------------------+
| 400 | 107 | invalid request parameter |
+-------------+-------+------------------------------------------------+
| 400 | 108 | missing request parameter |
+-------------+-------+------------------------------------------------+
| 400 | 109 | invalid posted data |
+-------------+-------+------------------------------------------------+
| 404 | 110 | Invalid Token / id |
+-------------+-------+------------------------------------------------+
| 404 | 111 | Missing Token / id |
+-------------+-------+------------------------------------------------+
| 411 | 112 | Content-Length header was not provided |
+-------------+-------+------------------------------------------------+
| 413 | 113 | Request body too large |
+-------------+-------+------------------------------------------------+
| 412 | 114 | Resource was modified meanwhile |
+-------------+-------+------------------------------------------------+
| 405 | 115 | Method not allowed on this end point |
+-------------+-------+------------------------------------------------+
| 404 | 116 | Requested version not available on this server |
+-------------+-------+------------------------------------------------+
| 429 | 117 | Client has sent too many requests |
+-------------+-------+------------------------------------------------+
| 403 | 121 | Resource's access forbidden for this user |
+-------------+-------+------------------------------------------------+
| 409 | 122 | Another resource violates constraint |
+-------------+-------+------------------------------------------------+
| 500 | 999 | Internal Server Error |
+-------------+-------+------------------------------------------------+
| 503 | 201 | Service Temporary unavailable due to high load |
+-------------+-------+------------------------------------------------+
| 410 | 202 | Service deprecated |
+-------------+-------+------------------------------------------------+
"""
MISSING_AUTH_TOKEN = 104
INVALID_AUTH_TOKEN = 105
BADJSON = 106
INVALID_PARAMETERS = 107
MISSING_PARAMETERS = 108
INVALID_POSTED_DATA = 109
INVALID_RESOURCE_ID = 110
MISSING_RESOURCE = 111
MISSING_CONTENT_LENGTH = 112
REQUEST_TOO_LARGE = 113
MODIFIED_MEANWHILE = 114
METHOD_NOT_ALLOWED = 115
VERSION_NOT_AVAILABLE = 116
CLIENT_REACHED_CAPACITY = 117
FORBIDDEN = 121
CONSTRAINT_VIOLATED = 122
UNDEFINED = 999
BACKEND = 201
SERVICE_DEPRECATED = 202
def http_error(httpexception, errno=None,
code=None, error=None, message=None, info=None, details=None):
"""Return a JSON formated response matching the error HTTP API.
:param httpexception: Instance of :mod:`~pyramid:pyramid.httpexceptions`
:param errno: stable application-level error number (e.g. 109)
:param code: matches the HTTP status code (e.g 400)
:param error: string description of error type (e.g. "Bad request")
:param message: context information (e.g. "Invalid request parameters")
:param info: information about error (e.g. URL to troubleshooting)
:param details: additional structured details (conflicting record)
:returns: the formatted response object
:rtype: pyramid.httpexceptions.HTTPException
"""
errno = errno or ERRORS.UNDEFINED
if isinstance(errno, Enum):
errno = errno.value
# Track error number for request summary
logger.bind(errno=errno)
body = {
"code": code or httpexception.code,
"errno": errno,
"error": error or httpexception.title
}
if message is not None:
body['message'] = message
if info is not None:
body['info'] = info
if details is not None:
body['details'] = details
response = httpexception
response.body = json.dumps(body).encode("utf-8")
response.content_type = 'application/json'
return response
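# Illustrative usage of http_error() (hypothetical view code, not part of this
# module):
#   response = http_error(httpexceptions.HTTPForbidden(),
#                         errno=ERRORS.FORBIDDEN,
#                         message='This record is read-only.')
#   raise response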
def json_error_handler(errors):
"""Cornice JSON error handler, returning consistant JSON formatted errors
from schema validation errors.
This is meant to be used is custom services in your applications.
.. code-block:: python
upload = Service(name="upload", path='/upload',
error_handler=errors.json_error_handler)
.. warning::
Only the first error of the list is formatted in the response.
(c.f. HTTP API).
"""
assert len(errors) != 0
sorted_errors = sorted(errors, key=lambda x: six.text_type(x['name']))
error = sorted_errors[0]
name = error['name']
description = error['description']
if isinstance(description, six.binary_type):
description = error['description'].decode('utf-8')
if name is not None:
if name in description:
message = description
else:
message = '%(name)s in %(location)s: %(description)s' % error
else:
message = '%(location)s: %(description)s' % error
response = http_error(httpexceptions.HTTPBadRequest(),
code=errors.status,
errno=ERRORS.INVALID_PARAMETERS.value,
error='Invalid parameters',
message=message,
details=errors)
response.status = errors.status
response = reapply_cors(errors.request, response)
return response
def raise_invalid(request, location='body', name=None, description=None,
**kwargs):
"""Helper to raise a validation error.
:param location: location in request (e.g. ``'querystring'``)
:param name: field name
:param description: detailed description of validation error
:raises: :class:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
"""
request.errors.add(location, name, description, **kwargs)
request.errors.request = request # Needed by json_error_handler to reapply_cors
response = json_error_handler(request.errors)
raise response
def send_alert(request, message=None, url=None, code='soft-eol'):
"""Helper to add an Alert header to the response.
:param code: The type of error 'soft-eol', 'hard-eol'
:param message: The description message.
:param url: The URL for more information, default to the documentation url.
"""
if url is None:
url = request.registry.settings['project_docs']
request.response.headers['Alert'] = encode_header(json.dumps({
'code': code,
'message': message,
'url': url
}))
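# Illustrative usage of send_alert() (hypothetical deprecation notice, not part
# of this module):
#   send_alert(request,
#              message='This endpoint will be removed in the next release.',
#              url='https://example.com/deprecation-notes',
#              code='soft-eol')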
|
apache-2.0
| -8,970,654,734,963,827,000 | 40.19171 | 84 | 0.470189 | false |
apophys/freeipa
|
ipatests/test_ipapython/test_session_storage.py
|
1
|
1069
|
#
# Copyright (C) 2017 FreeIPA Contributors see COPYING for license
#
"""
Test the `session_storage.py` module.
"""
import pytest
from ipapython import session_storage
@pytest.mark.skip_ipaclient_unittest
@pytest.mark.needs_ipaapi
class test_session_storage(object):
"""
Test the session storage interface
"""
def setup(self):
# TODO: set up test user and kinit to it
# tmpdir = tempfile.mkdtemp(prefix = "tmp-")
# os.environ['KRB5CCNAME'] = 'FILE:%s/ccache' % tmpdir
self.principal = 'admin'
self.key = 'X-IPA-test-session-storage'
self.data = b'Test Data'
def test_01(self):
session_storage.store_data(self.principal, self.key, self.data)
def test_02(self):
data = session_storage.get_data(self.principal, self.key)
assert(data == self.data)
def test_03(self):
session_storage.remove_data(self.principal, self.key)
try:
session_storage.get_data(self.principal, self.key)
except session_storage.KRB5Error:
pass
|
gpl-3.0
| 8,177,739,086,821,916,000 | 25.725 | 71 | 0.638915 | false |
natukikazemizo/sedna
|
Sedna/src/python/recasting/grep_object.py
|
1
|
5780
|
#!BPY
# -*- coding: UTF-8 -*-
# Set IK Limits
#
# 2016.01.17 Natukikazemizo
import bpy
import math
import re
# Using Regular expression
#"[0]" 1文字が0
#"[0-9]" 1文字が0,1,2,3,4,5,6,7,8,9のいずれか
#"[02468]" 1文字が偶数
#"[BCD]" 1文字がB,C,Dのいずれか
#"[B-D]" 1文字がB,C,Dのいずれか
#"[0-2A-C]" 1文字が0,1,2,A,B,Cのいずれか
#"[^12]" 1文字が1,2以外
#"\w" 1文字が英数字
#"\W" 1文字が英数字以外
#"\d" 1文字が数字
#"\D" 1文字が数字以外
#"\s" 1文字が空白
#"\S" 1文字が空白以外
#"." 1文字が任意の1文字(改行文字以外の)
#"\t" 1文字がタブ
#"\n" 1文字が改行
#"A*" Aが連続で0個以上
#"A+" Aが連続で1個以上
#/AB?/ BがAの次に0個または1個あるか
#/z{4}$/ zが連続で4個あって、データ末尾か?
#/z{3,}/ zが連続で3個以上あるか?
#/(ABC){2,4} 2回以上4回以下のABCの繰り返しになっているか?
#"(?:.)" ()を記憶しない
#/(?:a.b){1,3}/ a.b a.ba.b a.ba.ba.b いずれかに一致
#/\bABC\b/ ABCが単語単位か?
#"\." .があるか?
#"\\" \があるか?
#"\*" *があるか?
#"\?" ?があるか?
#"\+" +があるか?
#"\^" ^があるか?
#"(HELLO).*\1" HELLOが行中にもう1回出現するか?
#"(HELLO)(SEEU).*\2" SEEUが行中にもう1回出現するか?
# In Pose Mode the selected bones are available as bpy.context.selected_pose_bones
# Ways of selecting objects are described here:
# https://sites.google.com/site/matosus304blendernotes/home/blender-python-script
## For the selected bones
#for x in bpy.context.selected_bones:
# For the selected bones (use in Edit Mode)
#for x in bpy.context.selected_editable_bones:
# For the selected bones (use in Pose Mode)
#for x in bpy.context.selected_pose_bones:
# For all bones of the active object
#for x in bpy.context.active_object.pose.bones:
# For the bone groups of the active object
#obj = bpy.context.active_object.pose
# Bone-related code follows.
# Blender bone data comes in three flavours: the bone data itself (bones), edit
# bones that can be manipulated like in Edit Mode, and pose data (pose). Take
# care not to mix them up.
# Note that a pose bone gives access to its bone data via ".bone", whereas
# there is no direct way back from the bone data to the pose.
# For bones
## For the selected bones
#for x in bpy.context.selected_bones:
#    print(x.name) # print the bone name
#    x.use_deform = False # do not deform the mesh with this bone
#    x.hide = True # hide the bone
## For all bones of the active object
#for x in bpy.context.active_object.data.bones:
#    print(x.name) # print the bone name
# For edit bones
## For the selected bones (use in Edit Mode)
#for x in bpy.context.selected_editable_bones:
#    print(x.name) # print the bone name
#    x.head = (0,0,0) # change the head position of the bone
#    x.tail = (0,0,1) # change the tail position of the bone
## For all bones of the active object
#for x in bpy.context.active_object.data.edit_bones:
#    print(x.name) # print the bone name
# For poses
## For the selected bones (use in Pose Mode)
#for x in bpy.context.selected_pose_bones:
#    print(x.name) # print the bone name
#    x.location = (0,0,0) # set the location
#    x.rotation_quaternion = (1,0,0,0) # set the quaternion rotation
#    x.custom_shape # custom shape
#    x.bone_group = bpy.context.active_object.pose.bone_groups["Group"] # assign a bone group
#    x.bone # access the bone data
## For all bones of the active object
#for x in bpy.context.active_object.pose.bones:
#    print(x.name) # print the bone name
#    if x.bone_group != None and x.bone_group.name == "Group": # for bones in the given bone group
#        print(x.name)
## For the bone groups of the active object
#obj = bpy.context.active_object.pose
#obj.bone_groups[0].color_set = "CUSTOM" # change the color set to custom (arbitrary colors)
##bpy.ops.pose
#bpy.ops.pose.group_add() # add a bone group
# For constraints
## For the selected bones (use in Pose Mode)
#pose_bone = bpy.context.active_pose_bone
#pose_bone.constraints[0] # access a constraint
#new_constraint = pose_bone.constraints.new(type="COPY_SCALE") # add a new constraint
from . import debug
if __name__ == "__main__":
debug.startdebug()
print("##### START #####")
p=re.compile(r".*(0).*\1")
for x in bpy.context.selected_pose_bones:
if p.match(x.name):
print(x.name)
print("##### END #####")
|
mit
| 7,223,568,034,813,345,000 | 27.887218 | 117 | 0.598126 | false |
702nADOS/sumo
|
tools/contributed/sumopy/coremodules/network/network.py
|
1
|
164969
|
# size limit at 1280x1280
# http://maps.googleapis.com/maps/api/staticmap?size=500x500&path=color:0x000000|weight:10|44.35789,11.3093|44.4378,11.3935&format=GIF&maptype=satellite&scale=2
import os
import sys
import subprocess
import platform
from xml.sax import saxutils, parse, handler
if __name__ == '__main__':
try:
APPDIR = os.path.dirname(os.path.abspath(__file__))
except:
APPDIR = os.path.dirname(os.path.abspath(sys.argv[0]))
SUMOPYDIR = os.path.join(APPDIR, '..', '..')
sys.path.append(os.path.join(SUMOPYDIR))
import time
import numpy as np
from collections import OrderedDict
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.processes import Process, CmlMixin, P
from agilepy.lib_base.geometry import *
from agilepy.lib_base.misc import filepathlist_to_filepathstring, filepathstring_to_filepathlist
#TESTNODES = [294,295]
MODES = OrderedDict([
("ignoring", 0),
("pedestrian", 1),
("bicycle", 2),
("motorcycle", 3),
("passenger", 4),
("bus", 5),
("tram", 6),
("rail_urban", 7),
("delivery", 8),
("private", 9),
("taxi", 10),
("hov", 11),
("evehicle", 12),
("emergency", 13),
("authority", 14),
("army", 15),
("vip", 16),
("coach", 17),
("truck", 18),
("trailer", 19),
("rail", 20),
("rail_electric", 21),
("moped", 22),
("custom1", 23),
("custom2", 24),
("ship", 25),
])
OSMEDGETYPE_TO_MODES = {'highway.cycleway': ([MODES['bicycle']], 5.6),
'highway.pedestrian': ([MODES['pedestrian']], 0.8),
'highway.footway': ([MODES['pedestrian']], 0.8),
'highway.path': ([MODES['pedestrian'], MODES['bicycle']], 5.6),
'highway.service': ([MODES['delivery'], MODES['bicycle']], 13.8),
}
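# Illustrative lookup (values taken from the tables above): the OSM edge type
# 'highway.cycleway' maps to a list of allowed mode ids and a default max speed
# in m/s:
#   ids_modes_allow, speed_default = OSMEDGETYPE_TO_MODES['highway.cycleway']
#   # -> ([MODES['bicycle']], 5.6), i.e. ([2], 5.6)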
class SumoIdsConf(am.ArrayConf):
"""
Sumo id array coniguration
"""
# def __init__(self, **attrs):
# print 'ColConf',attrs
def __init__(self, refname, name=None, info=None, perm='rw', xmltag='id'):
if name == None:
name = 'ID ' + refname
if info == None:
info = refname + ' ID of SUMO network'
am.ArrayConf.__init__(self, attrname='ids_sumo', default='',
dtype='object',
perm=perm,
is_index=True,
name=name,
info=info,
xmltag=xmltag,
)
class Modes(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, **kwargs):
ident = 'modes'
self._init_objman(ident=ident, parent=parent, name='Transport Modes',
xmltag=('vClasses', 'vClass', 'names'),
**kwargs)
self.add_col(am.ArrayConf('names', '',
dtype=np.object,
perm='r',
is_index=True,
name='Name',
info='Name of mode. Used as key for implementing access restrictions on edges as well as demand modelling.',
xmltag='vClass',
))
def get_id_mode(self, modename):
return self.names.get_id_from_index(modename)
class TrafficLightProgram(am.ArrayObjman):
def __init__(self, ident, parent, **kwargs):
self._init_objman(ident, parent=parent,
name='TLL Program',
info='Signale phases of a traffic light program.',
xmltag=('', 'phase', ''), **kwargs)
self.add_col(am.NumArrayConf('durations', 0,
dtype=np.int32,
name='Duration',
unit='s',
info='The duration of the phase.',
xmltag='duration',
))
self.add_col(am.NumArrayConf('durations_min', 0,
dtype=np.int32,
name='Min. duration',
unit='s',
info='The minimum duration of the phase when using type actuated. Optional, defaults to duration.',
xmltag='minDur',
))
self.add_col(am.NumArrayConf('durations_max', 0,
dtype=np.int32,
name='Max. duration',
unit='s',
info='The maximum duration of the phase when using type actuated. Optional, defaults to duration.',
xmltag='maxDur',
))
self.add_col(am.ArrayConf('states', None,
dtype=np.object,
perm='rw',
name='State',
info="The traffic light states for this phase. Values can be one of these characters: 'r'=red, 'y'=yellow, 'g'=green give priority, 'G'=Green always priority, 'o'=blinking ,'O'=TLS switched off",
xmltag='state',
))
def add_multi(self, **kwargs):
# print 'add_multi',self.ident
# print ' durations',kwargs.get('durations',None)
# print ' durations_min',kwargs.get('durations_min',None)
# print ' durations_max',kwargs.get('durations_max',None)
# print ' states',kwargs.get('states',None)
return self.add_rows(durations=kwargs.get('durations', None),
durations_min=kwargs.get('durations_min', None),
durations_max=kwargs.get('durations_max', None),
states=kwargs.get('states', None),
)
# def write_xml(self, fd, indent, **kwargs):
#
# # never print begin-end tags
# # this could go into xml config
# if kwargs.has_key('is_print_begin_end'):
# del kwargs['is_print_begin_end']
# am.ArrayObjman.write_xml(self, fd, indent,is_print_begin_end = False,**kwargs)
class TrafficLightLogics(am.ArrayObjman):
def __init__(self, ident, tlss, **kwargs):
self._init_objman(ident, parent=tlss,
name='Traffic Light Logics',
info='Traffic light Logics (TLLs) for Trafic Light Systems (TLSs).',
xmltag=('tlLogics', 'tlLogic', 'ids_tls'),
**kwargs)
self.add_col(am.IdsArrayConf('ids_tls', tlss,
groupnames=['state'],
name='ID tls',
info='ID of traffic light system. Typically the id for a traffic light is identical with the junction id. The name may be obtained by right-clicking the red/green bars in front of a controlled intersection.',
# this will be ID TLS tag used as ID in
# xml file
xmltag='id',
))
self.add_col(am.ArrayConf('ids_prog', '',
dtype=np.object,
perm='rw',
name='Prog ID',
info='Sumo program ID, which is unique within the same traffic light system.',
xmltag='programID',
))
self.add_col(am.ArrayConf('ptypes', 1,
choices={
"static": 1,
"actuated": 2,
},
dtype=np.int32,
perm='rw',
name='Prog. type',
info='The type of the traffic light program (fixed phase durations, phase prolongation based time gaps between vehicles).',
xmltag='type',
))
self.add_col(am.NumArrayConf('offsets', 0,
dtype=np.int32,
perm='rw',
name='Offset',
unit='s',
info='The initial time offset of the program.',
#is_plugin = True,
xmltag='offset',
))
self.add_col(cm.ObjsConf('programs',
name='program',
info='Tls program.',
))
def make(self, id_tls, id_prog=None, ptype=None, offset=None, **kwargs_prog):
# print 'make',id_tls,id_prog
if id_prog is None:
id_prog = str(len(np.flatnonzero(self.ids_tls == id_tls)))
id_tll = self.add_row(ids_tls=id_tls,
ids_prog=id_prog,
ptypes=ptype,
offsets=offset,
)
# init programme
program = TrafficLightProgram(('prog', id_tll), self)
self.programs[id_tll] = program
# add phases
program.add_multi(**kwargs_prog)
return id_tll
class TrafficLightSystems(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Traffic_Light_Program_Definition
def __init__(self, net, **kwargs):
self._init_objman(ident='tlss', parent=net,
name='Traffic Light Systems',
# actually tlls table is exported, but ids_sumo is
# required for ID
xmltag=('tlSystems', 'tlSystem', 'ids_sumo'),
**kwargs)
self.add_col(SumoIdsConf('TLS', info='SUMO ID of traffic light system.',
xmltag='id'))
self.add(cm.ObjConf(TrafficLightLogics('tlls', self)))
self.add_col(am.IdlistsArrayConf('ids_tlls', self.tlls.value,
groupnames=['state'],
name='IDs TLL',
info='ID list of available Traffic Light Logics (or programs) for the Traffic Light System.',
))
self.add_col(am.IdlistsArrayConf('ids_cons', self.parent.connections,
groupnames=['state'],
name='IDs con.',
info='ID list of controlled connections. These connections corrispond to the elements of the state vector within the program-phases.',
))
def make(self, id_sumo, **kwargs):
if self.ids_sumo.has_index(id_sumo):
# recycle ID from existing
id_tls = self.ids_sumo.get_id_from_index(id_sumo)
else:
# make new TLS
id_tls = self.add_row(ids_sumo=id_sumo, ids_tlls=[])
# make a new TL logic for this traffic light systems
id_tll = self.tlls.get_value().make(id_tls, **kwargs)
# append new logic to list
self.ids_tlls[id_tls].append(id_tll)
return id_tls
def set_connections(self, id_tls, ids_con):
"""
Set connections, which represent the controlled links of TLD with id_tls
Called after connections in ttl file have been parsed.
"""
#id_tls = self.ids_sumo.get_id_from_index(id_sumo)
self.ids_cons[id_tls] = ids_con
def export_sumoxml(self, filepath=None, encoding='UTF-8'):
"""
Export traffic light systems to SUMO xml file.
"""
# here we export actually the traffic light logics table
# and the controlled connections table
tlls = self.tlls.get_value()
connections = self.parent.connections # self.ids_cons.get_linktab()
lanes = self.parent.lanes
edges = self.parent.edges
# this is the preferred way to specify default filepath
if filepath == None:
filepath = self.parent.get_rootfilepath() + '.tll.xml'
print 'export_sumoxml', filepath
try:
fd = open(filepath, 'w')
except:
print 'WARNING in export_sumoxml: could not open', filepath
return False
fd.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
xmltag_ttl, xmltag_id, attrconf_id = tlls.xmltag
fd.write(xm.begin(xmltag_ttl))
indent = 2
#ids_modes_used = set(self.parent.vtypes.ids_mode[self.ids_vtype.get_value()])
ids_tlls = tlls.get_ids()
tlls.write_xml(fd, indent=indent,
#xmltag_id = 'id',
#ids = ids_tlls,
#ids_xml = self.ids_sumo[tlls.ids_tls[ids_tlls]],
is_print_begin_end=False,
)
# write controlled connections
ids_tls = self.get_ids()
xmltag_con = 'connection'
for ids_con, id_sumo_tls in zip(self.ids_cons[ids_tls], self.ids_sumo[ids_tls]):
ids_fromlane = connections.ids_fromlane[ids_con]
ids_tolane = connections.ids_tolane[ids_con]
inds_fromlane = lanes.indexes[ids_fromlane]
inds_tolane = lanes.indexes[ids_tolane]
ids_sumo_fromedge = edges.ids_sumo[lanes.ids_edge[ids_fromlane]]
ids_sumo_toedge = edges.ids_sumo[lanes.ids_edge[ids_tolane]]
ind_link = 0
for id_sumo_fromedge, id_sumo_toedge, ind_fromlane, ind_tolane in \
zip(ids_sumo_fromedge, ids_sumo_toedge, inds_fromlane, inds_tolane):
fd.write(xm.start(xmltag_con, indent))
fd.write(xm.num('from', id_sumo_fromedge))
fd.write(xm.num('to', id_sumo_toedge))
fd.write(xm.num('fromLane', ind_fromlane))
fd.write(xm.num('toLane', ind_tolane))
fd.write(xm.num('tl', id_sumo_tls))
fd.write(xm.num('linkIndex', ind_link))
fd.write(xm.stopit())
ind_link += 1
fd.write(xm.end(xmltag_ttl))
class Crossings(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, **kwargs):
ident = 'crossings'
self._init_objman(ident=ident, parent=parent, name='Crossings',
xmltag=('crossings', 'crossing', ''),
**kwargs)
self.add_col(am.IdsArrayConf('ids_node', parent.nodes,
groupnames=['state'],
name='ID node',
info='ID of node where crossings are located.',
xmltag='node',
))
self.add_col(am.IdlistsArrayConf('ids_edges', parent.edges,
groupnames=['state'],
name='IDs Edge',
info='Edge IDs at specific node, where street crossing is possible.',
xmltag='edges',
))
self.add_col(am.ArrayConf('widths', 4.0,
dtype=np.float32,
perm='rw',
unit='m',
name='Width',
info='Crossing width.',
xmltag='width',
))
self.add_col(am.ArrayConf('are_priority', False,
dtype=np.bool,
perm='rw',
name='Priority',
info='Whether the pedestrians have priority over the vehicles (automatically set to true at tls-controlled intersections).',
xmltag='priority',
))
self.add_col(am.ArrayConf('are_discard', False,
dtype=np.bool,
perm='rw',
name='Discard',
info='Whether the crossing with the given edges shall be discarded.',
xmltag='discard',
))
def multimake(self, ids_node=[], **kwargs):
n = len(ids_node)
return self.add_rows(n=n,
ids_node=ids_node,
**kwargs
)
def make(self, **kwargs):
return self.add_row(ids_node=kwargs['id_node'],
ids_edges=kwargs['ids_edge'],
widths=kwargs.get('width', None),
are_priority=kwargs.get('is_priority', None),
are_discard=kwargs.get('is_discard', None),
)
class Connections(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, **kwargs):
ident = 'connections'
self._init_objman(ident=ident, parent=parent, name='Connections',
xmltag=('connections', 'connection', None),
**kwargs)
self._init_attributes()
def _init_attributes(self):
lanes = self.parent.lanes
self.add_col(am.IdsArrayConf('ids_fromlane', lanes,
groupnames=['state'],
name='ID from-lane',
info='ID of lane at the beginning of the connection.',
xmltag='fromLane',
))
self.add_col(am.IdsArrayConf('ids_tolane', lanes,
name='ID to-lane',
info='ID of lane at the end of the connection.',
xmltag='toLane',
))
self.add_col(am.ArrayConf('are_passes', False,
dtype=np.bool,
perm='rw',
name='Pass',
info='If set, vehicles which pass this (lane-to-lane) connection will not wait.',
xmltag='pass',
))
self.add_col(am.ArrayConf('are_keep_clear', True,
dtype=np.bool,
groupnames=['state'],
perm='rw',
name='keep clear',
info='if set to false, vehicles which pass this (lane-to-lane) connection will not worry about blocking the intersection.',
xmltag='keepClear',
))
self.add_col(am.ArrayConf('positions_cont', 0.0,
dtype=np.float32,
perm='rw',
unit='m',
name='Cont. Pos.',
info='if set to 0, no internal junction will be built for this connection. If set to a positive value, an internal junction will be built at this position (in m) from the start of the internal lane for this connection. ',
xmltag='contPos',
))
self.add_col(am.ArrayConf('are_uncontrolled', False,
dtype=np.bool,
perm='rw',
name='uncontrolled',
info='if set to true, This connection will not be TLS-controlled despite its node being controlled.',
xmltag='uncontrolled',
))
def make(self, **kwargs):
return self.add_row(ids_fromlane=kwargs['id_fromlane'],
ids_tolane=kwargs['id_tolane'],
are_passes=kwargs.get('is_passes', None),
are_keep_clear=kwargs.get('is_keep_clear', None),
positions_cont=kwargs.get('position_cont', None),
are_uncontrolled=kwargs.get(
'is_uncontrolled', None),
)
def multimake(self, ids_fromlane=[], ids_tolane=[], **kwargs):
n = len(ids_fromlane)
return self.add_rows(n=n,
ids_fromlane=ids_fromlane,
ids_tolane=ids_tolane,
**kwargs
)
def get_id_from_sumoinfo(self, id_sumo_fromedge, id_sumo_toedge, ind_fromlane, ind_tolane):
get_id_lane = self.parent.edges.get_id_lane_from_sumoinfo
id_fromlane = get_id_lane(id_sumo_fromedge, ind_fromlane)
id_tolane = get_id_lane(id_sumo_toedge, ind_tolane)
ids_con = self.select_ids((self.ids_fromlane.value == id_fromlane) & (
self.ids_tolane.value == id_tolane))
if len(ids_con) == 1:
return ids_con[0]
else:
return -1
def export_sumoxml(self, filepath, encoding='UTF-8'):
try:
fd = open(filepath, 'w')
except:
print 'WARNING in export_sumoxml: could not open', filepath
return False
fd.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
indent = 0
self.write_xml(fd, indent)
fd.close()
def write_xml(self, fd, indent):
# print 'Connections.write_xml'
xmltag, xmltag_item, attrname_id = self.xmltag
#attrsman = self.get_attrsman()
#attrsman = self.get_attrsman()
#config_fromlane = attrsman.get_config('ids_fromlane')
#config_tolane = attrsman.get_config('ids_tolane')
colconfigs = self.get_colconfigs(is_all=True)
ids_sumoedges = self.parent.edges.ids_sumo
ids_laneedge = self.parent.lanes.ids_edge
# print ' header'
fd.write(xm.start(xmltag, indent))
# print ' ', self.parent.get_attrsman().get_config('version').attrname,self.parent.get_attrsman().get_config('version').get_value()
#fd.write( self.parent.get_attrsman().get_config('version').write_xml(fd) )
self.parent.get_attrsman().get_config('version').write_xml(fd)
fd.write(xm.stop())
for _id in self.get_ids():
fd.write(xm.start(xmltag_item, indent + 2))
# print ' make tag and id',_id
# fd.write(xm.num(xmltag_id,attrconfig_id[_id]))
# print ' write columns'
for attrconfig in colconfigs:
# print ' colconfig',attrconfig.attrname
if attrconfig == self.ids_fromlane:
fd.write(xm.num('from', ids_sumoedges[
ids_laneedge[self.ids_fromlane[_id]]]))
attrconfig.write_xml(fd, _id)
elif attrconfig == self.ids_tolane:
fd.write(xm.num('to', ids_sumoedges[
ids_laneedge[self.ids_tolane[_id]]]))
attrconfig.write_xml(fd, _id)
else:
attrconfig.write_xml(fd, _id)
fd.write(xm.stopit())
self.parent.crossings.write_xml(
fd, indent=indent + 2, is_print_begin_end=False)
fd.write(xm.end(xmltag, indent))
class Lanes(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, edges, modes, **kwargs):
ident = 'lanes'
self._init_objman(ident=ident, parent=parent, name='Lanes',
is_plugin=True,
xmltag=('lanes', 'lane', 'indexes'),
**kwargs)
self.add_col(am.ArrayConf('indexes', 0,
dtype=np.int32,
perm='r',
name='Lane index',
info='The enumeration index of the lane (0 is the rightmost lane, <NUMBER_LANES>-1 is the leftmost one).',
xmltag='index',
))
self.add_col(am.ArrayConf('widths', 3.5,
dtype=np.float32,
perm='rw',
unit='m',
name='Width',
info='Lane width.',
is_plugin=True,
xmltag='width',
))
self.add_col(am.NumArrayConf('speeds_max', 50.0 / 3.6,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Max speed',
unit='m/s',
info='Maximum speed on lane.',
xmltag='speed',
))
self.add_col(am.NumArrayConf('offsets_end', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='r',
name='End offset',
unit='m',
info='Move the stop line back from the intersection by the given amount (effectively shortening the lane and locally enlarging the intersection).',
xmltag='endOffset',
))
self.add_col(am.IdlistsArrayConf('modes_allow', modes,
name='IDs allowed',
info='Allowed modes on this lane.',
xmltag='allow',
))
self.add_col(am.IdlistsArrayConf('modes_disallow', modes,
name='IDs disallow',
info='Disallowed modes on this lane.',
xmltag='disallow',
))
self.add_col(am.IdsArrayConf('ids_mode', modes,
groupnames=['state'],
name='Main mode ID',
info='ID of main mode of this lane.',
is_plugin=True,
))
self.add_col(am.IdsArrayConf('ids_edge', edges,
groupnames=['state'],
name='ID edge',
info='ID of edge in which the lane is contained.',
))
self.add_col(am.ListArrayConf('shapes',
groupnames=['_private'],
perm='rw',
name='Shape',
unit='m',
info='List of 3D Shape coordinates to describe polyline.',
is_plugin=True,
))
def get_edges(self):
return self.parent.edges
def multimake(self, indexes=[], **kwargs):
n = len(indexes)
# print 'Lanes.make',kwargs
#width = kwargs.get('widths',None)
#speed_max = kwargs.get('speed_max',-1)
#modes_allow = kwargs.get('modes_allow',[])
return self.add_rows(n=n,
indexes=indexes,
widths=kwargs['widths'],
speeds_max=kwargs['speeds_max'],
offsets_end=kwargs['offsets_end'],
modes_allow=kwargs['modes_allow'],
modes_disallow=kwargs['modes_disallow'],
ids_mode=kwargs['ids_mode'],
ids_edge=kwargs['ids_edge'],
# shapes = kwargs.get('shapes',[]), # if empty,
# then computation later from edge shape
)
def make(self, **kwargs):
edges = self.get_edges()
id_edge = kwargs['id_edge']
index = kwargs['index']
# print 'Lanes.make',kwargs
width = kwargs.get('width', -1)
speed_max = kwargs.get('speed_max', -1)
modes_allow = kwargs.get('modes_allow', [])
is_sidewalk_edge = False
is_sidewalk = False
if len(modes_allow) > 0:
id_mode = modes_allow[0] # pick first as major mode
else:
id_mode = -1 # no mode specified
if index == 0:
width_sidewalk_edge = edges.widths_sidewalk[id_edge]
is_sidewalk_edge = width_sidewalk_edge > 0
# test for pedestrian sidewalk
is_sidewalk = (MODES['pedestrian'] in modes_allow)
if speed_max < 0:
if (index == 0) & is_sidewalk:
speed_max = 0.8 # default walk speed
else:
speed_max = edges.speeds_max[id_edge]
# print ' is_sidewalk_edge ,is_sidewalk',is_sidewalk_edge ,is_sidewalk
if width < 0:
width = edges.widths_lanes_default[id_edge]
if index == 0:
if is_sidewalk_edge: # edge wants sidewalks
width = width_sidewalk_edge
# edge does not want sidewalks, but actually there is a
# sidewalk
elif (not is_sidewalk_edge) & is_sidewalk:
width = 0.9 # default sidewalk width
edges.widths_sidewalk[id_edge] = width
# if sidewalk, then the edge attribute widths_sidewalk
# should be set to actual lane width in case it is less than zero
elif index == 0: # width set for lane 0
# edge does not want sidewalks, but actually there is a sidewalk
if (not is_sidewalk_edge) & is_sidewalk:
edges.widths_sidewalk[id_edge] = width
# if index == 0:
# edges.widths_sidewalk[id_edge]= width
return self.add_row(indexes=index,
widths=width,
speeds_max=speed_max,
offsets_end=kwargs.get('offset_end', None),
modes_allow=modes_allow,
modes_disallow=kwargs.get('modes_disallow', []),
ids_mode=id_mode,
ids_edge=id_edge,
# if empty, then computation later from edge shape
shapes=kwargs.get('shapes', []),
)
def reshape(self):
for id_edge in self.parent.edges.get_ids():
self.reshape_edgelanes(id_edge)
def reshape_edgelanes(self, id_edge):
"""
Recalculate shape of all lanes contained in edge id_edge
based on the shape information of this edge.
"""
#
#lanes = self.get_lanes()
edges = self.parent.edges
ids_lane = edges.ids_lanes[id_edge]
shape = np.array(edges.shapes[id_edge], np.float32)
# print 'reshape: edgeshape id_edge,ids_lane=',id_edge,ids_lane
# print ' shape =',shape
n_lanes = len(ids_lane)
n_vert = len(shape)
angles_perb = get_angles_perpendicular(shape)
dxn = np.cos(angles_perb)
dyn = np.sin(angles_perb)
#laneshapes = np.zeros((n_lanes,n_vert,3), np.float32)
id_lane = ids_lane[0]
# np.ones(n_lanes,np.float32)#lanes.widths[ids_lane]
widths = self.widths[ids_lane]
widths_tot = np.sum(widths)
if edges.types_spread[id_edge] == 1: # center lane spread
widths2 = np.concatenate(([0.0], widths[:-1]))
# print ' widths',widths_tot,widths
# print ' widths2',widths2
displacement = np.cumsum(widths2)
displacement = 0.5 * (widths_tot) - displacement - 0.5 * widths
# print ' displacement',displacement
else:
widths2 = np.concatenate(([0.0], widths[:-1]))
displacement = np.cumsum(widths2)
displacement = displacement[-1] - \
displacement - 0.5 * widths + widths[-1]
for i in range(n_lanes):
id_lane = ids_lane[i]
# print ' displacement[i] ',displacement[i]#,
# if 1:#len(self.shapes[id_lane])==0: # make only if not existant
laneshape = np.zeros(shape.shape, np.float32)
# print ' dx \n',dxn*displacement[i]
# print ' dy \n',dyn*displacement[i]
laneshape[:, 0] = dxn * displacement[i] + shape[:, 0]
laneshape[:, 1] = dyn * displacement[i] + shape[:, 1]
laneshape[:, 2] = shape[:, 2]
self.shapes[id_lane] = laneshape
self.shapes.set_modified(True)
class Roundabouts(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, edges, nodes, **kwargs):
ident = 'roundabouts'
self._init_objman(ident=ident, parent=parent,
name='Roundabouts',
xmltag=('roundabouts', 'roundabout', ''),
**kwargs)
self.add_col(am.IdlistsArrayConf('ids_edges', edges,
groupnames=['state'],
name='IDs edges',
info='List with edges IDs.',
xmltag='edges',
))
self.add_col(am.IdlistsArrayConf('ids_nodes', nodes,
groupnames=['state'],
name='IDs Nodes',
info='List with node IDs.',
xmltag='nodes',
))
def multimake(self, ids_nodes=[], **kwargs):
n = len(ids_nodes)
return self.add_rows(n=n,
ids_nodes=ids_nodes, **kwargs
)
def make(self, **kwargs):
return self.add_row(ids_nodes=kwargs['ids_node'],
ids_edges=kwargs['ids_edge'],
)
class Edges(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, parent, **kwargs):
ident = 'edges'
self._init_objman(ident=ident, parent=parent,
name='Edges',
xmltag=('edges', 'edge', 'ids_sumo'),
version=0.1,
**kwargs)
self._init_attributes()
def _init_attributes(self):
if self.get_version() < 0.1:
pass
self.add_col(SumoIdsConf('Edge'))
self.add_col(am.ArrayConf('types', '',
dtype=np.object,
perm='rw',
name='Type',
info='Edge reference OSM type.',
xmltag='type', # should not be exported?
))
self.add_col(am.ArrayConf('nums_lanes', 1,
dtype=np.int32,
perm='r',
name='# of lanes',
info='Number of lanes.',
xmltag='numLanes',
))
self.add_col(am.NumArrayConf('speeds_max', 50.0 / 3.6,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Max speed',
unit='m/s',
info='Maximum speed on edge.',
xmltag='speed',
))
self.add_col(am.ArrayConf('priorities', 1,
dtype=np.int32,
perm='rw',
name='Priority',
info='Road priority (1-9).',
xmltag='priority',
))
self.add_col(am.NumArrayConf('lengths', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='r',
name='Length',
unit='m',
info='Edge length.',
#xmltag = 'length ',
))
self.add_col(am.NumArrayConf('widths', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='r',
name='Width',
unit='m',
info='Edge width.',
is_plugin=True,
#xmltag = 'width',
))
self.add_col(am.ListArrayConf('shapes',
groupnames=['_private'],
perm='rw',
name='Shape',
unit='m',
info='List of 3D Shape coordinates to describe polyline.',
is_plugin=True,
xmltag='shape',
))
self.add_col(am.ArrayConf('types_spread', 0,
choices={
"right": 0,
"center": 1,
},
dtype=np.int32,
perm='rw',
name='Spread type',
info='Determines how the lanes are spread with respect to main link coordinates.',
xmltag='spreadType',
))
self.add_col(am.ArrayConf('names', '',
dtype=np.object,
perm='rw',
name='Name',
info='Road name, for visualization only.',
xmltag='name',
))
self.add_col(am.NumArrayConf('offsets_end', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='r',
name='End offset',
unit='m',
info='Move the stop line back from the intersection by the given amount (effectively shortening the edge and locally enlarging the intersection).',
xmltag='endOffset',
))
self.add_col(am.NumArrayConf('widths_lanes_default', 3.5,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Default lane width',
unit='m',
info='Default lane width for all lanes of this edge in meters (used for visualization).',
#xmltag = '',
))
self.add_col(am.NumArrayConf('widths_sidewalk', -1.0,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Sidewalk width',
unit='m',
info='Adds a sidewalk with the given width (defaults to -1 which adds nothing).',
#xmltag = 'sidewalkWidth',
))
def set_nodes(self, nodes):
# set ref to nodes table, once initialized
self.add_col(am.IdsArrayConf('ids_fromnode', nodes,
groupnames=['state'],
name='ID from-node',
info='ID of node at the beginning of the edge.',
xmltag='from',
))
self.add_col(am.IdsArrayConf('ids_tonode', nodes,
groupnames=['state'],
name='ID to-node',
info='ID of node at the end of the edge.',
xmltag='to',
))
def set_lanes(self, lanes):
self.add_col(am.IdlistsArrayConf('ids_lanes', lanes,
groupnames=['state'],
name='IDs Lanes',
info='List with IDs of lanes.',
xmltag='lanes',
is_xml_include_tab=True,
))
def get_outgoing(self, id_edge):
# print
# 'get_outgoing',id_edge,self.ids_tonode[id_edge],self.parent.nodes.ids_outgoing[self.ids_tonode[id_edge]]
ids_edges = self.parent.nodes.ids_outgoing[self.ids_tonode[id_edge]]
if ids_edges == None: # dead end
return []
else:
return ids_edges
def get_incoming(self, id_edge):
# TODO: would be good to have [] as default instead of None!!
ids_edges = self.parent.nodes.ids_incoming[self.ids_fromnode[id_edge]]
if ids_edges == None: # dead end
return []
else:
return ids_edges
def get_lanes(self):
return self.parent.lanes
def get_id_lane_from_sumoinfo(self, id_sumo_edge, ind_lane):
id_edge = self.ids_sumo.get_id_from_index(id_sumo_edge)
return self.ids_lanes[id_edge][ind_lane]
def has_sidewalk(self, id_edge):
return MODES["pedestrian"] in self.parent.lanes.modes_allow[self.ids_lanes[id_edge][0]]
def get_laneindex_allowed(self, id_edge, id_mode):
"""
Returns first lane index of edge id_edge on which id_mode
is allowed.
-1 means not allowed on edge
"""
ind = 0
modes_allow = self.parent.lanes.modes_allow
modes_disallow = self.parent.lanes.modes_disallow
is_disallowed = False
id_lanes = self.ids_lanes[id_edge]
is_cont = True
while is_cont & (ind < len(id_lanes)):
id_lane = id_lanes[ind]
if len(modes_allow[id_lane]) > 0:
if id_mode in modes_allow[id_lane]:
return ind
else:
ind += 1
elif len(modes_disallow[id_lane]) > 0:
if id_mode in modes_disallow[id_lane]:
ind += 1
else:
return ind
else:
# no restrictions
return ind
# no unrestricted lane found
return -1 # not allowed on this edge
def multimake(self, ids_sumo=[], **kwargs):
# fixing of insufficient shape data in edge reader
return self.add_rows(n=len(ids_sumo), ids_sumo=ids_sumo, **kwargs)
def make(self, id_fromnode=0,
id_tonode=0,
id_sumo='',
type_edge='',
num_lanes=1,
speed_max=50.0 / 3.6,
priority=1,
#length = 0.0,
shape=[],
type_spread='right',
name='',
offset_end=0.0,
width_lanes_default=None,
width_sidewalk=-1,
):
if len(shape) < 2: # insufficient shape data
#shape = np.array([ nodes.coords[id_fromnode], nodes.coords[id_tonode] ], np.float32)
# shape should be a list of np array coords
# ATTENTIOn: we need to copy here, otherwise the reference
# to node coordinates will be kept!!
coords = self.ids_tonode.get_linktab().coords
shape = [1.0 * coords[id_fromnode], 1.0 * coords[id_tonode]]
# print 'Edges.make'
# print ' shape',shape,type(shape)
return self.add_row(ids_sumo=id_sumo,
ids_fromnode=id_fromnode,
ids_tonode=id_tonode,
types=type_edge,
nums_lanes=num_lanes,
speeds_max=speed_max,
priorities=priority,
#lengths = length,
shapes=shape,
types_spread=self.types_spread.choices[
type_spread],
names=name,
offsets_end=offset_end,
widths_lanes_default=width_lanes_default,
widths_sidewalk=width_sidewalk,
)
def make_segment_edge_map(self):
"""
Generates a vertex matrix with line segments of all edges
and a map that maps each line segment to edge index.
"""
# here we can make some selection on edge inds
inds = self.get_inds()
# print 'make_linevertices',len(inds)
linevertices = np.zeros((0, 2, 3), np.float32)
vertexinds = np.zeros((0, 2), np.int32)
polyinds = []
lineinds = []
#linecolors = []
#linecolors_highl = []
linebeginstyles = []
lineendstyles = []
i = 0
ind_line = 0
polylines = self.shapes.value[inds]
for ind in inds:
polyline = polylines[ind]
n_seg = len(polyline)
# print ' =======',n_seg#,polyline
if n_seg > 1:
polyvinds = range(n_seg)
# print ' polyvinds\n',polyvinds
vi = np.zeros((2 * n_seg - 2), np.int32)
vi[0] = polyvinds[0]
vi[-1] = polyvinds[-1]
# Important type conversion!!
v = np.zeros((2 * n_seg - 2, 3), np.float32)
v[0] = polyline[0]
v[-1] = polyline[-1]
if len(v) > 2:
# print 'v[1:-1]',v[1:-1]
# print 'v=\n',v
#m = np.repeat(polyline[1:-1],2,0)
# print 'm\n',m,m.shape,m.dtype
#v[1:-1] = m
v[1:-1] = np.repeat(polyline[1:-1], 2, 0)
vi[1:-1] = np.repeat(polyvinds[1:-1], 2)
#vadd = v.reshape((-1,2,3))
# print ' v\n',v
# print ' vi\n',vi
n_lines = len(v) / 2
# print ' v\n',v
polyinds += n_lines * [ind]
lineinds.append(np.arange(ind_line, ind_line + n_lines))
ind_line += n_lines
# print ' polyinds\n',polyinds,n_lines
#linecolors += n_lines*[colors[ind]]
#linecolors_highl += n_lines*[colors_highl[ind]]
# print ' linebeginstyle',linebeginstyle,beginstyles[ind]
else:
# empty polygon treatment
v = np.zeros((0, 3), np.float32)
vi = np.zeros((0), np.int32)
linevertices = np.concatenate(
(linevertices, v.reshape((-1, 2, 3))))
vertexinds = np.concatenate((vertexinds, vi.reshape((-1, 2))))
# print ' linevertex\n',linevertices
i += 1
self._segvertices = linevertices
self._edgeinds = np.array(polyinds, np.int32)
self._seginds = lineinds
self._segvertexinds = np.array(vertexinds, np.int32)
def get_closest_edge(self, p):
"""
Returns the id of the edge closest to point p.
Requires make_segment_edge_map to have been executed first.
"""
# print 'get_closest_edge',p
if len(self) == 0:
return np.array([], np.int)
vertices = self._segvertices
x1 = vertices[:, 0, 0]
y1 = vertices[:, 0, 1]
x2 = vertices[:, 1, 0]
y2 = vertices[:, 1, 1]
# print ' x1', x1
# print ' x2', x2
#halfwidths = 0.5*self.get_widths_array()[self._polyinds]
d2 = get_dist_point_to_segs(p[0:2], x1, y1, x2, y2, is_ending=True)
# print '
# min(d2)=',np.min(d2),'argmin=',np.argmin(d2),self.get_ids(self._edgeinds[np.argmin(d2)])
return self.get_ids(self._edgeinds[np.argmin(d2)])
def export_sumoxml(self, filepath, encoding='UTF-8'):
try:
fd = open(filepath, 'w')
except:
print 'WARNING in export_sumoxml: could not open', filepath
return False
fd.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
fd.write(xm.begin('edges'))
indent = 2
self.write_xml(fd, indent=indent, is_print_begin_end=False)
self.parent.roundabouts.write_xml(
fd, indent=indent, is_print_begin_end=False)
fd.write(xm.end('edges'))
fd.close()
def update(self, ids=None, is_update_lanes=False):
# print 'Edges.update'
if ids == None:
self.widths.value = self.nums_lanes.value * self.widths_lanes_default.value \
+ (self.widths_sidewalk.value >= 0) * \
(self.widths_sidewalk.value - self.widths_lanes_default.value)
# print ' self.widths.values = \n',self.widths.value
#polylines = polypoints_to_polylines(self.shapes.value)
# print ' polylines[0:4]=\n',polylines[0:4]
# print ' polylines[3].shape',polylines[3].shape
#self.lengths.value = get_length_polylines(polypoints_to_polylines(self.shapes.value))
self.lengths.value = get_length_polypoints(self.shapes.value)
ids = self.get_ids()
else:
self.widths[ids] = self.nums_lanes[ids] * self.widths_lanes_default[ids] \
+ (self.widths_sidewalk[ids] >= 0) * \
(self.widths_sidewalk[ids] - self.widths_lanes_default[ids])
# print '
# self.shapes[ids]',self.shapes[ids],type(self.shapes[ids])
self.lengths[ids] = get_length_polypoints(self.shapes[ids])
self.widths.set_modified(True)
self.lengths.set_modified(True)
if is_update_lanes:
# print 'recalc laneshapes',ids
lanes = self.get_lanes()
for id_edge in ids:
lanes.reshape_edgelanes(id_edge)
def set_shapes(self, ids, vertices, is_update_lanes=True):
# print 'set_shapes',ids,vertices
self.shapes[ids] = vertices
if not hasattr(ids, '__iter__'):
ids = [ids]
self.update(ids, is_update_lanes=is_update_lanes)
def update_lanes(self, id_edge, ids_lane):
# print 'update_lanes',id_edge,self.ids_sumo[id_edge] ,ids_lanes,self.nums_lanes[id_edge]
# if self._is_laneshape:
# laneshapes = edges.get_laneshapes(self._id_edge, )
# lanes.shapes[self._ids_lanes[0]]
if len(ids_lane) == 0:
# no lanes given...make some with default values
ids_lane = []
lanes = self.get_lanes()
for i in xrange(self.nums_lanes[id_edge]):
id_lane = lanes.make(index=i, id_edge=id_edge)
ids_lane.append(id_lane)
self.ids_lanes[id_edge] = ids_lane
def correct_endpoint(self):
"""
Corrects edge end-points for older versions.
"""
ids_sumo = self.ids_sumo.get_value()
types_spread = self.types_spread.get_value()
shapes = self.shapes.get_value()
ids_fromnode = self.ids_fromnode.get_value()
ids_tonode = self.ids_tonode.get_value()
coords = self.parent.nodes.coords
ind = 0
is_corrected = False
eps = 50.0
for id_sumo, type_spread, shape, id_fromnode, id_tonode in zip(ids_sumo, types_spread, shapes, ids_fromnode, ids_tonode):
inds_oppo = np.flatnonzero(
(ids_tonode == id_fromnode) & (ids_fromnode == id_tonode))
if len(inds_oppo) >= 1:
ind_oppo = inds_oppo[0]
# print ' correct',id_sumo,ids_sumo[ind_oppo]
ind_oppo = inds_oppo[0]
shape_oppo = list(shapes[ind_oppo])
shape_oppo.reverse()
# print ' shape',shape
# print ' shape',shape_oppo
# print ' id_fromnode',id_fromnode,ids_tonode[ind_oppo]
# print ' id_tomnode',id_tonode,ids_fromnode[ind_oppo]
# print ' coords',coords[id_fromnode], coords[id_tonode]
if len(shape_oppo) == len(shape):
shapes[ind][0] = coords[id_fromnode]
shapes[ind_oppo][-1] = coords[id_fromnode]
#types_spread[inds_oppo[0]] = 0
#types_spread[ind] = 0
is_corrected = True
ind += 1
if is_corrected:
self.update(is_update_lanes=True)
def correct_spread(self):
"""
Corrects spread type for older versions.
"""
ids_sumo = self.ids_sumo.get_value()
types_spread = self.types_spread.get_value()
shapes = self.shapes.get_value()
ind = 0
is_corrected = False
eps = 50.0
for id_sumo, type_spread, shape in zip(ids_sumo, types_spread, shapes):
if type_spread == 1:
if id_sumo[0] == '-':
inds_oppo = np.flatnonzero(ids_sumo == id_sumo[1:])
if len(inds_oppo) == 1:
ind_oppo = inds_oppo[0]
shape_oppo = np.array(shapes[ind_oppo], np.float32)
if len(shape_oppo) == len(shape):
shape_oppo = list(shapes[ind_oppo])
shape_oppo.reverse()
shape_oppo = np.array(shape_oppo, np.float32)
dist = np.sum(
np.abs(shape_oppo - np.array(shape, np.float32))) / float(len(shape))
# print ' id_sumo,dist',id_sumo,dist,eps
if dist < eps:
types_spread[inds_oppo[0]] = 0
types_spread[ind] = 0
is_corrected = True
ind += 1
if is_corrected:
self.update(is_update_lanes=True)
class Nodes(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Node_Descriptions
def __init__(self, parent,
**kwargs):
ident = 'nodes'
self._init_objman(ident=ident, parent=parent, name='Nodes',
xmltag=('nodes', 'node', 'ids_sumo'),
version=0.1,
**kwargs)
self._init_attributes()
def _init_attributes(self):
self.add_col(SumoIdsConf('Node'))
self.add_col(am.ArrayConf('coords', np.zeros(3, dtype=np.float32),
dtype=np.float32,
groupnames=['state'],
perm='r',
name='Coords',
unit='m',
info='Node center coordinates.',
))
self.add_col(am.ArrayConf('radii', 5.0,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Radius',
info='Node radius',
))
self.add(cm.AttrConf('radius_default', 3.0,
groupnames=['options'],
perm='rw',
unit='m',
name='Default radius',
info='Default node radius.',
))
self.add_col(am.ArrayConf('types', 0,
choices={
"priority": 0,
"traffic_light": 1,
"right_before_left": 2,
"unregulated": 3,
"priority_stop": 4,
"traffic_light_unregulated": 5,
"allway_stop": 6,
"rail_signal": 7,
"zipper": 8,
"traffic_light_right_on_red": 9,
"rail_crossing": 10,
"dead_end": 11,
},
dtype=np.int32,
perm='rw',
name='Type',
info='Node type.',
xmltag='type',
))
# this is actually a property defined in the TLS logic
self.add_col(am.ArrayConf('types_tl', 0,
dtype=np.int32,
choices={
"none": 0,
"static": 1,
"actuated": 2,
},
perm='rw',
name='TL type',
info='Traffic light type.',
xmltag='tlType',
))
self.add_col(am.ArrayConf('turnradii', 1.5,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Turn rad',
unit='m',
info='optional turning radius (for all corners) for that node.',
xmltag='radius',
))
self.add_col(am.ArrayConf('are_keep_clear', True,
dtype=np.bool,
groupnames=['state'],
perm='rw',
name='keep clear',
info='Whether the junction-blocking-heuristic should be activated at this node.',
xmltag='keepClear',
))
if self.get_version() < 0.1:
self.delete('ids_tl_prog')
self.turnradii.xmltag = 'radius'
self.are_keep_clear.xmltag = 'keepClear'
self.types_tl.xmltag = 'tlType'
self.add_col(am.IdlistsArrayConf('ids_controlled', edges,
groupnames=['state'],
name='IDs controlled',
info='ID list of controlled edges. Edges which shall be controlled by a joined TLS despite being incoming as well as outgoing to the jointly controlled nodes.',
))
def set_edges(self, edges):
self.add_col(am.IdlistsArrayConf('ids_incoming', edges,
groupnames=['state'],
name='ID incoming',
info='ID list of incoming edges.',
))
self.add_col(am.IdlistsArrayConf('ids_outgoing', edges,
groupnames=['state'],
name='ID outgoing',
info='ID list of outgoing edges.',
))
self.add_col(am.IdlistsArrayConf('ids_controlled', edges,
groupnames=['state'],
name='IDs controlled',
info='ID list of controlled edges. Edges which shall be controlled by a joined TLS despite being incoming as well as outgoing to the jointly controlled nodes.',
xmltag='controlledInner',
))
def set_tlss(self, tlss):
self.add_col(am.IdsArrayConf('ids_tls', tlss,
groupnames=['state'],
name='ID Tls',
info='ID of traffic light system (TLS). Nodes with the same tls-value will be joined into a single traffic light system.',
xmltag='tl',
))
def multimake(self, ids_sumo=[], **kwargs):
return self.add_rows(n=len(ids_sumo), ids_sumo=ids_sumo, **kwargs)
def make(self, id_sumo='', nodetype='priority', coord=[],
type_tl='Static', id_tl_prog=0,
turnradius=1.5, is_keep_clear=True):
return self.add_row(ids_sumo=id_sumo,
types=self.types.choices[nodetype],
coords=coord,
types_tl=self.types_tl.choices[type_tl],
ids_tl_prog=id_tl_prog,
turnradii=turnradius,
are_keep_clear=is_keep_clear,
)
def add_outgoing(self, id_node, id_edge):
if self.ids_outgoing[id_node] != None:
if id_edge not in self.ids_outgoing[id_node]:
self.ids_outgoing[id_node].append(id_edge)
else:
self.ids_outgoing[id_node] = [id_edge]
def add_incoming(self, id_node, id_edge):
if self.ids_incoming[id_node] != None:
if id_edge not in self.ids_incoming[id_node]:
self.ids_incoming[id_node].append(id_edge)
else:
self.ids_incoming[id_node] = [id_edge]
def export_sumoxml(self, filepath, encoding='UTF-8'):
try:
fd = open(filepath, 'w')
except:
print 'WARNING in export_sumoxml: could not open', filepath
return False
fd.write('<?xml version="1.0" encoding="%s"?>\n' % encoding)
indent = 0
self.write_xml(fd, indent)
fd.close()
def write_xml(self, fd, indent):
# print 'Nodes.write_xml'
xmltag, xmltag_item, attrname_id = self.xmltag
attrsman = self.get_attrsman()
# getattr(self.get_attrsman(), attrname_id)
attrconfig_id = attrsman.get_config(attrname_id)
xmltag_id = attrconfig_id.xmltag
#attrsman = self.get_attrsman()
coordsconfig = attrsman.get_config('coords')
colconfigs = attrsman.get_colconfigs(is_all=True)
# print ' header'
fd.write(xm.start(xmltag, indent))
# print ' ', self.parent.get_attrsman().get_config('version').attrname,self.parent.get_attrsman().get_config('version').get_value()
#fd.write( self.parent.get_attrsman().get_config('version').write_xml(fd) )
self.parent.get_attrsman().get_config('version').write_xml(fd)
fd.write(xm.stop())
fd.write(xm.start('location', indent + 2))
# print ' groups:',self.parent.get_attrsman().get_groups()
for attrconfig in self.parent.get_attrsman().get_group('location'):
# print ' locationconfig',attrconfig.attrname
attrconfig.write_xml(fd)
fd.write(xm.stopit())
for _id in self.get_ids():
fd.write(xm.start(xmltag_item, indent + 2))
# print ' make tag and id',_id
fd.write(xm.num(xmltag_id, attrconfig_id[_id]))
# print ' write columns'
for attrconfig in colconfigs:
# print ' colconfig',attrconfig.attrname
if attrconfig == coordsconfig:
x, y, z = attrconfig[_id]
fd.write(xm.num('x', x))
fd.write(xm.num('y', y))
fd.write(xm.num('z', z))
elif attrconfig != attrconfig_id:
attrconfig.write_xml(fd, _id)
fd.write(xm.stopit())
fd.write(xm.end(xmltag, indent))
# def clean_node(self, id_node):
def clean(self, is_reshape_edgelanes=False, nodestretchfactor=2.8, n_min_nodeedges=2):
#is_reshape_edgelanes = False
print 'Nodes.clean', len(self), 'is_reshape_edgelanes', is_reshape_edgelanes
edges = self.parent.edges
lanes = self.parent.lanes
rad_min = self.radius_default.value
# print ' id(edges.shapes),id(edges.shapes.value)', id(edges.shapes),id(edges.shapes.value)#,edges.shapes.value
# print ' id(self.coords),id(self.coords.value)', id(self.coords),id(self.coords.value)#,self.coords.value
# print ' self.coords.value.shape',self.coords.value.shape
# print '
# len(self.coords),self.coords.shape',len(self.coords.value),self.coords.value
for id_node in self.get_ids():
ind_node = self.get_inds(id_node)
# if id_node in TESTNODES:
# print 79*'_'
# print ' node',id_node
# print ' coords',self.coords[id_node]
# print ' coords',TESTNODES[0],self.coords[TESTNODES[0]]
# print ' coords',TESTNODES[1],self.coords[TESTNODES[1]]
# print ' radii',self.radii[id_node]
# distance to other nodes
#d = np.sum(np.abs(self.coords[id_node]-self.coords.value),1)
#d = np.linalg.norm(self.coords[id_node]-self.coords.value,1)
coords = self.coords[id_node]
d = get_norm_2d(coords - self.coords.value)
d[ind_node] = np.inf
d_min = np.min(d)
# print ' d_min',d_min
ids_edge_out = edges.select_ids(
edges.ids_fromnode.value == id_node)
ids_edge_in = edges.select_ids(edges.ids_tonode.value == id_node)
# estimate circumference of junction and determine node radius
n_edges = len(ids_edge_in) + len(ids_edge_out)
width_av = np.mean(np.concatenate(
(edges.widths[ids_edge_in], edges.widths[ids_edge_out])))
# here we assume a node with 6 entrance sides and 2 average-width edges per side
#circum = 2.0*max(6,n_edges)*width_av
circum = nodestretchfactor * max(2, n_edges) * width_av
# print '
# n_edges,width_av,radius',n_edges,width_av,max(6,n_edges)*width_av/(2*np.pi)
radius = min(
max(circum / (n_min_nodeedges * np.pi), rad_min), 0.4 * d_min)
self.radii[id_node] = radius
# if id_node in TESTNODES:
# print ' AFTER change radius:'#OK
# print ' coords',TESTNODES[0],self.coords[TESTNODES[0]]
# print ' coords',TESTNODES[1],self.coords[TESTNODES[1]]
for id_edge in ids_edge_in:
# print ' in edge',id_edge
shape = edges.shapes[id_edge]
n_shape = len(shape)
# edges.shapes[id_edge][::-1]:
for i in xrange(n_shape - 1, -1, -1):
d = get_norm_2d(np.array([shape[i] - coords]))[0]
# print ' i,d,r',i , d, radius,d>radius
if d > radius:
# print ' **',i,d, radius
break
x, y = shape[i][:2]
# print 'shape',shape,
#dx,dy = shape[i+1][:2] - shape[i][:2]
dx, dy = coords[:2] - shape[i][:2]
dn = np.sqrt(dx * dx + dy * dy)
x1 = x + (d - radius) * dx / dn
y1 = y + (d - radius) * dy / dn
if i == n_shape - 1:
shape[-1][:2] = [x1, y1]
edges.shapes[id_edge] = shape
else: # elif i>0:
shape[i + 1][:2] = [x1, y1]
edges.shapes[id_edge] = shape[:i + 2]
# print ' x,y',x,y
# print ' x1,y1',x1,y1
# print ' shape[:i+2]',shape[:i+2]
# print ' shapes[id_edge]',edges.shapes[id_edge]
if is_reshape_edgelanes:
lanes.reshape_edgelanes(id_edge)
for id_edge in ids_edge_out:
# print ' out edge',id_edge
shape = edges.shapes[id_edge]
n_shape = len(shape)
# edges.shapes[id_edge][::-1]:
for i in xrange(n_shape):
d = get_norm_2d(np.array([shape[i] - coords]))[0]
# print ' i,d,r',i , d, radius,d>radius
if d > radius:
# print ' **',i,d, radius
break
x, y = coords[:2] # shape[i-1][:2]
# print 'shape',shape,
#dx,dy = shape[i][:2]- shape[i-1][:2]
dx, dy = shape[i][:2] - coords[:2]
dn = np.sqrt(dx * dx + dy * dy)
x1 = x + (radius) * dx / dn
y1 = y + (radius) * dy / dn
if i == 0:
shape[0][:2] = [x1, y1]
edges.shapes[id_edge] = shape
elif i < n_shape:
shape[i - 1][:2] = [x1, y1]
edges.shapes[id_edge] = shape[i - 1:]
# print ' x,y',x,y
# print ' x1,y1',x1,y1
# print ' shape[:i+2]',shape[:i+2]
# print ' shapes[id_edge]',edges.shapes[id_edge]
if is_reshape_edgelanes:
lanes.reshape_edgelanes(id_edge)
self.radii.set_modified(True)
edges.shapes.set_modified(True)
class Network(cm.BaseObjman):
def __init__(self, parent=None, name='Network', **kwargs):
# print 'Network.__init__',parent,name
self._init_objman(ident='net', parent=parent, name=name,
# xmltag = 'net',# no, done by netconvert
**kwargs)
attrsman = self.set_attrsman(cm.Attrsman(self))
# print ' Network.parent',self.parent
self._init_attributes()
self.modes.add_rows(ids=MODES.values(), names=MODES.keys())
def _init_attributes(self):
attrsman = self.get_attrsman()
self.version = attrsman.add(cm.AttrConf('version', '0.25',
groupnames=['aux'],
perm='r',
name='Network version',
info='Sumo network version',
xmltag='version'
))
self.modes = attrsman.add(cm.ObjConf(Modes(self)))
# print 'Network.__init__'
# print ' MODES.values()',MODES.values()
# print ' MODES.keys()',MODES.keys()
# self.modes.print_attrs()
##
##
self.nodes = attrsman.add(cm.ObjConf(Nodes(self)))
self.edges = attrsman.add(cm.ObjConf(Edges(self)))
self.lanes = attrsman.add(cm.ObjConf(
Lanes(self, self.edges, self.modes)))
self.edges.set_nodes(self.nodes)
self.edges.set_lanes(self.lanes)
self.nodes.set_edges(self.edges)
self.roundabouts = attrsman.add(cm.ObjConf(
Roundabouts(self, self.edges, self.nodes)))
self.connections = attrsman.add(cm.ObjConf(Connections(self)))
self.crossings = attrsman.add(cm.ObjConf(Crossings(self)))
self.tlss = attrsman.add(cm.ObjConf(TrafficLightSystems(self)))
self.nodes.set_tlss(self.tlss)
self._offset = attrsman.add(cm.AttrConf('_offset', np.array([0.0, 0.0], dtype=np.float32),
groupnames=['location', ],
perm='r',
name='Offset',
info='Network offset in WEP coordinates',
xmltag='netOffset',
xmlsep=',',
))
self._projparams = attrsman.add(cm.AttrConf('_projparams', "!",
groupnames=['location', ],
perm='r',
name='Projection',
info='Projection parameters',
xmltag='projParameter',
))
self._boundaries = attrsman.add(cm.AttrConf('_boundaries', np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32),
groupnames=['location', ],
perm='r',
name='Boundaries',
unit='m',
info='Network boundaries',
xmltag='convBoundary',
xmlsep=',',
))
self._boundaries_orig = attrsman.add(cm.AttrConf('_boundaries_orig', np.array([0.0, 0.0, 0.0, 0.0]),
groupnames=[
'location', ],
perm='r',
name='Orig. boundaries',
info='Original network boundaries',
xmltag='origBoundary',
xmlsep=',',
))
def _init_constants(self):
pass
#self._oldoffset = self._offset.copy()
# print 'net._init_constants',self._offset,self._oldoffset
# def set_oldoffset(self, offset):
# """
# Set explicitely an old net offset, if existing.
# This allows to update coordinates and shapes outside the network.
#
# """
# self._oldoffset = offset
def set_version(self, version):
self.version = version
def get_version(self):
return self.version
def is_empty(self):
return (len(self.nodes) == 0) & (len(self.edges) == 0)
def set_offset(self, offset):
# if (offset is not self._offset) :
# self._oldoffset = self._offset.copy()
self._offset = offset
def get_offset(self):
return self._offset
# def is_offset_change(self):
# """
# Returns true if offset changed be approx 1 mm after last net import
# """
# return np.sum(abs(self._oldoffset - self._offset))>0.002
# def get_deltaoffset(self):
# return self._offset - self._oldoffset
# def remove_oldoffset(self):
# self._oldoffset = None
def set_boundaries(self, convBoundary, origBoundary=None):
"""
Format of Boundary box
[MinX, MinY ,MaxX, MaxY ]
"""
self._boundaries = convBoundary
if origBoundary == None:
self._boundaries_orig = self._boundaries
else:
self._boundaries_orig = origBoundary
def get_boundaries(self):
return self._boundaries, self._boundaries_orig
def merge_boundaries(self, convBoundary, origBoundary=None):
"""
Format of Boundary box
[MinX, MinY ,MaxX, MaxY ]
"""
# print 'mergeBoundaries'
self._boundaries = self.get_boundary_union(
convBoundary, self._boundaries)
if origBoundary == None:
self._boundaries_orig = self._boundaries
else:
self._boundaries_orig = self.get_boundary_union(
origBoundary, self._boundaries_orig)
# print ' self._boundaries_orig =',self._boundaries_orig
# print ' self._boundaries =',self._boundaries
def get_boundary_union(self, BB1, BB2):
return [min(BB1[0], BB2[0]), min(BB1[1], BB2[1]), max(BB1[2], BB2[2]), max(BB1[3], BB2[3])]
def get_projparams(self):
return self._projparams
def set_projparams(self, projparams="!"):
# print 'setprojparams',projparams
self._projparams = projparams
def get_rootfilename(self):
if self.parent is not None: # scenario exists
return self.parent.get_rootfilename()
else:
return self.get_ident()
def get_rootfilepath(self):
if self.parent is not None:
return self.parent.get_rootfilepath()
else:
return os.path.join(os.getcwd(), self.get_rootfilename())
def get_filepath(self):
"""
Default network filepath.
"""
return self.get_rootfilepath() + '.net.xml'
# def clear(self):
# """
# Remove all netelements.
# """
# #self.reset()
# # at some stage in the future this should be automatic
# self.nodes.clear()
# self.edges.clear()
# self.lanes.clear()
# self.roundabouts.clear()
# self.connections.clear()
# self.crossings.clear()
# self.tlss.clear()
def call_netedit(self, filepath=None, is_maps=False):
filepath = self.export_netxml()
if filepath != "":
# print ' netconvert: success'
names = os.path.basename(filepath).split('.')
dirname = os.path.dirname(filepath)
if len(names) >= 3:
rootname = '.'.join(names[:-2])
elif len(names) <= 2:
rootname = names[0]
configfilepath = self._write_guiconfig(rootname, dirname, is_maps)
cml = 'netedit '\
+ ' --sumo-net-file ' + filepathlist_to_filepathstring(filepath)\
+ ' --gui-settings-file ' + \
filepathlist_to_filepathstring(configfilepath)
#+ ' --output-prefix '+ filepathlist_to_filepathstring(os.path.join(dirname,rootname))
proc = subprocess.Popen(cml, shell=True)
# print ' run_cml cml=',cml
# print ' pid = ',proc.pid
proc.wait()
if proc.returncode == 0:
print ' netedit:success'
return self.import_netxml()
# return self.import_xml() # use if netedit exports to plain
# xml files
else:
print ' netedit:error'
return False
else:
print ' netconvert:error'
return False
def call_sumogui(self, filepath=None, is_maps=False):
if filepath == None:
filepath = self.get_filepath()
dirname = os.path.dirname(filepath)
names = os.path.basename(filepath).split('.')
dirname = os.path.dirname(filepath)
if len(names) >= 3:
rootname = '.'.join(names[:-2])
elif len(names) <= 2:
rootname = names[0]
configfilepath = self._write_guiconfig(rootname, dirname, is_maps)
polyfilepath = os.path.join(dirname, rootname + '.poly.xml')
if os.path.isfile(polyfilepath):
option_addfiles = ' --additional-files ' + \
filepathlist_to_filepathstring(polyfilepath)
else:
option_addfiles = ''
cml = 'sumo-gui '\
+ ' --net-file ' + filepathlist_to_filepathstring(filepath)\
+ ' --gui-settings-file ' + filepathlist_to_filepathstring(configfilepath)\
+ option_addfiles
proc = subprocess.Popen(cml, shell=True)
print ' run_cml cml=', cml
print ' pid = ', proc.pid
proc.wait()
return proc.returncode
def _write_guiconfig(self, rootname, dirname, is_maps):
# check if there are maps
maps = None
if is_maps:
if self.parent != None:
maps = self.parent.landuse.maps
# write netedit configfile
templatedirpath = os.path.dirname(os.path.abspath(__file__))
fd_template = open(os.path.join(
templatedirpath, 'netedit_config.xml'), 'r')
configfilepath = os.path.join(dirname, rootname + '.netedit.xml')
fd_config = open(configfilepath, 'w')
for line in fd_template.readlines():
if line.count('<decals>') == 1:
fd_config.write(line)
if is_maps:
maps.write_decals(fd_config, indent=12)
else:
fd_config.write(line)
fd_template.close()
fd_config.close()
return configfilepath
def import_netxml(self, filepath=None, rootname=None, is_clean_nodes=False, is_remove_xmlfiles=False):
print 'import_netxml', filepath
if rootname == None:
rootname = self.get_rootfilename()
if filepath == None:
filepath = self.get_filepath()
dirname = os.path.dirname(filepath)
# print ' modes.names',self.modes.names
cml = 'netconvert'\
+ ' --sumo-net-file ' + filepathlist_to_filepathstring(filepath)\
+ ' --plain-output-prefix ' + \
filepathlist_to_filepathstring(os.path.join(dirname, rootname))
proc = subprocess.Popen(cml, shell=True)
print ' run_cml cml=', cml
print ' pid = ', proc.pid
proc.wait()
if not proc.returncode:
print ' modes.names', self.modes.names
return self.import_xml(rootname, dirname)
else:
return False
def export_netxml(self, filepath=None):
if filepath == None:
filepath = self.get_filepath()
print 'Net.export_netxml', filepath
# now create rootfilepath in order to export first
# the various xml file , then call netconvert
names = os.path.basename(filepath).split('.')
dirname = os.path.dirname(filepath)
if len(names) >= 3:
rootname = '.'.join(names[:-2])
elif len(names) <= 2:
rootname = names[0]
filepath_edges = os.path.join(dirname, rootname + '.edg.xml')
filepath_nodes = os.path.join(dirname, rootname + '.nod.xml')
filepath_connections = os.path.join(dirname, rootname + '.con.xml')
filepath_tlss = os.path.join(dirname, rootname + '.tll.xml')
self.edges.export_sumoxml(filepath_edges)
self.nodes.export_sumoxml(filepath_nodes)
self.connections.export_sumoxml(filepath_connections)
if len(self.tlss) > 0:
self.tlss.export_sumoxml(filepath_tlss)
cml = 'netconvert --verbose --ignore-errors.edge-type'\
+ ' --node-files ' + filepathlist_to_filepathstring(filepath_nodes)\
+ ' --edge-files ' + filepathlist_to_filepathstring(filepath_edges)\
+ ' --connection-files ' + filepathlist_to_filepathstring(filepath_connections)\
+ ' --output-file ' + filepathlist_to_filepathstring(filepath)
if len(self.tlss) > 0:
cml += ' --tllogic-files ' + \
filepathlist_to_filepathstring(filepath_tlss)
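# For orientation (illustrative, assuming a rootname of 'myscenario'), the
# assembled command looks roughly like:
#   netconvert --verbose --ignore-errors.edge-type
#       --node-files myscenario.nod.xml --edge-files myscenario.edg.xml
#       --connection-files myscenario.con.xml --output-file myscenario.net.xml
#       [--tllogic-files myscenario.tll.xml]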
proc = subprocess.Popen(cml, shell=True)
print 'run_cml cml=', cml
print ' pid = ', proc.pid
proc.wait()
if proc.returncode == 0:
print ' success'
return filepath
else:
print '  failure: netconvert returned an error'
return ''
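# export_netxml() is the inverse of import_netxml(): it first dumps the plain
# nod/edg/con (and tll) files from the in-memory network and then lets
# netconvert assemble them into a single .net.xml. A minimal manual equivalent
# on the shell (illustrative only, without the extra options used above):
#   netconvert --node-files x.nod.xml --edge-files x.edg.xml \
#              --connection-files x.con.xml --output-file x.net.xml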
def import_xml(self, rootname=None, dirname=None, is_clean_nodes=False, is_remove_xmlfiles=False):
if not self.is_empty():
oldoffset = self.get_offset()
else:
oldoffset = None
print 'Network.import_xml oldoffset', oldoffset
# remove current network
# print ' remove current network'
self.clear()
# reload default SUMO MODES (maybe should not be here)
self.modes.add_rows(ids=MODES.values(), names=MODES.keys())
if rootname is None:
rootname = self.get_rootfilename()
if dirname == None:
dirname = os.path.dirname(self.get_rootfilepath())
# print 'import_xml',dirname,rootname
nodefilepath = os.path.join(dirname, rootname + '.nod.xml')
edgefilepath = os.path.join(dirname, rootname + '.edg.xml')
confilepath = os.path.join(dirname, rootname + '.con.xml')
tlsfilepath = os.path.join(dirname, rootname + '.tll.xml')
if os.path.isfile(edgefilepath) & os.path.isfile(nodefilepath) & os.path.isfile(confilepath):
nodereader = self.import_sumonodes(
nodefilepath, is_remove_xmlfiles)
edgereader = self.import_sumoedges(
edgefilepath, is_remove_xmlfiles)
if is_clean_nodes:
# make edges and lanes end at the node boundaries
# also recalculate lane shapes from edge shapes...if lane shapes are missing
#self.lanes.reshape() #
self.nodes.clean(is_reshape_edgelanes=True)
else:
# just recalculate lane shapes from edge shapes...if lane
# shapes are missing
self.lanes.reshape()
# #pass
self.import_sumoconnections(confilepath, is_remove_xmlfiles)
if os.path.isfile(tlsfilepath):
self.import_sumotls(tlsfilepath, is_remove_xmlfiles)
# this fixes some references to edges and tls
nodereader.write_to_net_post()
if oldoffset is not None:
# check if offset changed
# if self.is_offset_change():
deltaoffset = self.get_offset() - oldoffset
# print ' check
# update_netoffset',deltaoffset,oldoffset,self.get_offset(),np.sum(abs(deltaoffset))>0.002
if np.sum(abs(deltaoffset)) > 0.002:
# communicate to scenario
if self.parent is not None:
self.parent.update_netoffset(deltaoffset)
# clean up ...should be done in each importer??
# if is_remove_xmlfiles:
# os.remove(nodefilepath)
# os.remove(edgefilepath)
# os.remove(confilepath)
# if os.path.isfile(tlsfilepath):
# os.remove(tlsfilepath)
return True
else:
self.get_logger().w(
'import_xml: plain nod/edg/con files not found', key='message')
return False
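# Import order matters in import_xml(): nodes must exist before edges can
# resolve their from/to nodes, edges before connections can resolve lanes, and
# traffic-light programs are read last so that nodereader.write_to_net_post()
# can translate the 'controlledInner' and 'tl' attributes (kept as SUMO ids
# during parsing) into internal edge and tls ids.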
def import_sumonodes(self, filename, is_remove_xmlfiles=False, logger=None, **others):
print 'import_sumonodes', filename
# print ' parent',self.parent
self.get_logger().w('import_sumonodes', key='message')
# timeit
exectime_start = time.clock()
counter = SumoNodeCounter()
#reader = SumoEdgeReader(self, **others)
# try:
parse(filename, counter)
# print ' after: n_edge', counter.n_edge
fastreader = SumoNodeReader(self, counter)
parse(filename, fastreader)
fastreader.write_to_net()
# timeit
print ' exec time=', time.clock() - exectime_start
return fastreader
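# import_sumonodes/-edges/-connections share a two-pass SAX pattern: a
# *Counter handler is parsed first only to count elements so that the *Reader
# handler can preallocate numpy arrays of the right size instead of growing
# Python lists. Sketch of the pattern as used above:
#   counter = SumoNodeCounter(); parse(filename, counter)
#   fastreader = SumoNodeReader(self, counter); parse(filename, fastreader)
#   fastreader.write_to_net()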
def import_sumoedges(self, filename, is_remove_xmlfiles=False, logger=None, **others):
print 'import_sumoedges', filename
logger = self.get_logger()
logger.w('import_sumoedges', key='message')
# timeit
exectime_start = time.clock()
counter = SumoEdgeCounter()
#reader = SumoEdgeReader(self, **others)
# try:
parse(filename, counter)
# print ' after: n_edge', counter.n_edge
fastreader = SumoEdgeReader(self, counter)
parse(filename, fastreader)
fastreader.write_to_net()
self.edges.update()
if is_remove_xmlfiles:
os.remove(filename)
# timeit
print ' exec time=', time.clock() - exectime_start
# except KeyError:
# print >> sys.stderr, "Please mind that the network format has changed in 0.16.0, you may need to update your network!"
# raise
return fastreader
def import_sumoconnections(self, filename, is_remove_xmlfiles=False, logger=None, **others):
print 'import_sumoconnections', filename
logger = self.get_logger()
logger.w('import_sumoconnections', key='message')
# timeit
exectime_start = time.clock()
counter = SumoConnectionCounter()
parse(filename, counter)
fastreader = SumoConnectionReader(self, counter)
parse(filename, fastreader)
fastreader.write_to_net()
# timeit
exectime_end = time.clock()
print ' exec time=', exectime_end - exectime_start
return fastreader
def import_sumotls(self, filename, is_remove_xmlfiles=False, logger=None, **others):
print 'import_sumotls', filename
logger = self.get_logger()
logger.w('import_sumotls', key='message')
# timeit
exectime_start = time.clock()
reader = SumoTllReader(self)
parse(filename, reader)
# timeit
exectime_end = time.clock()
print ' exec time=', exectime_end - exectime_start
return reader
def get_id_mode(self, modename):
return self.modes.get_id_mode(modename)
def add_node(self, **kwargs):
return self.nodes.make(**kwargs)
def add_nodes(self, **kwargs):
# print 'add_nodes'
return self.nodes.multimake(**kwargs)
def add_edge(self, **kwargs):
# print 'add_edge'
return self.edges.make(**kwargs)
def add_edges(self, **kwargs):
# print 'add_edges'
return self.edges.multimake(**kwargs)
def add_roundabout(self, **kwargs):
return self.roundabouts.make(**kwargs)
def add_roundabouts(self, **kwargs):
return self.roundabouts.multimake(**kwargs)
def add_lane(self, **kwargs):
# print 'add_lane\n',
# for key, value in kwargs.iteritems():
# print ' ',key,type(value),value
return self.lanes.make(**kwargs)
def add_lanes(self, **kwargs):
# print 'add_lanes\n',
# for key, value in kwargs.iteritems():
# print ' ',key,type(value),value
return self.lanes.multimake(**kwargs)
def add_connection(self, id_fromlane=-1, id_tolane=-1, **kwargs):
# print 'add_lane\n',
# for key, value in kwargs.iteritems():
# print ' ',key,type(value),value
id_fromedge = self.lanes.ids_edge[id_fromlane]
id_toedge = self.lanes.ids_edge[id_tolane]
id_node = self.edges.ids_tonode[id_fromedge]
self.nodes.add_incoming(id_node, id_fromedge)
self.nodes.add_outgoing(id_node, id_toedge)
return self.connections.make(id_fromlane=id_fromlane, id_tolane=id_tolane, **kwargs)
def add_connections(self, ids_fromlane=[], ids_tolane=[], **kwargs):
# print 'add_lane\n',
# for key, value in kwargs.iteritems():
# print ' ',key,type(value),value
ids_fromedge = self.lanes.ids_edge[ids_fromlane]
ids_toedge = self.lanes.ids_edge[ids_tolane]
ids_node = self.edges.ids_tonode[ids_fromedge]
add_incoming = self.nodes.add_incoming
add_outgoing = self.nodes.add_outgoing
for id_node, id_fromedge, id_toedge in zip(ids_node, ids_fromedge, ids_toedge):
add_incoming(id_node, id_fromedge)
add_outgoing(id_node, id_toedge)
return self.connections.multimake(ids_fromlane=ids_fromlane, ids_tolane=ids_tolane, **kwargs)
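# add_connection()/add_connections() do more than create connection records:
# they look up the edge of each from-lane and to-lane and register the
# from-edge as incoming and the to-edge as outgoing at the to-node of the
# from-edge. Hypothetical usage from a script that already holds lane ids:
#   id_con = net.add_connection(id_fromlane=id_lane_a, id_tolane=id_lane_b)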
def add_crossing(self, **kwargs):
# print 'add_crossing\n',
return self.crossings.make(**kwargs)
def add_crossings(self, **kwargs):
# print 'add_crossings\n',
return self.crossings.multimake(**kwargs)
class SumoConnectionCounter(handler.ContentHandler):
"""Parses a SUMO edge XML file and counts edges and lanes."""
def __init__(self):
self.n_con = 0
self.n_cross = 0
def startElement(self, name, attrs):
if name == 'connection':
self.n_con += 1
if name == 'crossing':
self.n_cross += 1
class SumoConnectionReader(handler.ContentHandler):
"""Parses a SUMO connection XML file"""
def __init__(self, net, counter):
self._net = net
# print
# 'SumoConnectionReader:n_con,n_cross',counter.n_con,counter.n_cross
# connections
self._ind_con = -1
self.ids_fromlane = np.zeros(counter.n_con, np.int32)
self.ids_tolane = np.zeros(counter.n_con, np.int32)
self.are_passes = np.zeros(counter.n_con, np.bool)
self.are_keep_clear = np.zeros(counter.n_con, np.bool)
self.positions_cont = np.zeros(counter.n_con, np.float32)
self.are_uncontrolled = np.zeros(counter.n_con, np.bool)
# crossings
self._ind_cross = -1
self.ids_node = np.zeros(counter.n_cross, np.int32)
self.ids_edges = np.zeros(counter.n_cross, np.object)
self.widths = np.zeros(counter.n_cross, np.float32)
self.are_priority = np.zeros(counter.n_cross, np.bool)
self.are_discard = np.zeros(counter.n_cross, np.bool)
self._ids_node_sumo = self._net.nodes.ids_sumo
self._ids_edge_sumo = self._net.edges.ids_sumo
self._ids_edgelanes = self._net.edges.ids_lanes
def startElement(self, name, attrs):
# print 'startElement',name
if name == 'connection':
# <connection from="153009994" to="153009966#1" fromLane="0" toLane="0" pass="1"/>
self._ind_con += 1
i = self._ind_con
# print 'startElement',name,i
id_fromedge = self._ids_edge_sumo.get_id_from_index(attrs['from'])
id_toedge = self._ids_edge_sumo.get_id_from_index(attrs['to'])
#id_fromlane = self._ids_edgelanes[id_fromedge][int(attrs.get('fromLane',0))]
#id_tolane = self._ids_edgelanes[id_toedge][int(attrs.get('toLane',0))]
# print ' id_sumo fromedge',
# attrs['from'],len(self._ids_edgelanes[id_fromedge]) ,
# int(attrs['fromLane'])
self.ids_fromlane[i] = self._ids_edgelanes[
id_fromedge][int(attrs['fromLane'])]
self.ids_tolane[i] = self._ids_edgelanes[
id_toedge][int(attrs['toLane'])]
self.are_passes[i] = int(attrs.get('pass', 0))
self.are_keep_clear[i] = int(attrs.get('keepClear', 1))
self.positions_cont[i] = float(attrs.get('contPos', 0.0))
self.are_uncontrolled[i] = int(attrs.get('uncontrolled', 0))
if name == 'crossing':
self._ind_cross += 1
i = self._ind_cross
# print 'startElement',name
self.ids_node[i] = self._ids_node_sumo.get_id_from_index(attrs[
'node'])
self.ids_edges[i] = self._ids_edge_sumo.get_ids_from_indices(
attrs['edges'].split(' '))
self.widths[i] = float(attrs.get('width', 4.0))
self.are_priority[i] = int(attrs.get('priority', 0))
self.are_discard[i] = int(attrs.get('discard', 0))
def write_to_net(self):
# print 'write_to_net'
ids_con = self._net.add_connections(
ids_fromlane=self.ids_fromlane,
ids_tolane=self.ids_tolane,
are_passes=self.are_passes,
are_keep_clear=self.are_keep_clear,
positions_cont=self.positions_cont,
are_uncontrolled=self.are_uncontrolled,
)
ids_cross = self._net.add_crossings(
ids_node=self.ids_node,
ids_edges=self.ids_edges,
widths=self.widths,
are_priority=self.are_priority,
are_discard=self.are_discard,
)
class SumoNodeCounter(handler.ContentHandler):
"""Parses a SUMO edge XML file and counts edges and lanes."""
def __init__(self):
self.n_node = 0
def startElement(self, name, attrs):
# print 'startElement',name,self.n_edge,self.n_lane,self.n_roundabout
if name == 'node':
self.n_node += 1
class SumoNodeReader(handler.ContentHandler):
"""Parses a SUMO node XML file"""
def __init__(self, net, counter):
self._net = net
# print 'SumoEdgeFastreader'
#self._ids_node_sumo = net.nodes.ids_sumo
#self._nodecoords = net.nodes.coords
self._nodetypemap = self._net.nodes.types.choices
self._tltypemap = self._net.nodes.types_tl.choices
# node attrs
self.ids_sumo = np.zeros(counter.n_node, np.object)
self.types = np.zeros(counter.n_node, np.int32)
self.coords = np.zeros((counter.n_node, 3), np.float32)
self.types_tl = np.zeros(counter.n_node, np.int32)
self.ids_sumo_tls = np.zeros(counter.n_node, np.object)
self.turnradii = np.zeros(counter.n_node, np.float32)
self.are_keep_clear = np.zeros(counter.n_node, np.bool)
self._ind_node = -1
self.ids_sumo_controlled = np.zeros(counter.n_node, np.object)
self.ids_sumo_controlled[:] = None
self._offset_delta = np.array([0.0, 0.0])
self._isNew = len(self._net.nodes) == 0
def write_to_net(self):
# print 'write_to_net'
self.ids_node = self._net.add_nodes(
ids_sumo=self.ids_sumo,
types=self.types,
coords=self.coords,
types_tl=self.types_tl,
turnradii=self.turnradii,
are_keep_clear=self.are_keep_clear,
)
# attention:
# attributes ids_sumo_tls and ids_sumo_controlled will be added later
# when tls and edges are read
# see write_to_net_post
def write_to_net_post(self):
"""
To be called after edges and tls are read.
"""
# print 'write_to_net_post'
get_ids_edge = self._net.edges.ids_sumo.get_ids_from_indices
ids_controlled = self._net.nodes.ids_controlled
for id_node, ids_sumo_edge in zip(self.ids_node, self.ids_sumo_controlled):
if ids_sumo_edge is not None:
if len(ids_sumo_edge) == 0:
ids_controlled[id_node] = []
else:
ids_controlled[id_node] = get_ids_edge(ids_sumo_edge)
# convert sumo ids into internal ids and set to nodes
# print ' self.ids_sumo_tls',self.ids_sumo_tls
# print ' self._net.tlss.ids_sumo',self._net.tlss.ids_sumo.value
self._net.nodes.ids_tls[
self.ids_node] = self._net.tlss.ids_sumo.get_ids_from_indices_save(self.ids_sumo_tls)
def startElement(self, name, attrs):
# print 'startElement',name
# if attrs.has_key('id'): print attrs['id']
# elif (attrs.has_key('from')&attrs.has_key('to')): print 'from',attrs['from'],'to',attrs['to']
# elif (attrs.has_key('from')&attrs.has_key('to')): print 'from',attrs['from'],'to',attrs['to']
# else: print '.'
if name == 'nodes':
version = self._net.get_version()
if self._isNew | (version == attrs['version']):
self._net.set_version(attrs['version'])
else:
print 'WARNING: merge with incompatible net versions %s versus %s.' % (version, attrs['version'])
elif name == 'location': # j.s
# print 'startElement',name,self._isNew
netOffsetStrings = attrs['netOffset'].strip().split(",")
offset = np.array([float(netOffsetStrings[0]),
float(netOffsetStrings[1])])
offset_prev = self._net.get_offset()
if self._isNew:
self._net.set_offset(offset)
# print ' offset_prev,offset',offset_prev,offset,type(offset)
else:
self._offset_delta = offset - offset_prev
self._net.set_offset(offset)
# print '
# offset_prev,offset,self._offset_delta',offset_prev,offset,type(offset),self._offset_delta
convBoundaryStr = attrs['convBoundary'].strip().split(",")
origBoundaryStr = attrs['origBoundary'].strip().split(",")
# print ' convBoundaryStr',convBoundaryStr
# print ' origBoundary',origBoundaryStr
if self._isNew:
self._net.set_boundaries([float(convBoundaryStr[0]),
float(convBoundaryStr[1]),
float(convBoundaryStr[2]),
float(convBoundaryStr[3])],
[float(origBoundaryStr[0]),
float(origBoundaryStr[1]),
float(origBoundaryStr[2]),
float(origBoundaryStr[3])]
)
else:
self._net.merge_boundaries([float(convBoundaryStr[0]),
float(convBoundaryStr[1]),
float(convBoundaryStr[2]),
float(convBoundaryStr[3])],
[float(origBoundaryStr[0]),
float(origBoundaryStr[1]),
float(origBoundaryStr[2]),
float(origBoundaryStr[3])]
)
if self._isNew:
if attrs.has_key('projParameter'):
self._net.set_projparams(attrs['projParameter'])
else:
if attrs.has_key('projParameter'):
if self._net.get_projparams() != attrs['projParameter']:
print 'WARNING: merge with incompatible projections %s versus %s.' % (self._net.get_projparams(), attrs['projParameter'])
elif name == 'node':
if attrs['id'][0] != ':': # no internal node
self._ind_node += 1
i = self._ind_node
x0, y0 = self._offset_delta
self.ids_sumo[i] = attrs['id']
sumotypes_node = str(attrs.get('type', 'priority'))
self.types[i] = self._nodetypemap[sumotypes_node]
self.coords[i] = [
float(attrs['x']) - x0, float(attrs['y']) - y0, float(attrs.get('z', 0.0))]
sumotype_tl = attrs.get('tlType', 'none')
if sumotypes_node == 'traffic_light':
if sumotype_tl == 'none':
sumotype_tl = 'static'
self.types_tl[i] = self._tltypemap[sumotype_tl]
self.ids_sumo_tls[i] = attrs.get('tl', None)
self.turnradii[i] = attrs.get('radius', 1.5)
self.are_keep_clear[i] = attrs.get('keepClear', True)
#'controlledInner'
# Edges which shall be controlled by a joined TLS
# despite being incoming as well as outgoing to
# the jointly controlled nodes
# problem: we do not know yet the edge IDs
#
if attrs.has_key('controlledInner'):
self.ids_sumo_controlled[i] = attrs[
'controlledInner'].strip().split(' ')
else:
self.ids_sumo_controlled[i] = []
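# When a nodes file is merged into a non-empty network, the <location> handler
# above stores the difference between the new and the previous netOffset in
# self._offset_delta; each node coordinate read afterwards is shifted by that
# delta so the merged nodes end up in the offset frame of the existing
# network. Internal nodes (ids starting with ':') are skipped entirely.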
# class SumoTllCounter(handler.ContentHandler):
# """Parses a SUMO tll XML file and counts edges and lanes."""
#
# def __init__(self):
# self.n_tls = 0
#
#
# def startElement(self, name, attrs):
# #print 'startElement',name,self.n_tls
# if name == 'tlLogic':
# self.n_tls += 1
class SumoTllReader(handler.ContentHandler):
"""Parses a SUMO tll XML file and reads it into net."""
def __init__(self, net):
self.net = net
self.connections = net.connections
self.tlss = net.tlss
# print 'SumoEdgeFastreader'
self.get_id_tls = net.nodes.ids_sumo.get_id_from_index
#n_tls = counter.n_tls
self.ptypes_choices = self.tlss.tlls.value.ptypes.choices
self.ids_sumo_tls = self.tlss.ids_sumo
self.reset_prog()
self.tlsconnections = {}
def reset_prog(self):
self.id_sumo_tls = None
self.durations = []
self.durations_min = []
self.durations_max = []
self.states = []
def startElement(self, name, attrs):
if name == 'tlLogic':
# print '\n startElement',name,attrs['id']
self.id_sumo_tls = attrs['id']
self.ptype = self.ptypes_choices.get(attrs.get('type', None), 1)
self.id_prog = attrs.get('programID', None)
self.offset = attrs.get('offset', None)
elif name == 'phase':
# print 'startElement',name,self.id_sumo_tls
if self.id_sumo_tls is not None:
# print '
# append',attrs.get('duration',None),attrs.get('state',None),len(attrs.get('state',''))
duration = int(attrs.get('duration', 0))
self.durations.append(duration)
self.durations_min.append(int(attrs.get('minDur', duration)))
self.durations_max.append(int(attrs.get('maxDur', duration)))
self.states.append(attrs.get('state', None))
# elif name == 'tlLogics':
# pass
elif name == 'connection':
id_sumo_tls = attrs['tl']
# print 'startElement',name,id_sumo_tls,int(attrs['linkIndex'])
# print ' self.tlsconnections',self.tlsconnections
if not self.tlsconnections.has_key(id_sumo_tls):
self.tlsconnections[id_sumo_tls] = {}
id_con = self.connections.get_id_from_sumoinfo(attrs['from'],
attrs['to'], int(attrs['fromLane']), int(attrs['toLane']))
if id_con >= 0:
self.tlsconnections[id_sumo_tls][
int(attrs['linkIndex'])] = id_con
def endElement(self, name):
#edges = self._net.edges
#lanes = self._net.lanes
if name == 'tlLogic':
# print 'endElement',name,self.id_sumo_tls
# print ' ptype',self.ptype
# print ' durations',self.durations
# print ' durations_min',self.durations_min
# print ' durations_max',self.durations_max
# print ' states',self.states
# print ' self.id_prog='+self.id_prog+'='
self.tlss.make(self.id_sumo_tls,
id_prog=self.id_prog,
ptype=self.ptype,
offset=self.offset,
durations=self.durations,
durations_min=self.durations_min,
durations_max=self.durations_max,
states=self.states,
)
self.reset_prog()
elif name == 'tlLogics':
# print 'endElement',name,len(self.tlss)
# end of scanning. Write controlled connections to tlss
# print ' tlsconnections',self.tlsconnections
for id_sumo_tls, conmap in self.tlsconnections.iteritems():
inds_con = np.array(conmap.keys(), dtype=np.int32)
ids_con = np.zeros(np.max(inds_con) + 1, np.int32)
# print ' cons for',id_sumo_tls,conmap
# print ' inds',inds_con,len(ids_con)
# print ' values',conmap.values(),len(ids_con)
ids_con[inds_con] = conmap.values() # <<<<<<<<<<<
id_tls = self.tlss.ids_sumo.get_id_from_index(id_sumo_tls)
self.tlss.set_connections(id_tls, ids_con)
#self.tlss.set_connections(self.get_id_tls(id_sumo_tls), ids_con)
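# The <connection> elements of the tll file carry one linkIndex per tls; the
# reader collects them in self.tlsconnections as {sumo_tls_id: {linkIndex:
# id_con}} and, on </tlLogics>, turns each map into a dense array in which
# position linkIndex holds the internal connection id before attaching it to
# the tls object via set_connections().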
class SumoEdgeCounter(handler.ContentHandler):
"""Parses a SUMO edge XML file and counts edges and lanes."""
def __init__(self):
self.n_edge = 0
self.n_lane = 0
self.n_roundabout = 0
self._n_edgelane = 0
#self._net = net
#self._ids_edge_sumo = net.edges.ids_sumo
#self._ids_node_sumo = net.nodes.ids_sumo
def startElement(self, name, attrs):
# print 'startElement',name,self.n_edge,self.n_lane,self.n_roundabout
if name == 'edge':
self.n_edge += 1
self.n_lane += int(attrs['numLanes'])
elif name == 'roundabout':
self.n_roundabout += 1
class SumoEdgeReader(handler.ContentHandler):
"""Parses a SUMO edge XML file and reads it into net."""
def __init__(self, net, counter, offset_delta=np.array([0.0, 0.0])):
self._net = net
# print 'SumoEdgeFastreader'
self._ids_node_sumo = net.nodes.ids_sumo
self._nodecoords = net.nodes.coords
self._modenames = net.modes.names
self._offset_delta = offset_delta
#self._isNew = len(self._net.nodes)==0
# edge attrs
self._ind_edge = -1
# print ' n_edge',counter.n_edge
self.ids_edge_sumo = np.zeros(
counter.n_edge, np.object) # net.edges.ids_sumo
self.ids_edge_sumo[:] = None # ??needed
self.ids_fromnode = np.zeros(counter.n_edge, np.int32)
self.ids_tonode = np.zeros(counter.n_edge, np.int32)
self.types_edge = np.zeros(counter.n_edge, np.object)
# used only for lane width if no lane data is given
self.widths = np.zeros(counter.n_edge, np.float32)
self.nums_lanes = np.zeros(counter.n_edge, np.int32)
self.speeds_max = np.zeros(counter.n_edge, np.float32)
self.priorities = np.zeros(counter.n_edge, np.int32)
#length = 0.0,
self.shapes = np.zeros(counter.n_edge, np.object)
self.types_spread = np.zeros(counter.n_edge, np.int32)
self.spread_choices = net.edges.types_spread.choices
#"right": 0,
#"center": 1,
self.names = np.zeros(counter.n_edge, np.object)
self.offsets_end = np.zeros(counter.n_edge, np.float32)
self.widths_lanes_default = np.zeros(counter.n_edge, np.float32)
self.widths_sidewalk = np.zeros(counter.n_edge, np.float32)
self.inds_lanes_edges = np.zeros(counter.n_edge, np.object)
#self.inds_lanes_edges[:] = None
self._ind_lanes_edges = []
#self.ids_sumoedge_to_ind = {}
# lane attrs
# print ' n_lane',counter.n_lane
self._ind_lane = -1
self.index_lanes = np.zeros(counter.n_lane, np.int32)
self.width_lanes = np.zeros(counter.n_lane, np.float32)
self.speed_max_lanes = np.zeros(counter.n_lane, np.float32)
self.offset_end_lanes = np.zeros(counter.n_lane, np.float32)
self.modes_allow = np.zeros(counter.n_lane, np.object)
self.modes_disallow = np.zeros(counter.n_lane, np.object)
self.ids_mode_lanes = np.zeros(counter.n_lane, np.int32)
self.inds_edge_lanes = np.zeros(counter.n_lane, np.int32)
#self.shapes_lanes = np.zeros(counter.n_lane,np.object)
# roundabout attrs
# print ' n_roundabout',counter.n_roundabout
self._ind_ra = -1
self.ids_sumoedges_ra = np.zeros(counter.n_roundabout, np.object)
self.ids_nodes_ra = np.zeros(counter.n_roundabout, np.object)
############################
def startElement(self, name, attrs):
# print 'startElement',name
# if attrs.has_key('id'): print attrs['id']
# elif (attrs.has_key('from')&attrs.has_key('to')): print 'from',attrs['from'],'to',attrs['to']
# elif (attrs.has_key('from')&attrs.has_key('to')): print 'from',attrs['from'],'to',attrs['to']
# else: print '.'
if name == 'edge':
# if not attrs.has_key('function') or attrs['function'] != 'internal':
#id_fromnode = nodes.ids_sumo.get_id_from_index(id_fromnode_sumo)
#id_tonode = nodes.ids_sumo.get_id_from_index(id_tonode_sumo)
self._ind_edge += 1
ind = self._ind_edge
# print 'startElement edge',ind,attrs['id']
self.ids_edge_sumo[ind] = attrs['id']
id_fromnode = self._ids_node_sumo.get_id_from_index(
str(attrs['from']))
id_tonode = self._ids_node_sumo.get_id_from_index(str(attrs['to']))
self.ids_fromnode[ind] = id_fromnode
self.ids_tonode[ind] = id_tonode
self.types_edge[ind] = str(attrs.get('type', ''))
self.nums_lanes[ind] = int(attrs.get('numLanes', 1))
self.widths[ind] = float(
attrs.get('width', 3.5 * self.nums_lanes[ind]))
self.types_spread[ind] = self.spread_choices[
str(attrs.get('spreadType', 'right'))] # usually center
# print ' ',self.types_spread[ind]
#length = 0.0,
shape = np.array(xm.process_shape(
attrs.get('shape', ''), offset=self._offset_delta))
if len(shape) < 2: # insufficient shape data
# shape should be a list of np array coords
# ATTENTIOn: we need to copy here, otherwise the reference
# to node coordinates will be kept!!
shape = np.array(
[1.0 * self._nodecoords[id_fromnode], 1.0 * self._nodecoords[id_tonode]])
if self.types_spread[ind] == 1: # center
angles_perb = get_angles_perpendicular(shape)
halfwidth = self.widths[ind]
shape[:, 0] += np.cos(angles_perb) * halfwidth
shape[:, 1] += np.sin(angles_perb) * halfwidth
self.shapes[ind] = shape
self.speeds_max[ind] = float(attrs.get('speed', 13.888))
self.priorities[ind] = int(attrs.get('priority', 9))
self.names[ind] = unicode(attrs.get('name', ''))
self.offsets_end[ind] = float(attrs.get('endOffset', 0.0))
self.widths_lanes_default[ind] = float(attrs.get('width', 3.0))
self.widths_sidewalk[ind] = float(attrs.get('sidewalkWidth', -1.0))
#self._is_laneshape = True
# print ' self._id_edge',self._id_edge
elif name == 'lane':
self._ind_lane += 1
ind = self._ind_lane
speed_max_default = -1
if attrs.has_key('allow'):
modes_allow = list(self._modenames.get_ids_from_indices(
attrs['allow'].split(' ')))
else:
edgetype = self.types_edge[self._ind_edge]
if OSMEDGETYPE_TO_MODES.has_key(edgetype):
modes_allow, speed_max_default = OSMEDGETYPE_TO_MODES[
edgetype]
else:
modes_allow = []
if attrs.has_key('disallow'):
modes_disallow = list(self._modenames.get_ids_from_indices(
attrs['disallow'].split(' ')))
else:
modes_disallow = []
index = int(attrs.get('index', -1))
width = float(attrs.get('width', -1))
speed_max = float(attrs.get('speed', speed_max_default))
is_sidewalk_edge = False
is_sidewalk = False
if len(modes_allow) == 1:
id_mode_main = modes_allow[0] # pick as major mode
# elif len(modes_allow) == 1:
else:
id_mode_main = -1 # no major mode specified
if index == 0:
width_sidewalk_edge = self.widths_sidewalk[
self._ind_edge] # copy from edge
is_sidewalk_edge = width_sidewalk_edge > 0
# test for pedestrian sidewalk
is_sidewalk = (MODES['pedestrian'] in modes_allow)
if speed_max < 0:
if (index == 0) & is_sidewalk:
speed_max = 0.8 # default walk speed
else:
speed_max = self.speeds_max[
self._ind_edge] # copy from edge
# print ' is_sidewalk_edge ,is_sidewalk',is_sidewalk_edge
# ,is_sidewalk
if width < 0:
width = self.widths_lanes_default[
self._ind_edge] # copy from edge
if index == 0:
if is_sidewalk_edge: # edge wants sidewalks
width = width_sidewalk_edge
# edge does not want sidewalks, but actually there is a
# sidewalk
elif (not is_sidewalk_edge) & is_sidewalk:
width = 0.9 # default sidewalk width
# update edge attr!!
self.widths_sidewalk[self._ind_edge] = width
# if sidewalk, then the edge attribute widths_sidewalk
# should be set to actual lane width in case it is less than zero
elif index == 0: # width set for lane 0
# edge does not want sidewalks, but actually there is a
# sidewalk
if (not is_sidewalk_edge) & is_sidewalk:
# update edge attr!!
self.widths_sidewalk[self._ind_edge] = width
self.index_lanes[ind] = index
self.width_lanes[ind] = width
self.speed_max_lanes[ind] = speed_max
self.offset_end_lanes[ind] = float(attrs.get('endOffset', 0.0))
self.modes_allow[ind] = modes_allow
self.modes_disallow[ind] = modes_disallow
self.ids_mode_lanes[ind] = id_mode_main
self.inds_edge_lanes[ind] = self._ind_edge
#self.shapes_lanes[ind] = self.getShape(attrs.get('shape',''), offset = self._offset_delta)
self._ind_lanes_edges.append(ind)
# self._ids_lane.append(id_lane)
elif name == 'roundabout':
self._ind_ra += 1
self.ids_sumoedges_ra[self._ind_ra] = attrs.get(
'edges', '').split(' ')
self.ids_nodes_ra[self._ind_ra] = self._ids_node_sumo.get_ids_from_indices(
attrs.get('nodes', '').split(' '))
# def characters(self, content):
# if self._currentLane!=None:
# self._currentShape = self._currentShape + content
def endElement(self, name):
#edges = self._net.edges
#lanes = self._net.lanes
if name == 'edge':
n_lane = self.nums_lanes[self._ind_edge]
# print 'SumoEdgeReader.endElement',self._ind_lane,n_lane
while len(self._ind_lanes_edges) < n_lane:
# if len(self._ind_lanes_edges) ==0:
# edge description provided no specific lane information
# create n_lanes and us some properties from current edge
self._ind_lane += 1
ind = self._ind_lane
edgetype = self.types_edge[self._ind_edge]
if OSMEDGETYPE_TO_MODES.has_key(edgetype):
modes_allow, speed_max_default = OSMEDGETYPE_TO_MODES[
edgetype]
else:
modes_allow = []
if len(modes_allow) == 1:
id_mode_main = modes_allow[0] # pick as major mode
else:
id_mode_main = -1 # no major mode specified
self.index_lanes[ind] = 0
self.width_lanes[ind] = self.widths[
self._ind_edge] # copy from edge attr
self.speed_max_lanes[ind] = self.speeds_max[
self._ind_edge] # copy from edge attr
self.offset_end_lanes[ind] = self.offset_end_lanes[
self._ind_edge] # copy from edge attr
self.modes_allow[ind] = modes_allow
self.modes_disallow[ind] = []
self.inds_edge_lanes[ind] = self._ind_edge
#self.shapes_lanes[ind] = self.getShape(attrs.get('shape',''), offset = self._offset_delta)
self.ids_mode_lanes[ind] = id_mode_main
self._ind_lanes_edges.append(ind)
self.inds_lanes_edges[self._ind_edge] = self._ind_lanes_edges
self._ind_lanes_edges = []
def write_to_net(self):
# print 'write_to_net'
ids_edge = self._net.add_edges(
ids_sumo=self.ids_edge_sumo,
ids_fromnode=self.ids_fromnode,
ids_tonode=self.ids_tonode,
types=self.types_edge,
nums_lanes=self.nums_lanes,
speeds_max=self.speeds_max,
priorities=self.priorities,
#lengths = length,
shapes=self.shapes,
types_spread=self.types_spread,
names=self.names,
offsets_end=self.offsets_end,
widths_lanes_default=self.widths_lanes_default,
widths_sidewalk=self.widths_sidewalk,
)
# print ' self.inds_edge_lanes',self.inds_edge_lanes
ids_lanes = self._net.add_lanes(
indexes=self.index_lanes,
widths=self.width_lanes,
speeds_max=self.speed_max_lanes,
offsets_end=self.offset_end_lanes,
modes_allow=self.modes_allow,
modes_disallow=self.modes_disallow,
# main mode will be determined from other attributes
ids_mode=self.ids_mode_lanes,
ids_edge=ids_edge[self.inds_edge_lanes],
# shapes = self.shapes_lanes, # lane shapes are not given -> must
# be derived from edge shape
)
#edges.update_lanes(self._id_edge, self._ids_lane)
ids_edgelanes = self._net.edges.ids_lanes
ind = 0
for inds_lane in self.inds_lanes_edges:
ids_edgelanes[ids_edge[ind]] = ids_lanes[inds_lane]
# print '
# id_edge,ids_lanes[inds_lane]',ids_edge[ind],ids_lanes[inds_lane]
ind += 1
# roundaboutS
ids_edge_sumo = self._net.edges.ids_sumo
ids_roundabout = self._net.add_roundabouts(
ids_nodes=self.ids_nodes_ra,
)
ids_edges_ra = self._net.roundabouts.ids_edges
i = 0
for id_roundabout in ids_roundabout:
ids_edges_ra[id_roundabout] = ids_edge_sumo.get_ids_from_indices(
self.ids_sumoedges_ra[i])
i += 1
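# write_to_net() first creates all edges, then all lanes (inds_edge_lanes maps
# each lane record back to its edge), and finally assigns the per-edge lane id
# lists. endElement('edge') above guarantees that an edge without explicit
# <lane> children still gets nums_lanes synthetic lane records, so no edge is
# left without lanes here.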
class SumonetImporter(CmlMixin, Process):
def __init__(self, net, rootname=None, rootdirpath=None, netfilepath=None,
is_clean_nodes=False, logger=None, **kwargs):
self._init_common('sumonetimporter', name='SUMO net import',
logger=logger,
info='Converts a SUMO .net.xml file into nod.xml, edg.xml and con.xml files and reads them into the scenario.',
)
self._net = net
self.init_cml('netconvert')
if rootname == None:
rootname = net.parent.get_rootfilename()
if rootdirpath == None:
if net.parent != None:
rootdirpath = net.parent.get_workdirpath()
else:
rootdirpath = os.getcwd()
if netfilepath == None:
netfilepath = os.path.join(rootdirpath, rootname + '.net.xml')
attrsman = self.get_attrsman()
self.add_option('netfilepath', netfilepath,
# this will make it show up in the dialog
groupnames=['options'],
cml='--sumo-net-file',
perm='rw',
name='Net file',
wildcards='Net XML files (*.net.xml)|*.net.xml',
metatype='filepath',
info='SUMO Net file in XML format.',
)
self.workdirpath = attrsman.add(cm.AttrConf('workdirpath', rootdirpath,
# ['options'],#['_private'],
groupnames=['_private'],
perm='r',
name='Workdir',
metatype='dirpath',
info='Working directory for this scenario.',
))
self.rootname = attrsman.add(cm.AttrConf('rootname', rootname,
groupnames=['_private'],
perm='r',
name='Scenario shortname',
info='Scenario shortname is also rootname of converted files.',
))
self.is_clean_nodes = attrsman.add(cm.AttrConf('is_clean_nodes', is_clean_nodes,
groupnames=['options'],
perm='rw',
name='Clean Nodes',
info='If set, then shapes around nodes are cleaned up.',
))
def update_params(self):
"""
Make all parameters consistent.
example: used by import OSM to calculate/update number of tiles
from process dialog
"""
pass
#self.workdirpath = os.path.dirname(self.netfilepath)
#bn = os.path.basename(self.netfilepath).split('.')
# if len(bn)>0:
# self.rootname = bn[0]
def do(self):
self.update_params()
cml = self.get_cml() + ' --plain-output-prefix ' + \
filepathlist_to_filepathstring(
os.path.join(self.workdirpath, self.rootname))
# print 'SumonetImporter.do',cml
#import_xml(self, rootname, dirname, is_clean_nodes = True)
self.run_cml(cml)
if self.status == 'success':
self._net.import_xml(
self.rootname, self.workdirpath, is_clean_nodes=self.is_clean_nodes)
# print 'do',self.newident
# self._scenario = Scenario( self.newident,
# parent = None,
# workdirpath = self.workdirpath,
# logger = self.get_logger(),
# )
def get_net(self):
return self._net
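# Hypothetical batch use of the importer (assuming the network is attached to
# a scenario, so rootname and the working directory can be derived from
# net.parent):
#   importer = SumonetImporter(net, netfilepath='mynet.net.xml',
#                              is_clean_nodes=True)
#   importer.do()
#   net = importer.get_net()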
class OsmImporter(CmlMixin, Process):
def __init__(self, net=None,
osmfilepaths=None,
netfilepath=None,
proj='',
is_import_elevation_osm=False,
typefilepath=None,
# ordinary roads+bikeways+footpath
roadtypes='ordinary roads+bikeways',
n_lanes_default=0,
edgespeed_default=13.9,
priority_default=-1,
is_remove_isolated_edges=True,
factor_edge_speed=1.0,
is_guess_sidewalks=False,
edgespeed_min_sidewalks=5.8,
edgespeed_max_sidewalks=13.89,
is_guess_sidewalks_from_permission=False,
width_sidewalks_default=2.0,
is_guess_crossings=False,
edgespeed_max_crossings=13.89,
is_join_nodes=True,
dist_join_nodes=15.0,
is_keep_nodes_clear=True,
is_keep_nodes_unregulated=False,
is_guess_tls=False,
is_join_tls=False,
joindist_tls=20.0,
is_uncontrolled_within_tls=False,
is_guess_signals_tls=False,
dist_guess_signal_tls=20.0,
#time_green_tls = 31,
time_yellow_tls=-1,
accel_min_yellow_tls=-1.0,
is_no_turnarounds=False,
is_no_turnarounds_tls=False,
is_check_lane_foes=False,
is_roundabouts_guess=True,
is_check_lane_foes_roundabout=False,
is_no_left_connections=False,
is_geometry_split=False,
is_geometry_remove=True,
length_max_segment=-1.0,
dist_min_geometry=-1.0,
is_guess_ramps=True,
rampspeed_max=-1,
highwayspeed_min=21.9444,
ramplength=100,
is_no_split_ramps=False,
#
is_clean_nodes=False,
#
logger=None, **kwargs):
self._init_common('osmimporter', name='OSM import',
logger=logger,
info='Converts an OSM file to SUMO nod.xml, edg.xml and con.xml files and reads them into the scenario.',
)
if net == None:
self._net = Network()
else:
self._net = net
self.init_cml('netconvert') # pass main shell command
if net.parent != None:
rootname = net.parent.get_rootfilename()
rootdirpath = net.parent.get_workdirpath()
else:
rootname = net.get_ident()
rootdirpath = os.getcwd()
if netfilepath == None:
netfilepath = os.path.join(rootdirpath, rootname + '.net.xml')
if osmfilepaths == None:
osmfilepaths = os.path.join(rootdirpath, rootname + '.osm.xml')
if typefilepath == None:
typefilepath = os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', '..', 'typemap', 'osmNetconvert.typ.xml')
attrsman = self.get_attrsman()
self.workdirpath = rootdirpath
self.rootname = rootname
self.add_option('osmfilepaths', osmfilepaths,
groupnames=['options'],
cml='--osm-files',
perm='rw',
name='OSM files',
wildcards='OSM XML files (*.osm)|*.osm*',
metatype='filepaths',
info='Openstreetmap files to be imported.',
)
self.add_option('netfilepath', netfilepath,
groupnames=[], # ['_private'],#
cml='--output-file',
perm='r',
name='Net file',
wildcards='Net XML files (*.net.xml)|*.net.xml',
metatype='filepath',
info='SUMO Net file in XML format.',
)
self.add_option('typefilepath', typefilepath,
groupnames=['options'],
cml='--type-files',
perm='rw',
name='Type files',
wildcards='Typemap XML files (*.typ.xml)|*.typ.xml',
metatype='filepaths',
info="""Typemap XML files. In these file,
OSM road types are mapped to edge and lane parameters such as width,
speeds, etc. These parameters are used as defaults in absents of explicit OSM attributes.
Use osmNetconvert.typ.xml as a base and additional type file to meet specific needs.""",
)
# self.add_option('projparams','!',
# groupnames = ['options'],#
# cml = '--proj',
# perm='rw',
# name = 'projection',
# info = 'Uses STR as proj.4 definition for projection.',
# )
# --offset.disable-normalization <BOOL> Turn off normalizing node positions; default: false
# self.add_option('offset_x',0.0,
# groupnames = ['options','geometry'],#
# cml = '--offset.x ',
# perm='rw',
# unit = 'm',
# name = 'X-Offset',
# info = 'Adds offset to net x-positions; default: 0.0',
# )
# self.add_option('offset_y',0.0,
# groupnames = ['options','geometry'],#
# cml = '--offset.y ',
# perm='rw',
# unit = 'm',
# name = 'Y-Offset',
# info = 'Adds offset to net x-positions; default: 0.0',
# )
# --flip-y-axis <BOOL> Flips the y-coordinate along zero; default: false
#----------------------------------------------------------------------
# osm specific
#--junctions.corner-detail <INT> Generate INT intermediate points to smooth out intersection corners; default: 0
# --junctions.internal-link-detail <INT> Generate INT intermediate points to smooth out lanes within the intersection; default: 5
self.add_option('is_import_elevation_osm', is_import_elevation_osm,
groupnames=['options', 'osm'],
cml='--osm.elevation',
perm='rw',
name='import elevation',
info='Imports elevation data.',
)
#-------------------------------------------------------------------------
# edge options
#--keep-edges.min-speed <FLOAT> Only keep edges with speed in meters/second > FLOAT
modesset = set(MODES.keys())
modesset_pt_rail = set(["rail_urban", "rail", "rail_electric"])
modesset_pt_road = set(["bus", "taxi", "coach", "tram"])
modesset_motorized = set(["private", "passenger", "emergency", "authority",
"army", "vip", "hov", "motorcycle", "moped", "evehicle", "delivery", "truck"])
roadtypes_to_disallowed_vtypes = {"roads for individual transport": ','.join(modesset.difference(["passenger", ])),
"ordinary roads": ','.join(modesset.difference(modesset_pt_road | modesset_motorized)),
"ordinary roads+bikeways": ','.join(modesset.difference(modesset_pt_road | modesset_motorized | set(['bicycle']))),
"ordinary roads+bikeways+footpath": ','.join(modesset.difference(modesset_pt_road | modesset_motorized | set(['bicycle']) | set(['pedestrian']))),
"ordinary roads+rails": ','.join(modesset.difference(modesset_pt_road | modesset_motorized | modesset_pt_rail)),
"ordinary roads+rails+bikeways": ','.join(modesset.difference(modesset_pt_road | modesset_motorized | modesset_pt_rail | set(['bicycle']))),
"ordinary roads+rails+bikeways+footpath": ','.join(modesset.difference(modesset_pt_road | modesset_motorized | modesset_pt_rail | set(['bicycle']) | set(['pedestrian']))),
"all ways": ""
}
# print ' access_to_vtypes=',access_to_vtypes
self.add_option('select_edges_by_access', roadtypes_to_disallowed_vtypes[roadtypes],
groupnames=['options', 'edges'],
cml='--remove-edges.by-vclass',
choices=roadtypes_to_disallowed_vtypes,
perm='rw',
name='Keep edges with access',
info='Imports all edges with the given vehicle access patterns.',
is_enabled=lambda self: self.select_edges_by_access != "",
)
self.add_option('n_lanes_default', n_lanes_default,
groupnames=['options', 'edges'],
cml='--default.lanenumber',
perm='rw',
name='Default lanenumber',
info='The default number of lanes in an edge.',
is_enabled=lambda self: self.n_lanes_default > 0,
)
self.add_option('edgespeed_default', edgespeed_default,
groupnames=['options', 'edges'],
cml='--default.speed',
perm='rw',
unit='m/s',
name='Default edge speed',
info='The default speed on an edge.',
is_enabled=lambda self: self.edgespeed_default > 0,
)
choices_priority = {}
for i in range(11):
choices_priority[str(i)] = i
choices_priority['auto'] = -1
self.add_option('priority_default', priority_default,
groupnames=['options', 'edges'],
cml='--default.priority',
choices=choices_priority,
perm='rw',
name='Default priority',
info='The default priority of an edge. A value of -1 means automatic assignment.',
is_enabled=lambda self: self.priority_default > 0,
)
self.add_option('is_remove_isolated_edges', is_remove_isolated_edges,
groupnames=['options', 'edges'],
cml='--remove-edges.isolated',
perm='rw',
name='Remove isolated edges',
info='Remove isolated edges.',
)
# --edges.join <BOOL> Merges edges which connect the same nodes and are close to each other (recommended for VISSIM import); default: false
# --speed.offset <FLOAT> Modifies all edge speeds by adding FLOAT; default: 0
self.add_option('factor_edge_speed', factor_edge_speed,
groupnames=['options', 'edges'],
cml='--speed.factor',
perm='rw',
name='Edge speed factor',
info='Modifies all edge speeds by multiplying with edge speed factor.',
is_enabled=lambda self: self.factor_edge_speed == 1.0,
)
#-------------------------------------------------------------------------
# pedestrians
self.add_option('is_guess_sidewalks', is_guess_sidewalks,
groupnames=['options', 'pedestrians'],
cml='--sidewalks.guess',
perm='rw',
name='Guess sidewalks',
info='Guess pedestrian sidewalks based on edge speed.',
)
self.add_option('edgespeed_min_sidewalks', edgespeed_min_sidewalks,
groupnames=['options', 'pedestrians'],
cml='--sidewalks.guess.min-speed',
perm='rw',
unit='m/s',
name='Min edge speed for sidewalk guess',
info='Add sidewalks for edges with a speed above the given limit.',
is_enabled=lambda self: self.is_guess_sidewalks | self.is_guess_sidewalks_from_permission,
)
self.add_option('edgespeed_max_sidewalks', edgespeed_max_sidewalks,
groupnames=['options', 'pedestrians'],
cml='--sidewalks.guess.max-speed',
perm='rw',
unit='m/s',
name='Max edge speed for sidewalk guess',
info='Add sidewalks for edges with a speed equal or below the given limit.',
is_enabled=lambda self: self.is_guess_sidewalks | self.is_guess_sidewalks_from_permission,
)
self.add_option('is_guess_sidewalks_from_permission', is_guess_sidewalks_from_permission,
groupnames=['options', 'pedestrians'],
cml='--sidewalks.guess.from-permissions ',
perm='rw',
name='Guess sidewalks from permission',
info='Add sidewalks for edges that allow pedestrians on any of their lanes regardless of speed.',
)
self.add_option('width_sidewalks_default', width_sidewalks_default,
groupnames=['options', 'pedestrians'],
cml='--default.sidewalk-width',
perm='rw',
unit='m',
name='Default sidewalk width',
info='Default width of sidewalks that are added to edges.',
is_enabled=lambda self: self.is_guess_sidewalks | self.is_guess_sidewalks_from_permission,
)
self.add_option('is_guess_crossings', is_guess_crossings,
groupnames=['options', 'pedestrians'],
cml='--crossings.guess',
perm='rw',
name='Guess crossings',
info='Guess pedestrian crossings based on the presence of sidewalks.',
)
self.add_option('edgespeed_max_crossings', edgespeed_max_crossings,
groupnames=['options', 'pedestrians'],
cml='--crossings.guess.speed-threshold',
perm='rw',
unit='m/s',
name='Max edge speed for crossings',
info='At uncontrolled nodes, do not build crossings across edges with a speed above this maximum edge speed.',
is_enabled=lambda self: self.is_guess_crossings,
)
#-------------------------------------------------------------------------
# node options
self.add_option('is_join_nodes', is_join_nodes,
groupnames=['options', 'nodes'],
cml='--junctions.join',
perm='rw',
name='Join nodes',
info='Join nearby nodes. Specify with node join distance which nodes will be joined.',
)
self.add_option('dist_join_nodes', dist_join_nodes,
groupnames=['options', 'nodes'],
cml='--junctions.join-dist',
perm='rw',
unit='m',
name='Node join distance',
info='Specify with node join distance which nodes will be joined. Join nodes option must be True.',
is_enabled=lambda self: self.is_join_nodes,
)
self.add_option('is_keep_nodes_clear', is_keep_nodes_clear,
groupnames=['options', 'nodes'],
cml='--default.junctions.keep-clear',
perm='rw',
name='Keep nodes clear',
info='Whether junctions should be kept clear by default.',
)
self.add_option('is_keep_nodes_unregulated', is_keep_nodes_unregulated,
groupnames=['options', 'nodes'],
cml='--keep-nodes-unregulated',
perm='rw',
name='Keep nodes unregulated',
info='Keep nodes unregulated.',
)
# --default.junctions.radius <FLOAT> The default turning radius of intersections; default: 1.5
#-------------------------------------------------------------------------
# TLS Building Options:
self.add_option('is_guess_tls', is_guess_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.guess',
perm='rw',
name='TLS-guessing',
info='Turns on TLS guessing.',
)
# now same as is_join_tls
# self.add_option('is_guess_join_tls',True,
# groupnames = ['options','traffic lights'],#
# cml = '--tls-guess.joining',
# perm='rw',
# name = 'TLS-guess joining',
# info = 'Includes node clusters into guess.',
# )
self.add_option('is_join_tls', is_join_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.join',
perm='rw',
name='TLS-joining',
info='Tries to cluster tls-controlled nodes.',
is_enabled=lambda self: self.is_guess_tls,
)
self.add_option('joindist_tls', joindist_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.join-dist',
perm='rw',
unit='m',
name='TLS-join dist.',
info='Determines the maximal distance for joining traffic lights (defaults to 20)',
is_enabled=lambda self: self.is_guess_tls & self.is_join_tls,
)
self.add_option('is_uncontrolled_within_tls', is_uncontrolled_within_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.uncontrolled-within',
perm='rw',
name='Uncontrolled within TLS.',
info='Do not control edges that lie fully within a joined traffic light. This may cause collisions but allows old traffic light plans to be used.',
is_enabled=lambda self: self.is_guess_tls,
)
self.add_option('is_guess_signals_tls', is_guess_signals_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.guess-signals',
perm='rw',
name='Guess signals.',
info='Interprets tls nodes surrounding an intersection as signal positions for a larger TLS. This is a typical pattern for OSM-derived networks.',
is_enabled=lambda self: self.is_guess_tls,
)
self.add_option('dist_guess_signal_tls', dist_guess_signal_tls,
groupnames=['options', 'traffic lights'],
cml='--tls.guess-signals.dist',
perm='rw',
unit='m',
name='Signal guess dist.',
info='Distance for interpreting nodes as signal locations',
is_enabled=lambda self: self.is_guess_tls & self.is_guess_signals_tls,
)
# self.add_option('time_cycle_tls',90,
# groupnames = ['options','traffic lights'],#
# cml = '--tls.cycle.time',
# perm='rw',
# unit = 's',
# name = 'Cycle time',
# info = 'Cycle time of traffic light.',
# )
# self.add_option('time_green_tls',31,
# groupnames = ['options','traffic lights'],#
# cml = '--tls.green.time',
# perm='rw',
# unit = 's',
# name = 'Green cycle time',
# info = 'reen cycle time of traffic light.',
# )
# self.add_option('time_leftgreen_tls',6,
# groupnames = ['options','traffic lights'],#
# cml = '--tls.left-green.time',
# perm='rw',
# unit = 's',
# name = 'Left green cycle time',
# info = 'reen cycle time of traffic light.',
# )
self.add_option('time_yellow_tls', time_yellow_tls, # default 6
groupnames=['options', 'traffic lights'],
cml='--tls.yellow.time',
perm='rw',
unit='s',
name='Yellow cycle time',
info='Fixed yellow time of traffic light. The value of -1.0 means automatic determination',
is_enabled=lambda self: self.is_guess_tls & (
self.time_yellow_tls > 0),
)
self.add_option('accel_min_yellow_tls', accel_min_yellow_tls, # default 3.0
groupnames=['options'],
cml='--tls.yellow.min-decel',
perm='rw',
unit='m/s^2',
name='Min decel. at yellow',
info='Defines smallest vehicle deceleration at yellow. The value of -1.0 means automatic determination',
is_enabled=lambda self: self.is_guess_tls & (
self.accel_min_yellow_tls > 0),
)
# --tls.yellow.patch-small Given yellow times are patched even if being too short
#-------------------------------------------------------------------------
# topology
self.add_option('is_no_turnarounds', is_no_turnarounds,
groupnames=['options', 'topology'],
cml='--no-turnarounds',
perm='rw',
name='no turnarounds',
info='Disables building turnarounds.',
)
self.add_option('is_no_turnarounds_tls', is_no_turnarounds_tls,
groupnames=['options', 'topology'],
cml='--no-turnarounds.tls',
perm='rw',
name='no TLS turnarounds',
info='Disables building turnarounds at traffic lights.',
is_enabled=lambda self: self.is_guess_tls,
)
self.add_option('is_check_lane_foes', is_check_lane_foes,
groupnames=['options', 'topology'],
cml='--check-lane-foes.all',
perm='rw',
name='Always allow entering multilane',
info='Always allow driving onto a multi-lane road if there are foes (arriving vehicles) on other lanes.',
)
self.add_option('is_roundabouts_guess', is_roundabouts_guess,
groupnames=['options', 'topology'],
cml='--roundabouts.guess',
perm='rw',
name='Roundabouts guess',
info='Enable roundabout-guessing.',
)
self.add_option('is_check_lane_foes_roundabout', is_check_lane_foes_roundabout,
groupnames=['options', 'topology'],
cml='--check-lane-foes.roundabout',
perm='rw',
name='Allow entering multilane roundabout',
info='Allow driving onto a multi-lane road at roundabouts if there are foes (arriving vehicles) on other lanes.',
is_enabled=lambda self: self.is_roundabouts_guess,
)
# --lefthand <BOOL> Assumes left-hand traffic on the network; default: false
self.add_option('is_no_left_connections', is_no_left_connections,
groupnames=['options', 'topology'],
cml='--no-left-connections',
perm='rw',
name='no left connections',
info='Disables building connections to left.',
)
#-------------------------------------------------------------------------
# geometry options
self.add_option('is_geometry_split', is_geometry_split,
groupnames=['options', 'geometry'],
cml='--geometry.split',
perm='rw',
name='geometry split',
info='Splits edges across geometry nodes.',
)
self.add_option('is_geometry_remove', is_geometry_remove,
groupnames=['options', 'geometry'],
cml='--geometry.remove',
perm='rw',
name='geometry remove',
info='Replace nodes which only define edge geometry by geometry points (joins edges).',
)
self.add_option('length_max_segment', length_max_segment,
groupnames=['options', 'geometry'],
cml='--geometry.max-segment-length',
perm='rw',
unit='m',
name='Max segment length',
info='Splits geometry to restrict segment length to maximum segment length. The value of -1.0 means no effect.',
is_enabled=lambda self: self.length_max_segment > 0,
)
self.add_option('dist_min_geometry', dist_min_geometry,
groupnames=['options', 'geometry'],
cml='--geometry.min-dist',
perm='rw',
unit='m',
name='Min geom dist',
info='Reduces too similar geometry points. The value of -1.0 means no effect.',
is_enabled=lambda self: self.dist_min_geometry > 0,
)
# self.add_option('angle_max_geometry',99.0,
# groupnames = ['options','geometry'],#
# cml = '--geometry.max-angle',
# perm='rw',
# unit = 'degree',
# name = 'Max geom angle',
# info = ' Warn about edge geometries with an angle above the maximum angle in successive segments.',
# )
# --geometry.min-radius <FLOAT> Warn about edge geometries with a turning radius less than METERS at the start or end; default: 9
# --geometry.junction-mismatch-threshold <FLOAT> Warn if the junction shape is to far away from the original node position; default: 20
#-------------------------------------------------------------------------
# Ramps
self.add_option('is_guess_ramps', is_guess_ramps,
groupnames=['options', 'ramps'],
cml='--ramps.guess',
perm='rw',
name='Guess ramps',
info='Enable ramp-guessing.',
)
self.add_option('rampspeed_max', rampspeed_max,
groupnames=['options', 'ramps'],
cml='--ramps.max-ramp-speed',
perm='rw',
unit='m/s',
name='Max ramp speed',
info='Treat edges with a higher speed as non-ramps.',
is_enabled=lambda self: self.is_guess_ramps,
)
self.add_option('highwayspeed_min', highwayspeed_min,
groupnames=['options', 'ramps'],
cml='--ramps.min-highway-speed',
perm='rw',
unit='m/s',
name='Min highway speed',
info='Treat edges with a lower speed as non-highways (no ramps will be built there).',
is_enabled=lambda self: self.is_guess_ramps,
)
self.add_option('ramplength', ramplength,
groupnames=['options', 'ramps'],
cml='--ramps.ramp-length',
perm='rw',
unit='m',
name='Ramp length',
info='Ramp length',
is_enabled=lambda self: self.is_guess_ramps,
)
self.add_option('is_no_split_ramps', is_no_split_ramps,
groupnames=['options', 'ramps'],
cml='--ramps.no-split',
perm='rw',
name='No ramp split',
info='Avoids edge splitting at ramps.',
is_enabled=lambda self: self.is_guess_ramps,
)
#-------------------------------------------------------------------------
self.is_clean_nodes = attrsman.add(cm.AttrConf('is_clean_nodes', is_clean_nodes,
groupnames=['options'],
perm='rw',
name='Clean Nodes',
info='If set, then shapes around nodes are cleaned up after importing.',
))
def update_params(self):
"""
Make all parameters consistent.
example: used by import OSM to calculate/update number of tiles
from process dialog
"""
pass
#self.workdirpath = os.path.dirname(self.netfilepath)
#bn = os.path.basename(self.netfilepath).split('.')
# if len(bn)>0:
# self.rootname = bn[0]
def do(self):
self.update_params()
cml = self.get_cml()
# print 'SumonetImporter.do',cml
#import_xml(self, rootname, dirname, is_clean_nodes = True)
self.run_cml(cml)
if self.status == 'success':
print ' OSM->sumo.xml done'
if os.path.isfile(self.netfilepath):
print ' OSM->sumo.xml successful, start generation of xml files'
cml = self._command + ' --sumo-net-file ' + filepathlist_to_filepathstring(
self.netfilepath) + ' --plain-output-prefix ' + filepathlist_to_filepathstring(os.path.join(self.workdirpath, self.rootname))
self.run_cml(cml)
if self.status == 'success':
print ' start import into sumopy'
self._net.import_xml(
self.rootname, self.workdirpath, is_clean_nodes=self.is_clean_nodes)
print ' import sumopy done.'
#self._net.import_xml(self.rootname, self.workdirpath, is_clean_nodes = self.is_clean_nodes)
# print 'do',self.newident
# self._scenario = Scenario( self.newident,
# parent = None,
# workdirpath = self.workdirpath,
# logger = self.get_logger(),
# )
def get_net(self):
return self._net
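# OsmImporter.do() invokes netconvert twice: first OSM -> .net.xml with the
# options registered above, then .net.xml -> plain nod/edg/con files via
# --plain-output-prefix, which are finally read back with Network.import_xml().
# self.status is checked after each run_cml() call, so a failing first step
# skips the second one.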
if __name__ == '__main__':
##########################################################################
# print 'sys.path',sys.path
from agilepy.lib_wx.objpanel import objbrowser
from agilepy.lib_base.logger import Logger
#net = Network(logger = Logger())
net = Network(logger=Logger())
net.import_xml('facsp2', 'testnet')
objbrowser(net)
|
gpl-3.0
| -8,687,516,388,149,125,000 | 42.039134 | 255 | 0.470337 | false |
fatherlinux/atomic-reactor
|
tests/plugins/test_import_image.py
|
1
|
5039
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PostBuildPluginsRunner, PluginFailedException
from atomic_reactor.util import ImageName
from tests.constants import INPUT_IMAGE, SOURCE
from atomic_reactor.plugins.post_import_image import ImportImagePlugin
import osbs.conf
from osbs.api import OSBS
from osbs.exceptions import OsbsResponseException
from flexmock import flexmock
import pytest
TEST_IMAGESTREAM = "library-imagestream1"
TEST_REPO = "registry.example.com/library/imagestream1"
class X(object):
image_id = INPUT_IMAGE
git_dockerfile_path = None
git_path = None
base_image = ImageName(repo="qwe", tag="asd")
def prepare():
"""
Boiler-plate test set-up
"""
tasker = DockerTasker()
workflow = DockerBuildWorkflow(SOURCE, "test-image")
setattr(workflow, 'builder', X())
setattr(workflow.builder, 'image_id', 'asd123')
setattr(workflow.builder, 'source', X())
setattr(workflow.builder.source, 'dockerfile_path', None)
setattr(workflow.builder.source, 'path', None)
fake_conf = osbs.conf.Configuration(conf_file=None, openshift_uri='/')
flexmock(osbs.conf).should_receive('Configuration').and_return(fake_conf)
runner = PostBuildPluginsRunner(tasker, workflow, [{
'name': ImportImagePlugin.key,
'args': {
'imagestream': TEST_IMAGESTREAM,
'docker_image_repo': TEST_REPO,
'url': '',
'verify_ssl': False,
'use_auth': False
}}])
return runner
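# prepare() wires up a minimal build: a DockerBuildWorkflow with a stub
# builder, an osbs Configuration replaced through flexmock, and a
# PostBuildPluginsRunner that runs only ImportImagePlugin. The individual
# tests then add flexmock expectations on OSBS methods and put the build JSON
# into the BUILD environment variable before calling runner.run().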
def test_bad_setup():
"""
Try all the early-fail paths.
"""
runner = prepare()
(flexmock(OSBS)
.should_receive('get_image_stream')
.never())
(flexmock(OSBS)
.should_receive('create_image_stream')
.never())
(flexmock(OSBS)
.should_receive('import_image')
.never())
# No build JSON
if "BUILD" in os.environ:
del os.environ["BUILD"]
with pytest.raises(PluginFailedException):
runner.run()
@pytest.mark.parametrize(('namespace'), [
({}),
({'namespace': 'my_namespace'})
])
def test_create_image(namespace):
"""
Test that an ImageStream is created if not found
"""
runner = prepare()
build_json = {"metadata": {}}
build_json["metadata"].update(namespace)
os.environ["BUILD"] = json.dumps(build_json)
(flexmock(OSBS)
.should_receive('get_image_stream')
.once()
.with_args(TEST_IMAGESTREAM, **namespace)
.and_raise(OsbsResponseException('none', 404)))
(flexmock(OSBS)
.should_receive('create_image_stream')
.once()
.with_args(TEST_IMAGESTREAM, TEST_REPO, **namespace))
(flexmock(OSBS)
.should_receive('import_image')
.never())
runner.run()
@pytest.mark.parametrize(('namespace'), [
({}),
({'namespace': 'my_namespace'})
])
def test_import_image(namespace):
"""
Test importing tags for an existing ImageStream
"""
runner = prepare()
build_json = {"metadata": {}}
build_json["metadata"].update(namespace)
os.environ["BUILD"] = json.dumps(build_json)
(flexmock(OSBS)
.should_receive('get_image_stream')
.once()
.with_args(TEST_IMAGESTREAM, **namespace))
(flexmock(OSBS)
.should_receive('create_image_stream')
.never())
(flexmock(OSBS)
.should_receive('import_image')
.once()
.with_args(TEST_IMAGESTREAM, **namespace))
runner.run()
def test_exception_during_create():
"""
The plugin should fail if the ImageStream creation fails.
"""
runner = prepare()
os.environ["BUILD"] = json.dumps({
"metadata": {}
})
(flexmock(OSBS)
.should_receive('get_image_stream')
.with_args(TEST_IMAGESTREAM)
.and_raise(OsbsResponseException('none', 404)))
(flexmock(OSBS)
.should_receive('create_image_stream')
.once()
.with_args(TEST_IMAGESTREAM, TEST_REPO)
.and_raise(RuntimeError))
(flexmock(OSBS)
.should_receive('import_image')
.never())
with pytest.raises(PluginFailedException):
runner.run()
def test_exception_during_import():
"""
The plugin should fail if image import fails.
"""
runner = prepare()
os.environ["BUILD"] = json.dumps({
"metadata": {}
})
(flexmock(OSBS)
.should_receive('get_image_stream')
.once()
.with_args(TEST_IMAGESTREAM))
(flexmock(OSBS)
.should_receive('create_image_stream')
.never())
(flexmock(OSBS)
.should_receive('import_image')
.once()
.with_args(TEST_IMAGESTREAM)
.and_raise(RuntimeError))
with pytest.raises(PluginFailedException):
runner.run()
|
bsd-3-clause
| -6,431,589,620,434,505,000 | 24.321608 | 79 | 0.642985 | false |
firemark/grazyna
|
grazyna/tests/test_request.py
|
1
|
2043
|
from grazyna.request import RequestBot
from grazyna.test_mocks.sender import SayMessage, Message
from grazyna.irc.models import User
def make_bot(protocol, **kwargs):
return RequestBot(protocol, user=User('socek!a@b'), chan='#czarnobyl', **kwargs)
def test_init(protocol):
bot = RequestBot(protocol)
assert bot.protocol is protocol
def test_nick_chan_on_privmsg(protocol):
bot = make_bot(protocol, private=True)
assert bot.nick_chan == 'socek'
def test_nick_chan_on_channel(protocol):
bot = make_bot(protocol)
assert bot.nick_chan == '#czarnobyl'
def test_say(protocol):
bot = make_bot(protocol)
bot.say('foobar')
assert protocol.messages == [SayMessage('#czarnobyl', 'foobar')]
def test_notice(protocol):
bot = make_bot(protocol)
bot.notice('foobar')
assert protocol.messages == [Message('NOTICE', 'socek', ':foobar')]
def test_reply(protocol):
bot = make_bot(protocol)
bot.reply('foobar')
assert protocol.messages == [SayMessage('#czarnobyl', 'socek: foobar')]
def test_kick(protocol):
bot = make_bot(protocol)
bot.kick(who='firemark', why='dunno lol')
assert protocol.messages == [Message('KICK', '#czarnobyl', 'firemark', ':dunno lol')]
def test_kick_on_private(protocol):
bot = make_bot(protocol, private=True)
bot.kick()  # kicking is not possible in a private message
assert protocol.messages == []
def test_private_say(protocol):
bot = make_bot(protocol)
bot.private_say('foobar')
assert protocol.messages == [SayMessage('socek', 'foobar')]
def test_command(protocol):
bot = make_bot(protocol)
bot.command('WTF')
assert protocol.messages == [Message('WTF')]
def test_command_msg(protocol):
bot = make_bot(protocol)
bot.command_msg('WTF', 'dunno lol')
assert protocol.messages == [Message('WTF', ':dunno lol')]
def test_mode(protocol):
bot = make_bot(protocol)
bot.mode('#czarnobyl', '+oo', 'lol')
assert protocol.messages == [Message('MODE', 'lol', '#czarnobyl', '+oo')]
|
gpl-2.0
| 3,544,072,557,143,436,300 | 25.881579 | 89 | 0.669114 | false |
citrix-openstack/nodepool
|
nodepool/provider_manager.py
|
1
|
22543
|
#!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import paramiko
import threading
import time
import requests.exceptions
import sys
import shade
import novaclient
from nodeutils import iterate_timeout
from task_manager import Task, TaskManager, ManagerStoppedException
SERVER_LIST_AGE = 5 # How long to keep a cached copy of the server list
IPS_LIST_AGE = 5 # How long to keep a cached copy of the ip list
def get_public_ip(server, version=4):
for addr in server.addresses.get('public', []):
if type(addr) == type(u''): # Rackspace/openstack 1.0
return addr
if addr['version'] == version: # Rackspace/openstack 1.1
return addr['addr']
for addr in server.addresses.get('private', []):
# HPcloud
if (addr['version'] == version and version == 4):
quad = map(int, addr['addr'].split('.'))
if quad[0] == 10:
continue
if quad[0] == 192 and quad[1] == 168:
continue
if quad[0] == 172 and (16 <= quad[1] <= 31):
continue
return addr['addr']
for addr in server.addresses.get('Ext-Net', []):
if addr['version'] == version: # OVH
return addr['addr']
return None
def get_private_ip(server):
ret = []
for (name, network) in server.addresses.iteritems():
if name == 'private':
ret.extend([addrs['addr']
for addrs in network if addrs['version'] == 4])
else:
for interface_spec in network:
if interface_spec['version'] != 4:
continue
if ('OS-EXT-IPS:type' in interface_spec
and interface_spec['OS-EXT-IPS:type'] == 'fixed'):
ret.append(interface_spec['addr'])
if not ret:
if server.status == 'ACTIVE':
# Server expected to have at least one address in ACTIVE status
# TODO: uncomment this code when all nodes have private IPs
# raise KeyError('No private ip found for server')
return None
else:
return None
return ret[0]
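# Example of the nova server.addresses structure these helpers walk (addresses
# shown are illustrative only):
#   {'public': [{'version': 4, 'addr': '203.0.113.5'}],
#    'private': [{'version': 4, 'addr': '10.0.0.7'}]}
# get_public_ip() checks 'public', then non-RFC1918 'private' entries, then
# 'Ext-Net'; get_private_ip() returns the first fixed IPv4 address it finds.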
def make_server_dict(server):
d = dict(id=str(server.id),
name=server.name,
status=server.status,
addresses=server.addresses)
if hasattr(server, 'adminPass'):
d['admin_pass'] = server.adminPass
if hasattr(server, 'key_name'):
d['key_name'] = server.key_name
if hasattr(server, 'progress'):
d['progress'] = server.progress
if hasattr(server, 'metadata'):
d['metadata'] = server.metadata
d['public_v4'] = get_public_ip(server)
d['private_v4'] = get_private_ip(server)
d['public_v6'] = get_public_ip(server, version=6)
return d
def make_image_dict(image):
d = dict(id=str(image.id), name=image.name, status=image.status,
metadata=image.metadata)
if hasattr(image, 'progress'):
d['progress'] = image.progress
return d
class NotFound(Exception):
pass
class CreateServerTask(Task):
def main(self, client):
server = client.nova_client.servers.create(**self.args)
return str(server.id)
class GetServerTask(Task):
def main(self, client):
try:
server = client.nova_client.servers.get(self.args['server_id'])
except novaclient.exceptions.NotFound:
raise NotFound()
return make_server_dict(server)
class DeleteServerTask(Task):
def main(self, client):
client.nova_client.servers.delete(self.args['server_id'])
class ListServersTask(Task):
def main(self, client):
servers = client.nova_client.servers.list()
return [make_server_dict(server) for server in servers]
class AddKeypairTask(Task):
def main(self, client):
client.nova_client.keypairs.create(**self.args)
class ListKeypairsTask(Task):
def main(self, client):
keys = client.nova_client.keypairs.list()
return [dict(id=str(key.id), name=key.name) for
key in keys]
class DeleteKeypairTask(Task):
def main(self, client):
client.nova_client.keypairs.delete(self.args['name'])
class CreateFloatingIPTask(Task):
def main(self, client):
ip = client.nova_client.floating_ips.create(**self.args)
return dict(id=str(ip.id), ip=ip.ip)
class AddFloatingIPTask(Task):
def main(self, client):
client.nova_client.servers.add_floating_ip(**self.args)
class GetFloatingIPTask(Task):
def main(self, client):
ip = client.nova_client.floating_ips.get(self.args['ip_id'])
return dict(id=str(ip.id), ip=ip.ip, instance_id=str(ip.instance_id))
class ListFloatingIPsTask(Task):
def main(self, client):
ips = client.nova_client.floating_ips.list()
return [dict(id=str(ip.id), ip=ip.ip,
instance_id=str(ip.instance_id)) for
ip in ips]
class RemoveFloatingIPTask(Task):
def main(self, client):
client.nova_client.servers.remove_floating_ip(**self.args)
class DeleteFloatingIPTask(Task):
def main(self, client):
client.nova_client.floating_ips.delete(self.args['ip_id'])
class CreateImageTask(Task):
def main(self, client):
# This returns an id
return str(client.nova_client.servers.create_image(**self.args))
class GetImageTask(Task):
def main(self, client):
try:
image = client.nova_client.images.get(**self.args)
except novaclient.exceptions.NotFound:
raise NotFound()
# HP returns 404, rackspace can return a 'DELETED' image.
if image.status == 'DELETED':
raise NotFound()
return make_image_dict(image)
class ListExtensionsTask(Task):
def main(self, client):
try:
resp, body = client.nova_client.client.get('/extensions')
return [x['alias'] for x in body['extensions']]
except novaclient.exceptions.NotFound:
# No extensions present.
return []
class ListFlavorsTask(Task):
def main(self, client):
flavors = client.nova_client.flavors.list()
return [dict(id=str(flavor.id), ram=flavor.ram, name=flavor.name)
for flavor in flavors]
class ListImagesTask(Task):
def main(self, client):
images = client.nova_client.images.list()
return [make_image_dict(image) for image in images]
class FindImageTask(Task):
def main(self, client):
image = client.nova_client.images.find(**self.args)
return dict(id=str(image.id))
class DeleteImageTask(Task):
def main(self, client):
client.nova_client.images.delete(**self.args)
class FindNetworkTask(Task):
def main(self, client):
for network in client.neutron_client.list_networks()['networks']:
if self.args['label'] == network['name']:
return dict(id=str(network['id']))
class ProviderManager(TaskManager):
log = logging.getLogger("nodepool.ProviderManager")
def __init__(self, provider):
super(ProviderManager, self).__init__(None, provider.name,
provider.rate)
self.provider = provider
self.resetClient()
self._images = {}
self._networks = {}
self._cloud_metadata_read = False
self.__flavors = {}
self.__extensions = {}
self._servers = []
self._servers_time = 0
self._servers_lock = threading.Lock()
self._ips = []
self._ips_time = 0
self._ips_lock = threading.Lock()
@property
def _flavors(self):
if not self._cloud_metadata_read:
self._getCloudMetadata()
return self.__flavors
@property
def _extensions(self):
if not self._cloud_metadata_read:
self._getCloudMetadata()
return self.__extensions
def _getCloudMetadata(self):
self.__flavors = self._getFlavors()
self.__extensions = self.listExtensions()
self._cloud_metadata_read = True
def _getClient(self):
return shade.OpenStackCloud(
cloud_config=self.provider.cloud_config,
**self.provider.cloud_config.config)
def runTask(self, task):
try:
task.run(self._client)
except requests.exceptions.ProxyError:
# Try to get a new client object if we get a ProxyError
self.log.exception('Resetting client due to ProxyError')
self.resetClient()
try:
task.run(self._client)
except requests.exceptions.ProxyError as e:
# If we get a second ProxyError, then make sure it gets raised
# the same way all other Exceptions from the Task object do.
# This will move the Exception to the main thread.
task.exception(e, sys.exc_info()[2])
def resetClient(self):
self._client = self._getClient()
def _getFlavors(self):
flavors = self.listFlavors()
flavors.sort(lambda a, b: cmp(a['ram'], b['ram']))
return flavors
def hasExtension(self, extension):
# Note: this will throw an error if the provider is offline
# but all the callers are in threads so the mainloop won't be affected.
if extension in self._extensions:
return True
return False
def findFlavor(self, min_ram, name_filter=None):
# Note: this will throw an error if the provider is offline
# but all the callers are in threads (they call in via CreateServer) so
# the mainloop won't be affected.
for f in self._flavors:
if (f['ram'] >= min_ram
and (not name_filter or name_filter in f['name'])):
return f
raise Exception("Unable to find flavor with min ram: %s" % min_ram)
def findImage(self, name):
if name in self._images:
return self._images[name]
image = self.submitTask(FindImageTask(name=name))
self._images[name] = image
return image
def findNetwork(self, label):
if label in self._networks:
return self._networks[label]
network = self.submitTask(FindNetworkTask(label=label))
self._networks[label] = network
return network
def deleteImage(self, name):
if name in self._images:
del self._images[name]
return self.submitTask(DeleteImageTask(image=name))
def addKeypair(self, name):
key = paramiko.RSAKey.generate(2048)
public_key = key.get_name() + ' ' + key.get_base64()
self.submitTask(AddKeypairTask(name=name, public_key=public_key))
return key
def listKeypairs(self):
return self.submitTask(ListKeypairsTask())
def deleteKeypair(self, name):
return self.submitTask(DeleteKeypairTask(name=name))
def createServer(self, name, min_ram, image_id=None, image_name=None,
az=None, key_name=None, name_filter=None,
config_drive=None, nodepool_node_id=None,
nodepool_image_name=None,
nodepool_snapshot_image_id=None):
if image_name:
image_id = self.findImage(image_name)['id']
flavor = self.findFlavor(min_ram, name_filter)
create_args = dict(name=name, image=image_id, flavor=flavor['id'],
config_drive=config_drive)
if key_name:
create_args['key_name'] = key_name
if az:
create_args['availability_zone'] = az
if self.provider.use_neutron:
nics = []
for network in self.provider.networks:
if 'net-id' in network:
nics.append({'net-id': network['net-id']})
elif 'net-label' in network:
net_id = self.findNetwork(network['net-label'])['id']
nics.append({'net-id': net_id})
else:
raise Exception("Invalid 'networks' configuration.")
create_args['nics'] = nics
# Put provider.name and image_name in as groups so that ansible
# inventory can auto-create groups for us based on each of those
# qualities
# Also list each of those values directly so that non-ansible
# consumption programs don't need to play a game of knowing that
# groups[0] is the image name or anything silly like that.
nodepool_meta = dict(provider_name=self.provider.name)
groups_meta = [self.provider.name]
if nodepool_node_id:
nodepool_meta['node_id'] = nodepool_node_id
if nodepool_snapshot_image_id:
nodepool_meta['snapshot_image_id'] = nodepool_snapshot_image_id
if nodepool_image_name:
nodepool_meta['image_name'] = nodepool_image_name
groups_meta.append(nodepool_image_name)
create_args['meta'] = dict(
groups=json.dumps(groups_meta),
nodepool=json.dumps(nodepool_meta)
)
return self.submitTask(CreateServerTask(**create_args))
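# For illustration, a node built from provider 'rax-dfw' with image 'fedora-21'
# and node id '1234' (all example values) ends up with metadata roughly like:
#   meta = {'groups': '["rax-dfw", "fedora-21"]',
#           'nodepool': '{"provider_name": "rax-dfw", "node_id": "1234", ...}'}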
def getServer(self, server_id):
return self.submitTask(GetServerTask(server_id=server_id))
def getFloatingIP(self, ip_id):
return self.submitTask(GetFloatingIPTask(ip_id=ip_id))
def getServerFromList(self, server_id):
for s in self.listServers():
if s['id'] == server_id:
return s
raise NotFound()
def _waitForResource(self, resource_type, resource_id, timeout):
last_status = None
for count in iterate_timeout(timeout,
"%s %s in %s" % (resource_type,
resource_id,
self.provider.name)):
try:
if resource_type == 'server':
resource = self.getServerFromList(resource_id)
elif resource_type == 'image':
resource = self.getImage(resource_id)
except NotFound:
continue
except ManagerStoppedException:
raise
except Exception:
self.log.exception('Unable to list %ss while waiting for '
'%s will retry' % (resource_type,
resource_id))
continue
status = resource.get('status')
if (last_status != status):
self.log.debug(
'Status of {type} in {provider} {id}: {status}'.format(
type=resource_type,
provider=self.provider.name,
id=resource_id,
status=status))
last_status = status
if status in ['ACTIVE', 'ERROR']:
return resource
def waitForServer(self, server_id, timeout=3600):
return self._waitForResource('server', server_id, timeout)
def waitForServerDeletion(self, server_id, timeout=600):
for count in iterate_timeout(timeout, "server %s deletion in %s" %
(server_id, self.provider.name)):
try:
self.getServerFromList(server_id)
except NotFound:
return
def waitForImage(self, image_id, timeout=3600):
# TODO(mordred): This should just be handled by the Fake, but we're
# not quite plumbed through for that yet
if image_id == 'fake-glance-id':
return True
return self._waitForResource('image', image_id, timeout)
def createFloatingIP(self, pool=None):
return self.submitTask(CreateFloatingIPTask(pool=pool))
def addFloatingIP(self, server_id, address):
self.submitTask(AddFloatingIPTask(server=server_id,
address=address))
def addPublicIP(self, server_id, pool=None):
ip = self.createFloatingIP(pool)
try:
self.addFloatingIP(server_id, ip['ip'])
except novaclient.exceptions.ClientException:
# Delete the floating IP here as cleanupServer will not
# have access to the ip -> server mapping preventing it
# from removing this IP.
self.deleteFloatingIP(ip['id'])
raise
for count in iterate_timeout(600, "ip to be added to %s in %s" %
(server_id, self.provider.name)):
try:
newip = self.getFloatingIP(ip['id'])
except ManagerStoppedException:
raise
except Exception:
self.log.exception('Unable to get IP details for server %s, '
'will retry' % (server_id))
continue
if newip['instance_id'] == server_id:
return newip['ip']
def createImage(self, server_id, image_name, meta):
return self.submitTask(CreateImageTask(server=server_id,
image_name=image_name,
metadata=meta))
def getImage(self, image_id):
return self.submitTask(GetImageTask(image=image_id))
def uploadImage(self, image_name, filename, disk_format, container_format,
meta):
# configure glance and upload image. Note the meta flags
# are provided as custom glance properties
# NOTE: we have wait=True set here. This is not how we normally
# do things in nodepool, preferring to poll ourselves thankyouverymuch.
# However - two things to note:
# - glance v1 has no async mechanism, so we have to handle it anyway
# - glance v2 waiting is very strange and complex - but we have to
# block for our v1 clouds anyway, so we might as well
# have the interface be the same and treat faking-out
# a shade-level fake-async interface later
image = self._client.create_image(
name=image_name,
filename='%s.%s' % (filename, disk_format),
is_public=False,
disk_format=disk_format,
container_format=container_format,
wait=True,
**meta)
return image.id
def listExtensions(self):
return self.submitTask(ListExtensionsTask())
def listImages(self):
return self.submitTask(ListImagesTask())
def listFlavors(self):
return self.submitTask(ListFlavorsTask())
def listFloatingIPs(self):
if time.time() - self._ips_time >= IPS_LIST_AGE:
if self._ips_lock.acquire(False):
try:
self._ips = self.submitTask(ListFloatingIPsTask())
self._ips_time = time.time()
finally:
self._ips_lock.release()
return self._ips
def removeFloatingIP(self, server_id, address):
return self.submitTask(RemoveFloatingIPTask(server=server_id,
address=address))
def deleteFloatingIP(self, ip_id):
return self.submitTask(DeleteFloatingIPTask(ip_id=ip_id))
def listServers(self, cache=True):
if (not cache or
time.time() - self._servers_time >= SERVER_LIST_AGE):
# Since we're using cached data anyway, we don't need to
# have more than one thread actually submit the list
# servers task. Let the first one submit it while holding
# a lock, and the non-blocking acquire method will cause
# subsequent threads to just skip this and use the old
# data until it succeeds.
if self._servers_lock.acquire(False):
try:
self._servers = self.submitTask(ListServersTask())
self._servers_time = time.time()
finally:
self._servers_lock.release()
return self._servers
def deleteServer(self, server_id):
return self.submitTask(DeleteServerTask(server_id=server_id))
def cleanupServer(self, server_id):
done = False
while not done:
try:
server = self.getServerFromList(server_id)
done = True
except NotFound:
# If we have old data, that's fine, it should only
# indicate that a server exists when it doesn't; we'll
# recover from that. However, if we have no data at
# all, wait until the first server list task
# completes.
if self._servers_time == 0:
time.sleep(SERVER_LIST_AGE + 1)
else:
done = True
# This will either get the server or raise an exception
server = self.getServerFromList(server_id)
if self.hasExtension('os-floating-ips'):
for ip in self.listFloatingIPs():
if ip['instance_id'] == server_id:
self.log.debug('Deleting floating ip for server %s' %
server_id)
self.deleteFloatingIP(ip['id'])
if (self.hasExtension('os-keypairs') and
server['key_name'] != self.provider.keypair):
for kp in self.listKeypairs():
if kp['name'] == server['key_name']:
self.log.debug('Deleting keypair for server %s' %
server_id)
self.deleteKeypair(kp['name'])
self.log.debug('Deleting server %s' % server_id)
self.deleteServer(server_id)
|
apache-2.0
| 4,222,597,944,849,373,000 | 35.418417 | 79 | 0.576764 | false |
a10networks/acos-client
|
acos_client/v30/slb/virtual_server.py
|
1
|
5582
|
# Copyright 2014-2016, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client.v30 import base
from acos_client.v30.slb.virtual_port import VirtualPort
class VirtualServer(base.BaseV30):
url_prefix = '/slb/virtual-server/'
@property
def vport(self):
return VirtualPort(self.client)
def all(self):
return self._get(self.url_prefix)
def get(self, name):
return self._get(self.url_prefix + name)
def _set(self, name, ip_address=None, arp_disable=False, description=None, vrid=None,
virtual_server_templates=None, template_virtual_server=None,
port_list=None, status=None, **kwargs):
params = {
"virtual-server": self.minimal_dict({
"name": name,
"ip-address": ip_address,
"arp-disable": None if arp_disable is None else int(arp_disable),
"description": description,
"port-list": port_list
}),
}
if self._is_ipv6(ip_address):
params['virtual-server']['ipv6-address'] = ip_address
else:
params['virtual-server']['ip-address'] = ip_address
if description:
params['virtual-server']['description'] = description
else:
params['virtual-server']['description'] = None
if vrid:
params['virtual-server']['vrid'] = int(vrid)
if virtual_server_templates:
virtual_server_templates = {k: v for k, v in virtual_server_templates.items() if v}
params['virtual-server']['template-virtual-server'] = \
virtual_server_templates.get('template-virtual-server', None)
params['virtual-server']['template-logging'] = virtual_server_templates.get('template-logging', None)
params['virtual-server']['template-policy'] = virtual_server_templates.get('template-policy', None)
params['virtual-server']['template-scaleout'] = virtual_server_templates.get('template-scaleout', None)
# for backward compatibility
if template_virtual_server:
params['virtual-server']['template-virtual-server'] = str(template_virtual_server)
return params
def create(self, name, ip_address, arp_disable=False, description=None, vrid=None,
virtual_server_templates=None, template_virtual_server=None,
port_list=None, max_retries=None, timeout=None, status=None, **kwargs):
params = self._set(name, ip_address, arp_disable=arp_disable, description=description,
vrid=vrid, virtual_server_templates=virtual_server_templates,
template_virtual_server=template_virtual_server,
port_list=port_list, status=status, **kwargs)
return self._post(self.url_prefix, params, max_retries=max_retries, timeout=timeout, axapi_args=kwargs)
def update(self, name, ip_address=None, arp_disable=False, description=None, vrid=None,
virtual_server_templates=None, template_virtual_server=None,
port_list=None, max_retries=None, timeout=None, status=None, **kwargs):
params = self._set(name, ip_address, arp_disable=arp_disable, description=description,
vrid=vrid, virtual_server_templates=virtual_server_templates,
template_virtual_server=template_virtual_server,
port_list=port_list, status=status, **kwargs)
return self._post(self.url_prefix + name, params, max_retries=max_retries, timeout=timeout,
axapi_args=kwargs)
def replace(self, name, ip_address=None, arp_disable=False, description=None, vrid=None,
virtual_server_templates=None, template_virtual_server=None,
port_list=None, max_retries=None, timeout=None, status=None, **kwargs):
params = self._set(name, ip_address, arp_disable=arp_disable, description=description,
vrid=vrid, virtual_server_templates=virtual_server_templates,
template_virtual_server=template_virtual_server,
port_list=port_list, status=status, **kwargs)
return self._put(self.url_prefix + name, params, max_retries=max_retries, timeout=timeout,
axapi_args=kwargs)
def delete(self, name):
return self._delete(self.url_prefix + name)
def stats(self, name='', max_retries=None, timeout=None, **kwargs):
resp = self._get(self.url_prefix + name + '/port/stats', max_retries=max_retries,
timeout=timeout, axapi_args=kwargs)
return resp
def oper(self, name='', max_retries=None, timeout=None, **kwargs):
resp = self._get(self.url_prefix + name + '/oper', max_retries=max_retries,
timeout=timeout, axapi_args=kwargs)
return resp
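# Minimal usage sketch (illustrative only; assumes an authenticated v30 client
# object like the one the other modules in this package pass around, and uses
# made-up names/addresses):
#   vs = VirtualServer(client)
#   vs.create('vip1', '192.0.2.10')
#   vs.update('vip1', description='web vip')
#   vs.delete('vip1')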
|
apache-2.0
| -5,504,389,639,245,075,000 | 48.839286 | 115 | 0.626478 | false |
tumluliu/tracks-rest-api
|
tracksdb.py
|
1
|
3636
|
"""
ORM definitions for mapping tracks data stored in PostgreSQL database
"""
from sqlalchemy import create_engine, Column, Integer, Float, String
from sqlalchemy.dialects.postgresql import INTERVAL, TIMESTAMP
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import URL
from geoalchemy2 import Geometry
from geoalchemy2.functions import ST_AsGeoJSON as st_asgeojson
from settings import PG_DB_CONF
import json
import logging
logger = logging.getLogger(__name__)
engine = create_engine(URL(**PG_DB_CONF))
Base = declarative_base(bind=engine)
Session = scoped_session(sessionmaker(engine))
class Track(Base):
__tablename__ = 'tracks'
ogc_fid = Column(Integer, primary_key=True)
gpx_id = Column(Integer)
name = Column(String)
cmt = Column(String)
desc = Column(String)
src = Column(String)
number = Column(Integer)
wkb_geometry = Column(Geometry(geometry_type='MULTILINESTRING', srid=4326))
class TrackInfo(Base):
__tablename__ = 'trackinfo'
ogc_fid = Column(Integer, primary_key=True)
gpx_id = Column(Integer)
segments = Column(Integer)
length_2d = Column(Float)
length_3d = Column(Float)
moving_time = Column(INTERVAL)
stopped_time = Column(INTERVAL)
max_speed = Column(Float)
uphill = Column(Float)
downhill = Column(Float)
started = Column(TIMESTAMP)
ended = Column(TIMESTAMP)
points = Column(Integer)
start_lon = Column(Float)
start_lat = Column(Float)
end_lon = Column(Float)
end_lat = Column(Float)
#start_geom = Column(Geometry(geometry_type='POINT', srid=4326))
#end_geom = Column(Geometry(geometry_type='POINT', srid=4326))
def track_serializer(instance):
track_dict = {}
track_dict['ID'] = instance.ogc_fid
track_dict['GPX ID'] = instance.gpx_id
track_dict['Name'] = instance.name
track_dict['CMT'] = instance.cmt
track_dict['Description'] = instance.desc
track_dict['Source'] = instance.src
track_dict['Number'] = instance.number
track_dict['GeoJSON'] = json.loads(
Session.scalar(st_asgeojson(instance.wkb_geometry)))
logger.debug("Serialized track: %s", track_dict)
return track_dict
def trackinfo_serializer(instance):
ti_dict = {}
ti_dict['ID'] = instance.ogc_fid
ti_dict['GPX ID'] = instance.gpx_id
ti_dict['Segments'] = instance.segments
ti_dict['2D length'] = instance.length_2d
ti_dict['3D length'] = instance.length_3d
ti_dict['Moving time'] = str(instance.moving_time)
ti_dict['Stopped time'] = str(instance.stopped_time)
ti_dict['Max speed'] = instance.max_speed
ti_dict['Uphill'] = instance.uphill
ti_dict['Downhill'] = instance.downhill
ti_dict['Started at'] = str(instance.started)
ti_dict['Ended at'] = str(instance.ended)
ti_dict['Points'] = instance.points
ti_dict['Start lon'] = instance.start_lon
ti_dict['Start lat'] = instance.start_lat
ti_dict['End lon'] = instance.end_lon
ti_dict['End lat'] = instance.end_lat
#ti_dict['Start point GeoJSON'] = json.loads(
#Session.scalar(st_asgeojson(instance.start_geom)))
#ti_dict['End point GeoJSON'] = json.loads(
#Session.scalar(st_asgeojson(instance.end_geom)))
logger.debug("Serialized trackinfo: %s", ti_dict)
return ti_dict
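# Minimal usage sketch (illustrative; assumes the database configured in
# PG_DB_CONF is reachable and that a track with ogc_fid 1 exists):
#   track = Session.query(Track).filter(Track.ogc_fid == 1).first()
#   if track is not None:
#       feature = track_serializer(track)  # dict including a 'GeoJSON' member
#       info = Session.query(TrackInfo).filter(TrackInfo.gpx_id == track.gpx_id).first()
#       stats = trackinfo_serializer(info) if info else None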
|
mit
| -7,546,321,651,937,122,000 | 37.680851 | 80 | 0.637514 | false |
fabiking/plugin.video.fabiking
|
resources/tools/epg_formulatv.py
|
1
|
24493
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# PalcoTV EPG FórmulaTV.com
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Thanks to Jesús for the plugintools library (www.mimediacenter.info)
#------------------------------------------------------------
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import time
from datetime import datetime
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
tmp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', ''))
LIST = "list"
THUMBNAIL = "thumbnail"
MOVIES = "movies"
TV_SHOWS = "tvshows"
SEASONS = "seasons"
EPISODES = "episodes"
FANART = "fanart"
OTHER = "other"
MUSIC = "music"
def epg_ftv(title):
plugintools.log('[%s %s].epg_ftv %s' % (addonName, addonVersion, title))
channel = title.lower()
channel = channel.replace("Opción 1", "").replace("HD", "").replace("720p", "").replace("1080p", "").replace("SD", "").replace("HQ", "").replace("LQ", "").strip()
channel = channel.replace("Opción 2", "")
channel = channel.replace("Opción 3", "")
channel = channel.replace("Op. 1", "")
channel = channel.replace("Op. 2", "")
channel = channel.replace("Op. 3", "")
plugintools.log("Canal: "+channel)
params = plugintools.get_params()
params["url"]='http://www.formulatv.com/programacion/'
if channel == "la 1" or channel == "la 1 hd":
channel = "la 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la 2":
channel = "la 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "antena 3" or channel == "antena 3 hd":
channel = "antena 3 televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cuatro" or channel == "cuatro hd":
channel = "cuatro"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "telecinco hd" or channel == "telecinco":
channel == "telecinco"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "la sexta" or channel == "la sexta hd":
channel = "lasexta"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+1" or channel == "canal+ 1" or channel == "canal plus" or channel == "canal+ hd":
channel = "canal+1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+2" or channel == "canal+ 2" or channel == "canal plus 2" or channel == "canal+ 2 hd":
channel = "canal+ 2"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 1 ...30" or channel == "canal+ 1... 30":
channel = "canal+ 1 ...30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ series":
channel = "canal+ series"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "goltv" or channel == "golt":
channel = "gol televisión"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "40 TV":
channel = "40 tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal sur" or channel == "andalucia tv":
channel = "canal sur"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn white":
channel = "axn white"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "xtrm":
channel = "xtrm"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "calle 13" or channel == "calle 13 hd":
channel = "calle 13"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "amc" or channel == "amc españa":
channel = "amc (españa)"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "barça tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "andalucía tv" or channel == "andalucia tv":
channel = "andalucia-tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "aragón tv" or channel == "aragon tv":
channel = "aragon-television"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "axn" or channel == "axn hd":
channel = "axn"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "bio":
channel = "bio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal barça" or channel == "canal barca":
channel = "canal barca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ 30" or channel == "canal+ ...30" or channel == "canal plus 30":
channel = "canal+ 1... 30"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ accion" or channel == "canal+ acción" or channel=="canal plus accion":
channel = "canal+ acción"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ comedia" or channel == "canal plus comedia":
channel = "canal+ comedia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ decine" or channel == "canal plus decine":
channel = "canal+ dcine"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ deporte" or channel == "canal plus deporte":
channel = "canal+ deporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ futbol" or channel == "canal+ fútbol" or channel == "canal plus fútbol" or channel == "canal plus futbol":
channel = "canal+ fútbol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ liga":
channel = "canal+ liga"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ golf" or channel == "canal plus golf":
channel = "golf+"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ toros" or channel == "canal plus toros":
channel = "canal+ toros"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ extra" or channel=="canal+ xtra":
channel = "canal+ xtra"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal 33" or channel == "canal33":
channel = "canal33"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal cocina":
channel = "canal cocina"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cartoon network" or channel == "cartoon network hd":
channel = "cartoon network"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "castilla-la mancha televisión" or channel == "castilla-la mancha tv":
channel = "castilla-la-mancha"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "caza y pesca":
channel = "caza-y-pesca"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "clan" or channel == "clan tve 50" or channel == "clan tve":
channel = "clan tve"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nickelodeon":
channel = "nickelodeon"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "boing":
channel = "boing"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnbc":
channel = "cnbc"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cnn-international" or channel == "cnn int":
channel = "cnn international"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cosmopolitan" or channel == "cosmopolitan tv":
channel = "cosmopolitan"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "a&e" or channel == "a&e españa":
channel = "a&e españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal+ dcine" or channel == "canal plus dcine":
channel = "dcine espanol"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "decasa":
channel = "decasa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "discovery channel":
channel = "discovery channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "national geographic":
channel = "national geographic"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "discovery max":
channel = "discovery max"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney channel":
channel = "disney channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney-cinemagic":
channel = "disney cinemagic"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney xd":
channel = "disney xd"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "disney junior" or channel == "disney jr":
channel = "disney junior"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "divinity":
channel = "divinity"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "energy":
channel = "energy"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "etb1" or channel == "etb 1":
channel = "euskal telebista 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "etb 2" or channel == "etb2":
channel = "euskal telebista 1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "factoría de ficción" or channel == "factoria de ficcion" or channel == "fdf":
channel = "fdf"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "buzz":
channel = "buzz"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox" or channel == "fox hd":
channel = "fox españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox life":
channel = "fox life"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "fox news":
channel = "fox news"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "historia" or channel == "historia hd":
channel = "canal de historia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "natura" or channel == "canal natura":
channel = "canal natura"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "cosmopolitan" or channel == "cosmopolitan tv":
channel = "cosmopolitan"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "hollywood" or channel == "hollywood channel":
channel = "canal hollywood"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "ib3 televisio" or channel == "ib3 televisió":
channel = "ib3 televisio"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "intereconomia" or channel == "intereconomía" or channel == "intereconomía tv":
channel = "intereconomia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "mtv" or channel == "mtv españa" or channel == "mtv espana":
channel = "mtv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nat geo wild":
channel = "nat geo wild"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "neox":
channel = "neox"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nick jr." or channel == "nick jr":
channel = "nick jr."
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "odisea" or channel == "odisea hd":
channel = "odisea"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "nova":
channel = "nova"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "panda":
channel = "panda"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "paramount channel":
channel = "paramount channel"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "playboy tv":
channel = "playboy tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "playhouse disney":
channel = "playhouse disney"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "rtv murcia 7" or channel == "radiotelevisión de murcia" or channel == "rtv murcia":
channel = "7 región de murcia"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "real madrid tv":
channel = "real madrid tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "syfy" or channel== "syfy españa":
channel = "syfy españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "sony entertainment":
channel = "sony entertainment"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "sportmania" or channel == "sportmania hd":
channel = "sportmania"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tcm":
channel = "tcm"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "teledeporte" or channel == "intereconomía" or channel == "intereconomía tv":
channel = "teledeporte"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "telemadrid" or channel == "telemadrid hd":
channel = "telemadrid"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "televisión canaria" or channel == "televisión canaria":
channel = "television canaria"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "televisión de galicia" or channel == "television de galicia" or channel == "tvg":
channel = "tvg"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tnt" or channel == "tnt hd":
channel = "tnt españa"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "tv3" or channel == "tv3 hd":
channel = "tv3"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "vh1":
channel = "vh1"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "viajar":
channel = "canal viajar"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "baby tv":
channel = "baby tv"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "canal panda":
channel = "canal panda"
epg_channel = epg_formulatv(params, channel)
return epg_channel
elif channel == "arenasports 1":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-1')
return epg_channel
elif channel == "arenasports 2":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-2')
return epg_channel
elif channel == "arenasports 3":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-3')
return epg_channel
elif channel == "arenasports 4":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-4')
return epg_channel
elif channel == "arenasports 5":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-arena-sport-5')
return epg_channel
elif channel == "sportklub 1" or channel == "sport klub 1":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-sport-klub-1')
return epg_channel
elif channel == "sportklub 2" or channel == "sport klub 2":
from resources.tools.epg_arenasport import *
epg_channel = epg_arena('http://tv.aladin.info/tv-program-sport-klub-2')
return epg_channel
else:
return False
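# Usage sketch (the channel title is an example; epg_ftv returns False for
# channels that have no FormulaTV mapping):
#   epg = epg_ftv("La 1 HD")
#   if epg:
#       now_showing = epg[1]  # current programme (see the index layout below)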
def epg_formulatv(params, channel):
plugintools.log('[%s %s].epg_formulatv %s' % (addonName, addonVersion, repr(params)))
thumbnail = params.get("thumbnail")
fanart = params.get("extra")
canal_buscado = channel
canal_buscado= canal_buscado.replace(" hd", "")
epg_channel = []
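# epg_channel is built as a flat list: [current time, current programme,
# next start time, time until the next programme, next programme,
# later start time, later programme] (see the index comments below).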
params["plot"]=""
backup_ftv = tmp + 'backup_ftv.txt'
if os.path.exists(backup_ftv):
pass
else:
backup_epg = open(backup_ftv, "a")
data = plugintools.read(params.get("url"))
backup_epg.write(data)
backup_epg.close()
# Open the backup file
backup_epg = open(backup_ftv, "r")
data = backup_epg.read()
#plugintools.log("data= "+data)
# Compute the current time
ahora = datetime.now()
minutejo = str(ahora.minute)
if ahora.minute < 10: # Pad the current minute with a leading zero when it is below ten
minuto_ahora = '0'+str(ahora.minute)
else:
minuto_ahora = str(ahora.minute)
hora_ahora = str(ahora.hour)+":"+minuto_ahora
epg_channel.append(hora_ahora) # index 0
# Read the programme listing from the data source
body = plugintools.find_multiple_matches(data, '<td class="prga-i">(.*?)</tr>')
for entry in body:
channel = plugintools.find_single_match(entry, 'alt=\"([^"]+)')
channel = channel.lower()
plugintools.log("Buscando canal: "+canal_buscado)
plugintools.log("Channel: "+channel)
if channel == canal_buscado:
print 'channel',channel
evento_ahora = plugintools.find_single_match(entry, '<p>(.*?)</p>')
epg_channel.append(evento_ahora) # index 1
hora_luego = plugintools.find_single_match(entry, 'class="fec1">(.*)</span>')
hora_luego = hora_luego.split("</span>")
hora_luego = hora_luego[0]
#print 'hora_luego',hora_luego
epg_channel.append(hora_luego) # index 2
diff_luego = plugintools.find_single_match(entry, 'class="fdiff">([^<]+)').strip()
#print 'diff_luego',diff_luego
epg_channel.append(diff_luego) # index 3
evento_luego = plugintools.find_single_match(entry, '<span class="tprg1">(.*?)</span>')
#print 'evento_luego',evento_luego
epg_channel.append(evento_luego) # index 4
hora_mastarde = plugintools.find_single_match(entry, 'class="fec2">(.*)</span>')
hora_mastarde = hora_mastarde.split("</span>")
hora_mastarde = hora_mastarde[0]
epg_channel.append(hora_mastarde) # index 5
evento_mastarde = plugintools.find_single_match(entry, '<span class="tprg2">(.*?)</span>')
#print 'evento_mastarde',evento_mastarde
epg_channel.append(evento_mastarde) # index 6
sinopsis = '[COLOR lightgreen][I]('+diff_luego+') [/I][/COLOR][COLOR white][B]'+hora_luego+' [/COLOR][/B]'+evento_luego+'[CR][COLOR white][B][CR]'+hora_mastarde+' [/COLOR][/B] '+evento_mastarde
plugintools.log("Sinopsis: "+sinopsis)
datamovie = {}
datamovie["Plot"]=sinopsis
#plugintools.add_item(action="", title= '[COLOR orange][B]'+channel+' [/B][COLOR lightyellow]'+ahora+'[/COLOR] [COLOR lightgreen][I]('+diff_luego+') [/I][/COLOR][COLOR white][B]'+hora_luego+' [/COLOR][/B] '+evento_luego, info_labels = datamovie , thumbnail = thumbnail , fanart = fanart , folder = False, isPlayable = False)
#plugintools.log("entry= "+entry)
return epg_channel
# Request the URL
def gethttp_headers(params):
plugintools.log('[%s %s].gethttp_headers %s' % (addonName, addonVersion, repr(params)))
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",'http://www.digitele.com/pluginfiles/canales/'])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("body= "+body)
return body
|
gpl-2.0
| -6,917,189,217,825,973,000 | 41.670157 | 336 | 0.590798 | false |
j00bar/django-widgy
|
widgy/contrib/urlconf_include/tests.py
|
1
|
5207
|
import imp
import django
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.decorators import decorator_from_middleware
from django.http import HttpResponse, HttpResponseNotFound
from django.core import urlresolvers
from django.contrib.auth.models import AnonymousUser
from django.conf.urls import include, url, patterns
from widgy.contrib.urlconf_include.middleware import PatchUrlconfMiddleware
from widgy.contrib.urlconf_include.models import UrlconfIncludePage
patch_decorator = decorator_from_middleware(PatchUrlconfMiddleware)
@patch_decorator
def plain_view(request):
return HttpResponse('')
@patch_decorator
def view_that_resolves(request, login_url):
# Use request.urlconf because we're mocking everything. BaseHandler
# would call set_urlconf if we were making a real request.
from django.contrib.auth.views import login as login_view
match = urlresolvers.resolve(login_url, request.urlconf)
assert match.func == login_view
return HttpResponse('')
@patch_decorator
def view_that_reverses(request, desired):
assert urlresolvers.reverse('login', request.urlconf) == desired
return HttpResponse('')
@patch_decorator
def view_not_found(request):
return HttpResponseNotFound('')
@patch_decorator
def view_that_switches_urlconf(request, login_url):
urlresolvers.resolve(login_url, request.urlconf)
new_urlconf = imp.new_module('urlconf')
new_urlconf.urlpatterns = patterns('', url(r'^bar/', include('django.contrib.auth.urls')))
request.urlconf = new_urlconf
urlresolvers.resolve('/bar/login/', request.urlconf)
return HttpResponse('')
class TestMiddleware(TestCase):
def setUp(self):
self.factory = RequestFactory()
if django.VERSION > (1, 7):
def resolver_cache_size(self):
return urlresolvers.get_resolver.cache_info().currsize
else:
def resolver_cache_size(self):
return len(urlresolvers._resolver_cache)
def get_request(self, path='/'):
r = self.factory.get(path)
r.user = AnonymousUser()
return r
def test_noresolve(self):
# It's helpful to test a view that does no resolving, because
# uncache_urlconf needs to catch KeyError.
plain_view(self.get_request())
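# do_test_memory_leak creates a UrlconfIncludePage (which makes the middleware
# build a dynamic urlconf per request), warms the caches, then asserts that
# Django's resolver cache stops growing, i.e. patched urlconfs are not leaked.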
def do_test_memory_leak(self, doit):
UrlconfIncludePage.objects.create(
slug='foo',
urlconf_name='django.contrib.auth.urls',
)
# warmup
doit()
doit()
n = self.resolver_cache_size()
doit()
doit()
n_after = self.resolver_cache_size()
self.assertEqual(n, n_after)
def test_memory_leak(self):
def doit():
view_that_resolves(self.get_request(), login_url='/foo/login/')
self.do_test_memory_leak(doit)
def test_memory_leak_404(self):
def doit():
view_not_found(self.get_request('/asdf/asdfasdf/'))
self.do_test_memory_leak(doit)
def test_memory_leak_urlconf_replaced(self):
def doit():
view_that_switches_urlconf(self.get_request(), '/foo/login/')
self.do_test_memory_leak(doit)
def test_change_url(self):
page = UrlconfIncludePage.objects.create(
slug='foo',
urlconf_name='django.contrib.auth.urls',
)
view_that_resolves(self.get_request(), login_url='/foo/login/')
view_that_reverses(self.get_request(), desired='/foo/login/')
page.slug = 'bar'
page.save()
view_that_reverses(self.get_request(), desired='/bar/login/')
view_that_resolves(self.get_request(), login_url='/bar/login/')
def test_login_required(self):
UrlconfIncludePage.objects.create(
slug='foo',
urlconf_name='django.contrib.auth.urls',
login_required=True,
)
with self.assertRaises(urlresolvers.Resolver404):
view_that_resolves(self.get_request(), login_url='404')
r = self.get_request()
r.user.is_authenticated = lambda: True
view_that_resolves(r, login_url='/foo/login/')
def test_login_redirect(self):
UrlconfIncludePage.objects.create(
slug='foo',
urlconf_name='django.contrib.auth.urls',
login_required=True,
)
# request for good url redirects to login
r = self.get_request('/foo/login/')
resp = view_not_found(r)
self.assertEqual(resp.status_code, 302)
# request for 404 url stays 404
r = self.get_request('/foo/nonexistentasdf/')
resp = view_not_found(r)
self.assertEqual(resp.status_code, 404)
def test_login_required_legit_404_shouldnt_redirect(self):
"""
We should only redirect to login if there was no urlpattern that
matched, not for other types of 404s.
"""
UrlconfIncludePage.objects.create(
slug='foo',
urlconf_name='django.contrib.auth.urls',
login_required=False,
)
r = self.get_request('/foo/login/')
resp = view_not_found(r)
self.assertEqual(resp.status_code, 404)
|
apache-2.0
| 7,117,233,488,581,188,000 | 30.36747 | 94 | 0.645861 | false |
raulperula/python_tutorials
|
tutorial-pygame/src/example18.py
|
1
|
2277
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Raul Perula-Martinez"
__date__ = "2015-01"
__version__ = "$ Revision: 1.0 $"
import pygame
class Player(pygame.sprite.Sprite):
def __init__(self, image):
pygame.sprite.Sprite.__init__(self)  # initialise the Sprite base class
self.image = image
self.rect = self.image.get_rect()
self.rect.top, self.rect.left = 200, 200
def move(self, vel_x, vel_y):
self.rect.move_ip(vel_x, vel_y)
def update(self, surface):
surface.blit(self.image, self.rect)
def main():
pygame.init()
# set window size
screen1 = pygame.display.set_mode([400, 400])
# clock
clock1 = pygame.time.Clock()
# exit (quit)
quit_cond = False
image_player = pygame.image.load(
"../media/images/stick.png").convert_alpha()
image_bg = pygame.image.load(
"../media/images/background.png").convert_alpha()
player1 = Player(image_player)
vel_x, vel_y = 0, 0
velocity = 10
# main loop
while not quit_cond:
# check events
for event in pygame.event.get():
# detect a QUIT event
if event.type == pygame.QUIT:
quit_cond = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
vel_x = -velocity
elif event.key == pygame.K_RIGHT:
vel_x = velocity
elif event.key == pygame.K_UP:
vel_y = -velocity
elif event.key == pygame.K_DOWN:
vel_y = velocity
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
vel_x = 0
elif event.key == pygame.K_RIGHT:
vel_x = 0
elif event.key == pygame.K_UP:
vel_y = 0
elif event.key == pygame.K_DOWN:
vel_y = 0
# update clock
clock1.tick(20)
# screen background
screen1.blit(image_bg, (0, 0))
# player
player1.move(vel_x, vel_y)
player1.update(screen1)
# refresh the window
pygame.display.update()
# close the window
pygame.quit()
# Main
main()
|
gpl-3.0
| -8,708,156,146,956,355,000 | 24.021978 | 57 | 0.515591 | false |
jelly/calibre
|
src/calibre/ebooks/oeb/reader.py
|
1
|
28670
|
"""
Container-/OPF-based input OEBBook reader.
"""
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import sys, os, uuid, copy, re, cStringIO
from itertools import izip
from urlparse import urldefrag, urlparse
from urllib import unquote as urlunquote
from collections import defaultdict
from lxml import etree
from calibre.ebooks.oeb.base import OPF1_NS, OPF2_NS, OPF2_NSMAP, DC11_NS, \
DC_NSES, OPF, xml2text, XHTML_MIME
from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, OEB_IMAGES, \
PAGE_MAP_MIME, JPEG_MIME, NCX_MIME, SVG_MIME
from calibre.ebooks.oeb.base import XMLDECL_RE, COLLAPSE_RE, \
MS_COVER_TYPE, iterlinks
from calibre.ebooks.oeb.base import namespace, barename, XPath, xpath, \
urlnormalize, BINARY_MIME, \
OEBError, OEBBook, DirContainer
from calibre.ebooks.oeb.writer import OEBWriter
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.localization import get_lang
from calibre.ptempfile import TemporaryDirectory
from calibre.constants import __appname__, __version__
from calibre import guess_type, xml_replace_entities
__all__ = ['OEBReader']
class OEBReader(object):
"""Read an OEBPS 1.x or OPF/OPS 2.0 file collection."""
COVER_SVG_XP = XPath('h:body//svg:svg[position() = 1]')
COVER_OBJECT_XP = XPath('h:body//h:object[@data][position() = 1]')
Container = DirContainer
"""Container type used to access book files. Override in sub-classes."""
DEFAULT_PROFILE = 'PRS505'
"""Default renderer profile for content read with this Reader."""
TRANSFORMS = []
"""List of transforms to apply to content read with this Reader."""
@classmethod
def config(cls, cfg):
"""Add any book-reading options to the :class:`Config` object
:param:`cfg`.
"""
return
@classmethod
def generate(cls, opts):
"""Generate a Reader instance from command-line options."""
return cls()
def __call__(self, oeb, path):
"""Read the book at :param:`path` into the :class:`OEBBook` object
:param:`oeb`.
"""
self.oeb = oeb
self.logger = self.log = oeb.logger
oeb.container = self.Container(path, self.logger)
oeb.container.log = oeb.log
opf = self._read_opf()
self._all_from_opf(opf)
return oeb
def _clean_opf(self, opf):
nsmap = {}
for elem in opf.iter(tag=etree.Element):
nsmap.update(elem.nsmap)
for elem in opf.iter(tag=etree.Element):
if namespace(elem.tag) in ('', OPF1_NS) and ':' not in barename(elem.tag):
elem.tag = OPF(barename(elem.tag))
nsmap.update(OPF2_NSMAP)
attrib = dict(opf.attrib)
nroot = etree.Element(OPF('package'),
nsmap={None: OPF2_NS}, attrib=attrib)
metadata = etree.SubElement(nroot, OPF('metadata'), nsmap=nsmap)
ignored = (OPF('dc-metadata'), OPF('x-metadata'))
for elem in xpath(opf, 'o2:metadata//*'):
if elem.tag in ignored:
continue
if namespace(elem.tag) in DC_NSES:
tag = barename(elem.tag).lower()
elem.tag = '{%s}%s' % (DC11_NS, tag)
if elem.tag.startswith('dc:'):
tag = elem.tag.partition(':')[-1].lower()
elem.tag = '{%s}%s' % (DC11_NS, tag)
metadata.append(elem)
for element in xpath(opf, 'o2:metadata//o2:meta'):
metadata.append(element)
for tag in ('o2:manifest', 'o2:spine', 'o2:tours', 'o2:guide'):
for element in xpath(opf, tag):
nroot.append(element)
return nroot
def _read_opf(self):
data = self.oeb.container.read(None)
data = self.oeb.decode(data)
data = XMLDECL_RE.sub('', data)
data = re.sub(r'http://openebook.org/namespaces/oeb-package/1.0(/*)',
OPF1_NS, data)
try:
opf = etree.fromstring(data)
except etree.XMLSyntaxError:
data = xml_replace_entities(clean_xml_chars(data), encoding=None)
try:
opf = etree.fromstring(data)
self.logger.warn('OPF contains invalid HTML named entities')
except etree.XMLSyntaxError:
data = re.sub(r'(?is)<tours>.+</tours>', '', data)
data = data.replace('<dc-metadata>',
'<dc-metadata xmlns:dc="http://purl.org/metadata/dublin_core">')
try:
opf = etree.fromstring(data)
self.logger.warn('OPF contains invalid tours section')
except etree.XMLSyntaxError:
from calibre.ebooks.oeb.parse_utils import RECOVER_PARSER
opf = etree.fromstring(data, parser=RECOVER_PARSER)
self.logger.warn('OPF contains invalid markup, trying to parse it anyway')
ns = namespace(opf.tag)
if ns not in ('', OPF1_NS, OPF2_NS):
raise OEBError('Invalid namespace %r for OPF document' % ns)
opf = self._clean_opf(opf)
return opf
def _metadata_from_opf(self, opf):
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.oeb.transforms.metadata import meta_info_to_oeb_metadata
stream = cStringIO.StringIO(etree.tostring(opf, xml_declaration=True, encoding='utf-8'))
o = OPF(stream)
pwm = o.primary_writing_mode
if pwm:
self.oeb.metadata.primary_writing_mode = pwm
mi = o.to_book_metadata()
if not mi.language:
mi.language = get_lang().replace('_', '-')
self.oeb.metadata.add('language', mi.language)
if not mi.book_producer:
mi.book_producer = '%(a)s (%(v)s) [http://%(a)s-ebook.com]'%\
dict(a=__appname__, v=__version__)
meta_info_to_oeb_metadata(mi, self.oeb.metadata, self.logger)
m = self.oeb.metadata
m.add('identifier', str(uuid.uuid4()), id='uuid_id', scheme='uuid')
self.oeb.uid = self.oeb.metadata.identifier[-1]
if not m.title:
m.add('title', self.oeb.translate(__('Unknown')))
has_aut = False
for x in m.creator:
if getattr(x, 'role', '').lower() in ('', 'aut'):
has_aut = True
break
if not has_aut:
m.add('creator', self.oeb.translate(__('Unknown')), role='aut')
def _manifest_prune_invalid(self):
'''
Remove items from manifest that contain invalid data. This prevents
catastrophic conversion failure, when a few files contain corrupted
data.
'''
bad = []
check = OEB_DOCS.union(OEB_STYLES)
for item in list(self.oeb.manifest.values()):
if item.media_type in check:
try:
item.data
except KeyboardInterrupt:
raise
except:
self.logger.exception('Failed to parse content in %s'%
item.href)
bad.append(item)
self.oeb.manifest.remove(item)
return bad
def _manifest_add_missing(self, invalid):
import cssutils
manifest = self.oeb.manifest
known = set(manifest.hrefs)
unchecked = set(manifest.values())
cdoc = OEB_DOCS|OEB_STYLES
invalid = set()
while unchecked:
new = set()
for item in unchecked:
data = None
if (item.media_type in cdoc or
item.media_type[-4:] in ('/xml', '+xml')):
try:
data = item.data
except:
self.oeb.log.exception(u'Failed to read from manifest '
u'entry with id: %s, ignoring'%item.id)
invalid.add(item)
continue
if data is None:
continue
if (item.media_type in OEB_DOCS or
item.media_type[-4:] in ('/xml', '+xml')):
hrefs = [r[2] for r in iterlinks(data)]
for href in hrefs:
if isinstance(href, bytes):
href = href.decode('utf-8')
href, _ = urldefrag(href)
if not href:
continue
try:
href = item.abshref(urlnormalize(href))
scheme = urlparse(href).scheme
except:
self.oeb.log.exception(
'Skipping invalid href: %r'%href)
continue
if not scheme and href not in known:
new.add(href)
elif item.media_type in OEB_STYLES:
try:
urls = list(cssutils.getUrls(data))
except:
urls = []
for url in urls:
href, _ = urldefrag(url)
href = item.abshref(urlnormalize(href))
scheme = urlparse(href).scheme
if not scheme and href not in known:
new.add(href)
unchecked.clear()
warned = set([])
for href in new:
known.add(href)
is_invalid = False
for item in invalid:
if href == item.abshref(urlnormalize(href)):
is_invalid = True
break
if is_invalid:
continue
if not self.oeb.container.exists(href):
if href not in warned:
self.logger.warn('Referenced file %r not found' % href)
warned.add(href)
continue
if href not in warned:
self.logger.warn('Referenced file %r not in manifest' % href)
warned.add(href)
id, _ = manifest.generate(id='added')
guessed = guess_type(href)[0]
media_type = guessed or BINARY_MIME
added = manifest.add(id, href, media_type)
unchecked.add(added)
for item in invalid:
self.oeb.manifest.remove(item)
def _manifest_from_opf(self, opf):
manifest = self.oeb.manifest
for elem in xpath(opf, '/o2:package/o2:manifest/o2:item'):
id = elem.get('id')
href = elem.get('href')
media_type = elem.get('media-type', None)
if media_type is None:
media_type = elem.get('mediatype', None)
if not media_type or media_type == 'text/xml':
guessed = guess_type(href)[0]
media_type = guessed or media_type or BINARY_MIME
if hasattr(media_type, 'lower'):
media_type = media_type.lower()
fallback = elem.get('fallback')
if href in manifest.hrefs:
self.logger.warn(u'Duplicate manifest entry for %r' % href)
continue
if not self.oeb.container.exists(href):
self.logger.warn(u'Manifest item %r not found' % href)
continue
if id in manifest.ids:
self.logger.warn(u'Duplicate manifest id %r' % id)
id, href = manifest.generate(id, href)
manifest.add(id, href, media_type, fallback)
invalid = self._manifest_prune_invalid()
self._manifest_add_missing(invalid)
def _spine_add_extra(self):
manifest = self.oeb.manifest
spine = self.oeb.spine
unchecked = set(spine)
selector = XPath('h:body//h:a/@href')
extras = set()
while unchecked:
new = set()
for item in unchecked:
if item.media_type not in OEB_DOCS:
# TODO: handle fallback chains
continue
for href in selector(item.data):
href, _ = urldefrag(href)
if not href:
continue
try:
href = item.abshref(urlnormalize(href))
except ValueError: # Malformed URL
continue
if href not in manifest.hrefs:
continue
found = manifest.hrefs[href]
if found.media_type not in OEB_DOCS or \
found in spine or found in extras:
continue
new.add(found)
extras.update(new)
unchecked = new
version = int(self.oeb.version[0])
for item in sorted(extras):
if version >= 2:
self.logger.warn(
'Spine-referenced file %r not in spine' % item.href)
spine.add(item, linear=False)
def _spine_from_opf(self, opf):
spine = self.oeb.spine
manifest = self.oeb.manifest
for elem in xpath(opf, '/o2:package/o2:spine/o2:itemref'):
idref = elem.get('idref')
if idref not in manifest.ids:
self.logger.warn(u'Spine item %r not found' % idref)
continue
item = manifest.ids[idref]
if item.media_type.lower() in OEB_DOCS and hasattr(item.data, 'xpath'):
spine.add(item, elem.get('linear'))
else:
if hasattr(item.data, 'tag') and item.data.tag and item.data.tag.endswith('}html'):
item.media_type = XHTML_MIME
spine.add(item, elem.get('linear'))
else:
                    self.oeb.log.warn('The item %s is not an XML document.'
                                      ' Removing it from spine.' % item.href)
if len(spine) == 0:
raise OEBError("Spine is empty")
self._spine_add_extra()
for val in xpath(opf, '/o2:package/o2:spine/@page-progression-direction'):
if val in {'ltr', 'rtl'}:
spine.page_progression_direction = val
def _guide_from_opf(self, opf):
guide = self.oeb.guide
manifest = self.oeb.manifest
for elem in xpath(opf, '/o2:package/o2:guide/o2:reference'):
ref_href = elem.get('href')
path = urlnormalize(urldefrag(ref_href)[0])
if path not in manifest.hrefs:
corrected_href = None
for href in manifest.hrefs:
if href.lower() == path.lower():
corrected_href = href
break
if corrected_href is None:
self.logger.warn(u'Guide reference %r not found' % ref_href)
continue
ref_href = corrected_href
typ = elem.get('type')
if typ not in guide:
guide.add(typ, elem.get('title'), ref_href)
def _find_ncx(self, opf):
result = xpath(opf, '/o2:package/o2:spine/@toc')
if result:
id = result[0]
if id not in self.oeb.manifest.ids:
return None
item = self.oeb.manifest.ids[id]
self.oeb.manifest.remove(item)
return item
for item in self.oeb.manifest.values():
if item.media_type == NCX_MIME:
self.oeb.manifest.remove(item)
return item
return None
def _toc_from_navpoint(self, item, toc, navpoint):
children = xpath(navpoint, 'ncx:navPoint')
for child in children:
title = ''.join(xpath(child, 'ncx:navLabel/ncx:text/text()'))
title = COLLAPSE_RE.sub(' ', title.strip())
href = xpath(child, 'ncx:content/@src')
if not title:
self._toc_from_navpoint(item, toc, child)
continue
if (not href or not href[0]) and not xpath(child, 'ncx:navPoint'):
# This node is useless
continue
href = item.abshref(urlnormalize(href[0])) if href and href[0] else ''
path, _ = urldefrag(href)
if path and path not in self.oeb.manifest.hrefs:
path = urlnormalize(path)
if href and path not in self.oeb.manifest.hrefs:
self.logger.warn('TOC reference %r not found' % href)
gc = xpath(child, 'ncx:navPoint')
if not gc:
# This node is useless
continue
id = child.get('id')
klass = child.get('class', 'chapter')
try:
po = int(child.get('playOrder', self.oeb.toc.next_play_order()))
except:
po = self.oeb.toc.next_play_order()
authorElement = xpath(child,
'descendant::calibre:meta[@name = "author"]')
if authorElement:
author = authorElement[0].text
else:
author = None
descriptionElement = xpath(child,
'descendant::calibre:meta[@name = "description"]')
if descriptionElement:
description = etree.tostring(descriptionElement[0],
method='text', encoding=unicode).strip()
if not description:
description = None
else:
description = None
index_image = xpath(child,
'descendant::calibre:meta[@name = "toc_thumbnail"]')
toc_thumbnail = (index_image[0].text if index_image else None)
if not toc_thumbnail or not toc_thumbnail.strip():
toc_thumbnail = None
node = toc.add(title, href, id=id, klass=klass,
play_order=po, description=description, author=author,
toc_thumbnail=toc_thumbnail)
self._toc_from_navpoint(item, node, child)
def _toc_from_ncx(self, item):
if (item is None) or (item.data is None):
return False
self.log.debug('Reading TOC from NCX...')
ncx = item.data
title = ''.join(xpath(ncx, 'ncx:docTitle/ncx:text/text()'))
title = COLLAPSE_RE.sub(' ', title.strip())
title = title or unicode(self.oeb.metadata.title[0])
toc = self.oeb.toc
toc.title = title
navmaps = xpath(ncx, 'ncx:navMap')
for navmap in navmaps:
self._toc_from_navpoint(item, toc, navmap)
return True
def _toc_from_tour(self, opf):
result = xpath(opf, 'o2:tours/o2:tour')
if not result:
return False
self.log.debug('Reading TOC from tour...')
tour = result[0]
toc = self.oeb.toc
toc.title = tour.get('title')
sites = xpath(tour, 'o2:site')
for site in sites:
title = site.get('title')
href = site.get('href')
if not title or not href:
continue
path, _ = urldefrag(urlnormalize(href))
if path not in self.oeb.manifest.hrefs:
self.logger.warn('TOC reference %r not found' % href)
continue
id = site.get('id')
toc.add(title, href, id=id)
return True
def _toc_from_html(self, opf):
if 'toc' not in self.oeb.guide:
return False
self.log.debug('Reading TOC from HTML...')
itempath, frag = urldefrag(self.oeb.guide['toc'].href)
item = self.oeb.manifest.hrefs[itempath]
html = item.data
if frag:
elems = xpath(html, './/*[@id="%s"]' % frag)
if not elems:
elems = xpath(html, './/*[@name="%s"]' % frag)
elem = elems[0] if elems else html
while elem != html and not xpath(elem, './/h:a[@href]'):
elem = elem.getparent()
html = elem
titles = defaultdict(list)
order = []
for anchor in xpath(html, './/h:a[@href]'):
href = anchor.attrib['href']
href = item.abshref(urlnormalize(href))
path, frag = urldefrag(href)
if path not in self.oeb.manifest.hrefs:
continue
title = xml2text(anchor)
title = COLLAPSE_RE.sub(' ', title.strip())
if href not in titles:
order.append(href)
titles[href].append(title)
toc = self.oeb.toc
for href in order:
toc.add(' '.join(titles[href]), href)
return True
def _toc_from_spine(self, opf):
self.log.warn('Generating default TOC from spine...')
toc = self.oeb.toc
titles = []
headers = []
for item in self.oeb.spine:
if not item.linear:
continue
html = item.data
title = ''.join(xpath(html, '/h:html/h:head/h:title/text()'))
title = COLLAPSE_RE.sub(' ', title.strip())
if title:
titles.append(title)
            headers.append('(unlabeled)')
for tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'strong'):
expr = '/h:html/h:body//h:%s[position()=1]/text()'
header = ''.join(xpath(html, expr % tag))
header = COLLAPSE_RE.sub(' ', header.strip())
if header:
headers[-1] = header
break
use = titles
if len(titles) > len(set(titles)):
use = headers
for title, item in izip(use, self.oeb.spine):
if not item.linear:
continue
toc.add(title, item.href)
return True
def _toc_from_opf(self, opf, item):
self.oeb.auto_generated_toc = False
if self._toc_from_ncx(item):
return
# Prefer HTML to tour based TOC, since several LIT files
# have good HTML TOCs but bad tour based TOCs
if self._toc_from_html(opf):
return
if self._toc_from_tour(opf):
return
self._toc_from_spine(opf)
self.oeb.auto_generated_toc = True
def _pages_from_ncx(self, opf, item):
if item is None:
return False
ncx = item.data
if ncx is None:
return False
ptargets = xpath(ncx, 'ncx:pageList/ncx:pageTarget')
if not ptargets:
return False
pages = self.oeb.pages
for ptarget in ptargets:
name = ''.join(xpath(ptarget, 'ncx:navLabel/ncx:text/text()'))
name = COLLAPSE_RE.sub(' ', name.strip())
href = xpath(ptarget, 'ncx:content/@src')
if not href:
continue
href = item.abshref(urlnormalize(href[0]))
id = ptarget.get('id')
type = ptarget.get('type', 'normal')
klass = ptarget.get('class')
pages.add(name, href, type=type, id=id, klass=klass)
return True
def _find_page_map(self, opf):
result = xpath(opf, '/o2:package/o2:spine/@page-map')
if result:
id = result[0]
if id not in self.oeb.manifest.ids:
return None
item = self.oeb.manifest.ids[id]
self.oeb.manifest.remove(item)
return item
for item in self.oeb.manifest.values():
if item.media_type == PAGE_MAP_MIME:
self.oeb.manifest.remove(item)
return item
return None
def _pages_from_page_map(self, opf):
item = self._find_page_map(opf)
if item is None:
return False
pmap = item.data
pages = self.oeb.pages
for page in xpath(pmap, 'o2:page'):
name = page.get('name', '')
href = page.get('href')
if not href:
continue
name = COLLAPSE_RE.sub(' ', name.strip())
href = item.abshref(urlnormalize(href))
type = 'normal'
if not name:
type = 'special'
elif name.lower().strip('ivxlcdm') == '':
type = 'front'
pages.add(name, href, type=type)
return True
def _pages_from_opf(self, opf, item):
if self._pages_from_ncx(opf, item):
return
if self._pages_from_page_map(opf):
return
return
def _cover_from_html(self, hcover):
from calibre.ebooks import render_html_svg_workaround
with TemporaryDirectory('_html_cover') as tdir:
writer = OEBWriter()
writer(self.oeb, tdir)
path = os.path.join(tdir, urlunquote(hcover.href))
data = render_html_svg_workaround(path, self.logger)
if not data:
data = ''
id, href = self.oeb.manifest.generate('cover', 'cover.jpg')
item = self.oeb.manifest.add(id, href, JPEG_MIME, data=data)
return item
def _locate_cover_image(self):
if self.oeb.metadata.cover:
id = unicode(self.oeb.metadata.cover[0])
item = self.oeb.manifest.ids.get(id, None)
if item is not None and item.media_type in OEB_IMAGES:
return item
else:
self.logger.warn('Invalid cover image @id %r' % id)
hcover = self.oeb.spine[0]
if 'cover' in self.oeb.guide:
href = self.oeb.guide['cover'].href
item = self.oeb.manifest.hrefs[href]
media_type = item.media_type
if media_type in OEB_IMAGES:
return item
elif media_type in OEB_DOCS:
hcover = item
html = hcover.data
if MS_COVER_TYPE in self.oeb.guide:
href = self.oeb.guide[MS_COVER_TYPE].href
item = self.oeb.manifest.hrefs.get(href, None)
if item is not None and item.media_type in OEB_IMAGES:
return item
if self.COVER_SVG_XP(html):
svg = copy.deepcopy(self.COVER_SVG_XP(html)[0])
href = os.path.splitext(hcover.href)[0] + '.svg'
id, href = self.oeb.manifest.generate(hcover.id, href)
item = self.oeb.manifest.add(id, href, SVG_MIME, data=svg)
return item
if self.COVER_OBJECT_XP(html):
object = self.COVER_OBJECT_XP(html)[0]
href = hcover.abshref(object.get('data'))
item = self.oeb.manifest.hrefs.get(href, None)
if item is not None and item.media_type in OEB_IMAGES:
return item
return self._cover_from_html(hcover)
def _ensure_cover_image(self):
cover = self._locate_cover_image()
if self.oeb.metadata.cover:
self.oeb.metadata.cover[0].value = cover.id
return
self.oeb.metadata.add('cover', cover.id)
def _manifest_remove_duplicates(self):
seen = set()
dups = set()
for item in self.oeb.manifest:
if item.href in seen:
dups.add(item.href)
seen.add(item.href)
for href in dups:
items = [x for x in self.oeb.manifest if x.href == href]
for x in items:
if x not in self.oeb.spine:
self.oeb.log.warn('Removing duplicate manifest item with id:', x.id)
self.oeb.manifest.remove_duplicate_item(x)
def _all_from_opf(self, opf):
self.oeb.version = opf.get('version', '1.2')
self._metadata_from_opf(opf)
self._manifest_from_opf(opf)
self._spine_from_opf(opf)
self._manifest_remove_duplicates()
self._guide_from_opf(opf)
item = self._find_ncx(opf)
self._toc_from_opf(opf, item)
self._pages_from_opf(opf, item)
# self._ensure_cover_image()
def main(argv=sys.argv):
reader = OEBReader()
for arg in argv[1:]:
oeb = reader(OEBBook(), arg)
for name, doc in oeb.to_opf1().values():
print etree.tostring(doc, pretty_print=True)
for name, doc in oeb.to_opf2(page_map=True).values():
print etree.tostring(doc, pretty_print=True)
return 0
if __name__ == '__main__':
sys.exit(main())
|
gpl-3.0
| -4,345,886,397,232,031,000 | 38.599448 | 99 | 0.518242 | false |
openstack/tooz
|
tooz/partitioner.py
|
1
|
3970
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tooz import hashring
class Partitioner(object):
"""Partition set of objects across several members.
Objects to be partitioned should implement the __tooz_hash__ method to
identify themselves across the consistent hashring. This should method
return bytes.
"""
DEFAULT_PARTITION_NUMBER = hashring.HashRing.DEFAULT_PARTITION_NUMBER
def __init__(self, coordinator, group_id,
partitions=DEFAULT_PARTITION_NUMBER):
members = coordinator.get_members(group_id)
self.partitions = partitions
self.group_id = group_id
self._coord = coordinator
caps = [(m, self._coord.get_member_capabilities(self.group_id, m))
for m in members.get()]
self._coord.watch_join_group(self.group_id, self._on_member_join)
self._coord.watch_leave_group(self.group_id, self._on_member_leave)
self.ring = hashring.HashRing([], partitions=self.partitions)
for m_id, cap in caps:
self.ring.add_node(m_id, cap.get().get("weight", 1))
def _on_member_join(self, event):
weight = self._coord.get_member_capabilities(
self.group_id, event.member_id).get().get("weight", 1)
self.ring.add_node(event.member_id, weight)
def _on_member_leave(self, event):
self.ring.remove_node(event.member_id)
@staticmethod
def _hash_object(obj):
if hasattr(obj, "__tooz_hash__"):
return obj.__tooz_hash__()
return str(obj).encode('utf8')
def members_for_object(self, obj, ignore_members=None, replicas=1):
"""Return the members responsible for an object.
:param obj: The object to check owning for.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return self.ring.get_nodes(self._hash_object(obj),
ignore_nodes=ignore_members,
replicas=replicas)
def belongs_to_member(self, obj, member_id,
ignore_members=None, replicas=1):
"""Return whether an object belongs to a member.
:param obj: The object to check owning for.
:param member_id: The member to check if it owns the object.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return member_id in self.members_for_object(
obj, ignore_members=ignore_members, replicas=replicas)
def belongs_to_self(self, obj, ignore_members=None, replicas=1):
"""Return whether an object belongs to this coordinator.
:param obj: The object to check owning for.
:param ignore_members: Group members to ignore.
:param replicas: Number of replicas for the object.
"""
return self.belongs_to_member(obj, self._coord._member_id,
ignore_members=ignore_members,
replicas=replicas)
def stop(self):
"""Stop the partitioner."""
self._coord.unwatch_join_group(self.group_id, self._on_member_join)
self._coord.unwatch_leave_group(self.group_id, self._on_member_leave)
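# Minimal usage sketch (illustrative only, not part of the upstream module):
# it demonstrates the __tooz_hash__ protocol described in the Partitioner
# docstring without needing a coordination backend, by driving the same
# HashRing calls the class above uses internally.  The node names and the
# Resource class are made-up examples.
def _example_hashring_usage():
    class Resource(object):
        """Toy object implementing the __tooz_hash__ protocol."""

        def __init__(self, name):
            self.name = name

        def __tooz_hash__(self):
            # Must return bytes identifying this object on the ring.
            return self.name.encode('utf8')

    # Build a ring with two fake members, mirroring what Partitioner does
    # when group members join.
    ring = hashring.HashRing([])
    ring.add_node('member-1', 1)
    ring.add_node('member-2', 1)

    obj = Resource('resource-42')
    # Same hashing helper the Partitioner uses before querying the ring.
    key = Partitioner._hash_object(obj)
    # The set of members responsible for this object.
    return ring.get_nodes(key, replicas=1)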
|
apache-2.0
| -8,275,849,643,799,480,000 | 39.927835 | 78 | 0.631234 | false |
Spotipo/spotipo
|
tests/modules/analytics/test_methods.py
|
1
|
1724
|
import sys
import pytest
from flask import current_app,url_for
from flask_wtf import Form
from wtforms import TextField
from faker import Faker
import arrow
import uuid
import stripe
from unifispot.core.models import Wifisite,Device,Guesttrack,Guest
from unifispot.core.guestutils import init_track,validate_track,redirect_guest
from unifispot.core.const import *
from unifispot.modules.analytics.methods import update_daily_stat
from unifispot.modules.analytics.models import Sitestat
from tests.helpers import randomMAC
fake = Faker()
def test_update_daily_stat1(session,client):
'''test with empty tracks
'''
site1 = Wifisite.query.filter_by(id=1).first()
now = arrow.utcnow()
update_daily_stat(site1,now)
assert 1 == Sitestat.query.count() , 'Sitestat is not created '
def test_update_daily_stat2(session,populate_analytics_tracks):
'''Check if time validation is fine
'''
site1 = Wifisite.query.filter_by(id=1).first()
now = arrow.utcnow()
update_daily_stat(site1,now)
sitestat = Sitestat.query.get(1)
assert 20 == sitestat.num_visits
def test_update_daily_stat3(session,populate_analytics_logins):
'''Check if stat counting is fine
'''
site1 = Wifisite.query.filter_by(id=1).first()
now = arrow.utcnow()
update_daily_stat(site1,now)
sitestat = Sitestat.query.get(1)
assert 40 == sitestat.num_visits
assert 20 == sitestat.num_newlogins
assert 20 == sitestat.num_repeats
assert {'auth_email': 20, 'auth_facebook': 20, 'fbcheckedin': 20,
'fbliked': 10,'newguest': 20, u'num_visits': 40}\
== sitestat.login_stat
|
agpl-3.0
| 1,730,948,159,544,591,600 | 27.75 | 78 | 0.678654 | false |
thinkopensolutions/tkobr-addons
|
tko_br_delivery_sale_stock/models/sale.py
|
1
|
1879
|
from odoo import models, api, _
from odoo.exceptions import UserError
class SaleOrder(models.Model):
_inherit ='sale.order'
# do not create delivery line but set the value in total_frete
@api.multi
def delivery_set(self):
# Remove delivery products from the sale order
self._delivery_unset()
for order in self:
carrier = order.carrier_id
if carrier:
if order.state not in ('draft', 'sent'):
                    raise UserError(_('The order state has to be draft to add delivery lines.'))
if carrier.delivery_type not in ['fixed', 'base_on_rule']:
# Shipping providers are used when delivery_type is other than 'fixed' or 'base_on_rule'
price_unit = order.carrier_id.get_shipping_price_from_so(order)[0]
else:
# Classic grid-based carriers
carrier = order.carrier_id.verify_carrier(order.partner_shipping_id)
if not carrier:
raise UserError(_('No carrier matching.'))
price_unit = carrier.get_price_available(order)
if order.company_id.currency_id.id != order.pricelist_id.currency_id.id:
price_unit = order.company_id.currency_id.with_context(date=order.date_order).compute(
price_unit, order.pricelist_id.currency_id)
final_price = price_unit * (1.0 + (float(self.carrier_id.margin) / 100.0))
#order._create_delivery_line(carrier, final_price)
# set price in total_frete field and compute total again
order.total_frete = final_price
order._amount_all()
else:
raise UserError(_('No carrier set for this order.'))
return True
|
agpl-3.0
| -3,344,389,447,532,597,000 | 42.697674 | 110 | 0.566791 | false |
deeplook/svglib
|
tests/test_samples.py
|
1
|
16463
|
#!/usr/bin/env python
"""Testsuite for svglib.
This tests conversion of sample SVG files into PDF files.
Some tests try using a tool called uniconv (if installed)
to convert SVG files into PDF for comparison with svglib.
Read ``tests/README.rst`` for more information on testing!
"""
import os
import glob
import re
import gzip
import io
import json
import tarfile
import textwrap
from http.client import HTTPSConnection
from os.path import dirname, splitext, exists, join, basename, getsize
from urllib.parse import quote, unquote, urlparse
from urllib.request import urlopen
from reportlab.lib.utils import haveImages
from reportlab.graphics import renderPDF, renderPM
from reportlab.graphics.shapes import Group, Rect
import pytest
from svglib import svglib
TEST_ROOT = dirname(__file__)
def found_uniconv():
"Do we have uniconv installed?"
res = os.popen("which uniconv").read().strip()
return len(res) > 0
class TestSVGSamples:
"Tests on misc. sample SVG files included in this test suite."
def cleanup(self):
"Remove generated files created by this class."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*.pdf")
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test convert sample SVG files to PDF using svglib."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_create_pdf_uniconv(self):
"Test converting sample SVG files to PDF using uniconverter."
paths = glob.glob(f"{TEST_ROOT}/samples/misc/*.svg")
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestWikipediaSymbols:
"Tests on sample symbol SVG files from wikipedia.org."
def fetch_file(self, server, path):
"Fetch file using httplib module."
print(f"downloading https://{server}{path}")
req = HTTPSConnection(server)
req.putrequest('GET', path)
req.putheader('Host', server)
req.putheader('Accept', 'text/svg')
req.endheaders()
r1 = req.getresponse()
data = r1.read().decode('utf-8')
req.close()
return data
def setup_method(self):
"Check if files exists, else download and unpack it."
self.folder_path = f"{TEST_ROOT}/samples/wikipedia/symbols"
# create directory if not existing
if not exists(self.folder_path):
os.mkdir(self.folder_path)
# list sample files, found on:
# http://en.wikipedia.org/wiki/List_of_symbols
server = "upload.wikimedia.org"
paths = textwrap.dedent("""\
/wikipedia/commons/f/f7/Biohazard.svg
/wikipedia/commons/1/11/No_smoking_symbol.svg
/wikipedia/commons/b/b0/Dharma_wheel.svg
/wikipedia/commons/a/a7/Eye_of_Horus_bw.svg
/wikipedia/commons/1/17/Yin_yang.svg
/wikipedia/commons/a/a7/Olympic_flag.svg
/wikipedia/commons/4/46/Ankh.svg
/wikipedia/commons/5/5b/Star_of_life2.svg
/wikipedia/commons/9/97/Tudor_rose.svg
/wikipedia/commons/0/08/Flower-of-Life-small.svg
/wikipedia/commons/d/d0/Countries_by_Population_Density_in_2015.svg
/wikipedia/commons/8/84/CO2_responsibility_1950-2000.svg
""").strip().split()
# convert
for path in paths:
data = None
p = join(os.getcwd(), self.folder_path, basename(path))
if not exists(p):
try:
data = self.fetch_file(server, path)
except Exception:
print("Check your internet connection and try again!")
break
if data:
with open(p, "w", encoding='UTF-8') as fh:
fh.write(data)
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, '*.pdf'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test converting symbol SVG files to PDF using svglib."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting symbol SVG files to PDF using uniconverter."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestWikipediaFlags:
"Tests using SVG flags from Wikipedia.org."
def fetch_file(self, url):
"Get content with some given URL, uncompress if needed."
parsed = urlparse(url)
conn = HTTPSConnection(parsed.netloc)
conn.request("GET", parsed.path)
r1 = conn.getresponse()
if (r1.status, r1.reason) == (200, "OK"):
data = r1.read()
if r1.getheader("content-encoding") == "gzip":
zbuf = io.BytesIO(data)
zfile = gzip.GzipFile(mode="rb", fileobj=zbuf)
data = zfile.read()
zfile.close()
data = data.decode('utf-8')
else:
data = None
conn.close()
return data
def flag_url2filename(self, url):
"""Convert given flag URL into a local filename.
http://upload.wikimedia.org/wikipedia/commons
/9/91/Flag_of_Bhutan.svg
-> Bhutan.svg
/f/fa/Flag_of_the_People%27s_Republic_of_China.svg
-> The_People's_Republic_of_China.svg
"""
path = basename(url)[len("Flag_of_"):]
path = path.capitalize() # capitalise leading "the_"
path = unquote(path)
return path
def setup_method(self):
"Check if files exists, else download."
self.folder_path = f"{TEST_ROOT}/samples/wikipedia/flags"
# create directory if not already present
if not exists(self.folder_path):
os.mkdir(self.folder_path)
# fetch flags.html, if not already present
path = join(self.folder_path, "flags.html")
if not exists(path):
u = "https://en.wikipedia.org/wiki/Gallery_of_sovereign_state_flags"
data = self.fetch_file(u)
if data:
with open(path, "w", encoding='UTF-8') as f:
f.write(data)
else:
with open(path, encoding='UTF-8') as f:
data = f.read()
# find all flag base filenames
# ["Flag_of_Bhutan.svg", "Flag_of_Bhutan.svg", ...]
flag_names = re.findall(r"\:(Flag_of_.*?\.svg)", data)
flag_names = [unquote(fn) for fn in flag_names]
# save flag URLs into a JSON file, if not already present
json_path = join(self.folder_path, "flags.json")
if not exists(json_path):
flag_url_map = []
prefix = "https://en.wikipedia.org/wiki/File:"
for i, fn in enumerate(flag_names):
# load single flag HTML page, like
# https://en.wikipedia.org/wiki/Image:Flag_of_Bhutan.svg
flag_html = self.fetch_file(prefix + quote(fn))
# search link to single SVG file to download, like
# https://upload.wikimedia.org/wikipedia/commons/9/91/Flag_of_Bhutan.svg
svg_pat = "//upload.wikimedia.org/wikipedia/commons"
p = rf"({svg_pat}/.*?/{quote(fn)})\""
print(f"check {prefix}{fn}")
m = re.search(p, flag_html)
if m:
flag_url = m.groups()[0]
flag_url_map.append((prefix + fn, flag_url))
with open(json_path, "w", encoding='UTF-8') as fh:
json.dump(flag_url_map, fh)
# download flags in SVG format, if not present already
with open(json_path, encoding='UTF-8') as fh:
flag_url_map = json.load(fh)
for dummy, flag_url in flag_url_map:
path = join(self.folder_path, self.flag_url2filename(flag_url))
if not exists(path):
print(f"fetch {flag_url}")
flag_svg = self.fetch_file(flag_url)
with open(path, "w", encoding='UTF-8') as f:
f.write(flag_svg)
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, '*.pdf'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf(self):
"Test converting flag SVG files to PDF using svglib."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting flag SVG files to PDF using uniconverer."
paths = glob.glob(f"{self.folder_path}/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestW3CSVG:
"Tests using the official W3C SVG testsuite."
def setup_method(self):
"Check if testsuite archive exists, else download and unpack it."
server = "http://www.w3.org"
path = "/Graphics/SVG/Test/20070907/W3C_SVG_12_TinyTestSuite.tar.gz"
url = server + path
archive_path = basename(url)
tar_path = splitext(archive_path)[0]
self.folder_path = join(TEST_ROOT, "samples", splitext(tar_path)[0])
if not exists(self.folder_path):
if not exists(join(TEST_ROOT, "samples", tar_path)):
if not exists(join(TEST_ROOT, "samples", archive_path)):
print(f"downloading {url}")
try:
data = urlopen(url).read()
except OSError as details:
print(details)
print("Check your internet connection and try again!")
return
archive_path = basename(url)
with open(join(TEST_ROOT, "samples", archive_path), "wb") as f:
f.write(data)
print(f"unpacking {archive_path}")
tar_data = gzip.open(join(TEST_ROOT, "samples", archive_path), "rb").read()
with open(join(TEST_ROOT, "samples", tar_path), "wb") as f:
f.write(tar_data)
print(f"extracting into {self.folder_path}")
os.mkdir(self.folder_path)
tar_file = tarfile.TarFile(join(TEST_ROOT, "samples", tar_path))
tar_file.extractall(self.folder_path)
tar_file.close()
if exists(join(TEST_ROOT, "samples", tar_path)):
os.remove(join(TEST_ROOT, "samples", tar_path))
def cleanup(self):
"Remove generated files when running this test class."
paths = glob.glob(join(self.folder_path, 'svg/*-svglib.pdf'))
paths += glob.glob(join(self.folder_path, 'svg/*-uniconv.pdf'))
paths += glob.glob(join(self.folder_path, 'svg/*-svglib.png'))
for i, path in enumerate(paths):
print(f"deleting [{i}] {path}")
os.remove(path)
def test_convert_pdf_png(self):
"""
Test converting W3C SVG files to PDF and PNG using svglib.
``renderPM.drawToFile()`` used in this test is known to trigger an
error sometimes in reportlab which was fixed in reportlab 3.3.26.
See https://github.com/deeplook/svglib/issues/47
"""
exclude_list = [
"animate-elem-41-t.svg", # Freeze renderPM in pathFill()
"animate-elem-78-t.svg", # id
"paint-stroke-06-t.svg",
"paint-stroke-207-t.svg",
"coords-trans-09-t.svg", # renderPDF issue (div by 0)
]
paths = glob.glob(f"{self.folder_path}/svg/*.svg")
msg = f"Destination folder '{self.folder_path}/svg' not found."
assert len(paths) > 0, msg
for i, path in enumerate(paths):
print(f"working on [{i}] {path}")
if basename(path) in exclude_list:
print("excluded (to be tested later)")
continue
# convert
drawing = svglib.svg2rlg(path)
# save as PDF
base = splitext(path)[0] + '-svglib.pdf'
renderPDF.drawToFile(drawing, base, showBoundary=0)
# save as PNG
# (endless loop for file paint-stroke-06-t.svg)
base = splitext(path)[0] + '-svglib.png'
try:
# Can trigger an error in reportlab < 3.3.26.
renderPM.drawToFile(drawing, base, 'PNG')
except TypeError:
print('Svglib: Consider upgrading reportlab to version >= 3.3.26!')
raise
@pytest.mark.skipif(not found_uniconv(), reason="needs uniconv")
def test_convert_pdf_uniconv(self):
"Test converting W3C SVG files to PDF using uniconverter."
paths = glob.glob(f"{self.folder_path}/svg/*")
paths = [p for p in paths if splitext(p.lower())[1] in [".svg", ".svgz"]]
for path in paths:
out = splitext(path)[0] + '-uniconv.pdf'
cmd = f"uniconv '{path}' '{out}'"
os.popen(cmd).read()
if exists(out) and getsize(out) == 0:
os.remove(out)
class TestOtherFiles:
@pytest.mark.skipif(not haveImages, reason="missing pillow library")
def test_png_in_svg(self):
path = join(TEST_ROOT, "samples", "others", "png_in_svg.svg")
drawing = svglib.svg2rlg(path)
result = renderPDF.drawToString(drawing)
# If the PNG image is really included, the size is over 7k.
assert len(result) > 7000
def test_external_svg_in_svg(self):
path = join(TEST_ROOT, "samples", "others", "svg_in_svg.svg")
drawing = svglib.svg2rlg(path)
img_group = drawing.contents[0].contents[0]
# First image points to SVG rendered as a group
assert isinstance(img_group.contents[0], Group)
assert isinstance(img_group.contents[0].contents[0].contents[0], Rect)
assert img_group.contents[0].transform, (1, 0, 0, 1, 200.0, 100.0)
# Second image points directly to a Group with Rect element
assert isinstance(img_group.contents[1], Group)
assert isinstance(img_group.contents[1].contents[0], Rect)
assert img_group.contents[1].transform, (1, 0, 0, 1, 100.0, 200.0)
|
lgpl-3.0
| -2,501,537,814,356,397,600 | 36.162528 | 91 | 0.568183 | false |
horazont/aioxmpp
|
aioxmpp/shim/__init__.py
|
1
|
1907
|
########################################################################
# File name: __init__.py
# This file is part of: aioxmpp
#
# LICENSE
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
########################################################################
"""
:mod:`~aioxmpp.shim` --- Stanza Headers and Internet Metadata (:xep:`0131`)
###########################################################################
This module provides support for :xep:`131` stanza headers. The following
attributes are added by this module to the existing stanza classes:
.. attribute:: aioxmpp.Message.xep0131_headers
A :class:`xso.Headers` instance or :data:`None`. Represents the SHIM
headers of the stanza.
.. attribute:: aioxmpp.Presence.xep0131_headers
A :class:`xso.Headers` instance or :data:`None`. Represents the SHIM
headers of the stanza.
The attributes are available as soon as :mod:`aioxmpp.shim` is loaded.
.. currentmodule:: aioxmpp
.. autoclass:: SHIMService
.. currentmodule:: aioxmpp.shim
.. class:: Service
Alias of :class:`.SHIMService`.
.. deprecated:: 0.8
The alias will be removed in 1.0.
.. currentmodule:: aioxmpp.shim.xso
.. autoclass:: Headers
"""
from . import xso # NOQA: F401
from .service import ( # NOQA: F401
SHIMService,
)
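# Minimal usage sketch (illustrative only, not part of the upstream module):
# it exercises just the ``xep0131_headers`` attribute documented above.  The
# JID and message type are made-up example values, and populating individual
# header entries is not shown because that depends on the Headers XSO API.
def _example_attach_shim_headers():
    import aioxmpp

    msg = aioxmpp.Message(
        type_=aioxmpp.MessageType.CHAT,
        to=aioxmpp.JID.fromstr("juliet@capulet.example"),
    )
    # The attribute exists because this module has been imported.
    msg.xep0131_headers = xso.Headers()
    return msg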
|
lgpl-3.0
| 6,548,399,703,151,503,000 | 28.796875 | 75 | 0.639748 | false |
blueskycoco/rtt
|
bsp/mini2440/rtconfig.py
|
1
|
2546
|
import os
# panel options
# 'PNL_A70','PNL_N35', 'PNL_T35' , 'PNL_X35'
RT_USING_LCD_TYPE = 'PNL_T35'
# toolchains options
ARCH = 'arm'
CPU = 's3c24x0'
TextBase = '0x30000000'
CROSS_TOOL = 'keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
    print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=arm920t'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp' + ' -DTEXT_BASE=' + TextBase
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread_mini2440.map,-cref,-u,_start -nostartfiles -T mini2440_ram.ld' + ' -Ttext ' + TextBase
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gstabs+'
AFLAGS += ' -gstabs+'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --device DARMSS9'
CFLAGS = DEVICE + ' --apcs=interwork --diag_suppress=870'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --strict --info sizes --info totals --info unused --info veneers --list rtthread-mini2440.map --ro-base 0x30000000 --entry Entry_Point --first Entry_Point'
CFLAGS += ' -I"' + EXEC_PATH + '/ARM/RV31/INC"'
LFLAGS += ' --libpath "' + EXEC_PATH + '/ARM/RV31/LIB"'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
CFLAGS = ''
AFLAGS = ''
LFLAGS = ''
|
gpl-2.0
| -6,611,863,965,330,386,000 | 24.717172 | 179 | 0.536135 | false |
bigswitch/snac-nox
|
src/nox/apps/directory/dm_ws_groups.py
|
1
|
18122
|
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import simplejson
from twisted.internet import defer
from twisted.python.failure import Failure
from nox.apps.coreui import webservice
from nox.apps.coreui.webservice import *
from nox.apps.directory.directorymanagerws import *
from nox.apps.directory.directorymanager import mangle_name
from nox.lib.directory import DirectoryException
from nox.lib.netinet.netinet import create_eaddr, create_ipaddr
lg = logging.getLogger('dm_ws_groups')
groupname_to_type = {
"switch" : Directory_Factory.SWITCH_PRINCIPAL_GROUP,
"location" : Directory_Factory.LOCATION_PRINCIPAL_GROUP,
"host" : Directory_Factory.HOST_PRINCIPAL_GROUP,
"user" : Directory_Factory.USER_PRINCIPAL_GROUP,
"dladdr" : Directory_Factory.DLADDR_GROUP,
"nwaddr" : Directory_Factory.NWADDR_GROUP,
}
def is_member(group_info,mangled_name, ctype_str):
if ctype_str in ["principal", "address"]:
if mangled_name in group_info.member_names:
return True
return False
else: # 'subgroup'
if mangled_name in group_info.subgroup_names:
return True
return False
class dm_ws_groups:
"""Exposes the group portion of the directory manager interface"""
def err(self, failure, request, fn_name, msg):
lg.error('%s: %s' % (fn_name, str(failure)))
if isinstance(failure.value, DirectoryException) \
and (failure.value.code == DirectoryException.COMMUNICATION_ERROR \
or failure.value.code == DirectoryException.REMOTE_ERROR):
msg = failure.value.message
return webservice.internalError(request, msg)
def member_op(self,request,group_info,member,ptype_str,
otype_str,ctype_str):
try:
def ok(res):
def unicode_(s, encoding):
if isinstance(s, unicode): return s
return unicode(s, encoding)
members = []
for member in res[0]:
if not isinstance(member, basestring):
member = str(member)
members.append(unicode_(member, 'utf-8'))
subgroups = [unicode_(subgroup, 'utf-8') for subgroup in res[1]]
res_str = [ members, subgroups ]
request.write(simplejson.dumps(res_str))
request.finish()
if group_info is None:
return webservice.badRequest(request,"Group does not exist.")
exists = is_member(group_info, member, ctype_str)
if otype_str == "add" and exists:
return webservice.badRequest(request, "%s is already a %s in the group." \
% (member,ctype_str))
if otype_str != "add" and not exists:
return webservice.notFound(request, "%s %s not found in group." \
% (ctype_str.capitalize(),member))
if otype_str == "":
# special case, this is just a membership test
# if the request got this far, return success
request.finish()
return
ptype = groupname_to_type[ptype_str]
method_name = otype_str + "_group_members"
f = getattr(self.dm, method_name)
if ctype_str in ["principal", "address"]:
d = f(ptype,group_info.name, (member,), () )
else: # 'subgroup' case
d = f(ptype,group_info.name, (), (member,))
d.addCallback(ok)
d.addErrback(self.err, request, "%s member_op" % method_name,
"Could not %s." % method_name)
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "member_op",
"Could not perform group operation.")
def member_op_start(self, request, arg, otype_str):
try:
groupname = arg['<group name>']
groupdir = arg['<group dir>']
mangled_group = mangle_name(groupdir,groupname)
membername = arg['<member name>']
memberdir = arg.get('<member dir>')
ptype_str = get_principal_type_from_args(arg)
ctype_str = find_value_in_args(arg,
["principal","address","subgroup"])
if memberdir == self.dm.discovered_dir.name:
return webservice.badRequest(request, "Discovered principals "
"may not be added to groups; try moving principal to "
"a persistent directory first.")
ptype = groupname_to_type[ptype_str]
is_address = ctype_str == "address"
if is_address and ptype == Directory_Factory.DLADDR_GROUP:
mangled_member = create_eaddr(membername.encode('utf-8'))
elif is_address and ptype == Directory_Factory.NWADDR_GROUP:
mangled_member = create_cidr_ipaddr(
membername.encode('utf-8'))
else:
mangled_member = mangle_name(memberdir, membername)
if mangled_member is None:
return webservice.badRequest(request,
"Invalid group member parameter: '%s'" % membername)
d = self.dm.get_group(ptype, mangled_group)
f = lambda x: self.member_op(request,x,mangled_member,
ptype_str,otype_str,ctype_str)
d.addCallback(f)
d.addErrback(self.err, request, "member_op_start",
"Could not retrieve group.")
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "member_op_start",
"Could not retrieve group.")
def group_op(self, request, group_info, mangled_group,ptype_str, otype_str):
try:
def ok(res):
if isinstance(res, GroupInfo):
request.write(simplejson.dumps(res.to_str_dict()))
else:
request.write(simplejson.dumps(res))
request.finish()
is_get_op = otype_str == "get" or otype_str == "principal" \
or otype_str == "subgroup"
if group_info is None and (is_get_op or otype_str == "del"):
return webservice.notFound(request,"Group %s does not exist." % mangled_group)
# read operations finish here. this includes 'get','principal'
# and 'subgroup' otype_str
if is_get_op :
str_dict = group_info.to_str_dict()
if otype_str == "get":
request.write(simplejson.dumps(str_dict))
elif otype_str == "principal":
request.write(simplejson.dumps(str_dict["member_names"]))
elif otype_str == "subgroup":
request.write(simplejson.dumps(str_dict["subgroup_names"]))
request.finish()
return
# only otype_str == 'add' or 'del' continues to this point
ptype = groupname_to_type[ptype_str]
if otype_str == "add":
content = json_parse_message_body(request)
if content == None:
return webservice.badRequest(request, "Unable to parse message body.")
if group_info is None:
content["name"] = mangled_group
d = getattr(self.dm, "add_group")(ptype,
GroupInfo.from_str_dict(content))
elif len(content) == 1 and content.has_key('name'):
d = getattr(self.dm, "rename_group")(ptype,
mangled_group, content['name'],'','')
else:
content["name"] = mangled_group
d = getattr(self.dm, "modify_group")(ptype,
GroupInfo.from_str_dict(content))
else: # delete
d = getattr(self.dm, "del_group")(ptype, mangled_group)
d.addCallback(ok)
d.addErrback(self.err, request, "%s %s group_op" % (otype_str, ptype_str),
"Could not perform %s group operation." % ptype_str)
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "%s %s group_op" % (otype_str, ptype_str),
"Could not perform %s group operation." % ptype_str)
def group_op_start(self, request, arg, otype_str):
try:
groupname = arg['<group name>']
dirname = arg.get('<group dir>')
mangled_group = mangle_name(dirname,groupname)
ptype_str = get_principal_type_from_args(arg)
ptype = groupname_to_type[ptype_str]
d = self.dm.get_group(ptype, mangled_group)
f = lambda x: self.group_op(request,x,mangled_group,
ptype_str,otype_str)
d.addCallback(f)
d.addErrback(self.err, request, "group_op_start",
"Could not retrieve group.")
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "group_op_start",
"Could not retrieve group.")
def get_all_groups(self, request, arg):
try:
def cb(res):
request.write(simplejson.dumps(res))
request.finish()
ptype_str = get_principal_type_from_args(arg)
ptype = groupname_to_type[ptype_str]
d = self.dm.search_groups(ptype)
d.addCallback(cb)
d.addErrback(self.err, request, "get_all_groups",
"Could not retrieve groups.")
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "get_all_groups",
"Could not retrieve groups.")
def get_group_parents(self, request, arg):
try:
def cb(res):
request.write(simplejson.dumps(res))
request.finish()
groupname = arg['<group name>']
dirname = arg['<group dir>']
mangled_group = mangle_name(dirname,groupname)
ptype_str = get_principal_type_from_args(arg)
ptype = groupname_to_type[ptype_str]
d = self.dm.get_group_parents(ptype, mangled_group)
d.addCallback(cb)
d.addErrback(self.err, request, "get_group_parents",
"Could not retrieve group parents.")
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "get_group_parents",
"Could not retrieve group parents.")
def get_addr_groups_op(self, request, arg):
try:
def cb(res):
request.write(simplejson.dumps(res))
request.finish()
gtype_str = get_principal_type_from_args(arg)
gtype = groupname_to_type[gtype_str]
if gtype == Directory.NWADDR_GROUP:
addr = create_cidr_ipaddr(arg['<address>'].encode('utf-8'))
elif gtype == Directory.DLADDR_GROUP:
addr = create_eaddr(arg['<address>'].encode('utf-8'))
else:
return webservice.badRequest(request, "Could not retrieve "
"address groups: invalid address type.")
if addr is None:
return webservice.badRequest(request, "Could not retrieve "
"address groups: invalid address format.")
d = self.dm.get_group_membership(gtype, addr)
d.addCallback(cb)
d.addErrback(self.err, request, "get_group_parents",
"Could not retrieve address groups.")
return NOT_DONE_YET
except Exception, e:
return self.err(Failure(), request, "get_addr_groups",
"Could not retrieve address groups.")
def __init__(self, dm, reg):
self.dm = dm
grouptypes = ["host", "user", "location", "switch"]
addrgrouptypes = ["dladdr", "nwaddr"]
for gt in grouptypes + addrgrouptypes:
path = ( webservice.WSPathStaticString("group"), ) + \
(WSPathStaticString(gt) ,)
desc = "List the names of all %s groups" % (gt)
reg(self.get_all_groups, "GET", path, desc)
for gt in grouptypes + addrgrouptypes:
path = ( webservice.WSPathStaticString("group"), ) + \
( webservice.WSPathStaticString(gt), ) + \
(WSPathExistingDirName(dm,"<group dir>") ,) + \
(WSPathArbitraryString("<group name>"),)
desc = "Get all members and subgroups in a %s group" % gt
reg(lambda x,y: self.group_op_start(x,y,"get"), "GET", path, desc)
desc = "Delete a %s group" % gt
reg(lambda x,y: self.group_op_start(x,y,"del"), "DELETE", path, desc)
desc = "Add a %s group" % gt
reg(lambda x,y: self.group_op_start(x,y,"add"), "PUT", path, desc)
parent_path = path + ( webservice.WSPathStaticString("parent"),)
desc = "Get immediate parent groups of a %s group" % gt
reg(self.get_group_parents, "GET",parent_path, desc)
for gt in grouptypes:
classes = [ "subgroup", "principal" ]
for c in classes:
path1 = ( webservice.WSPathStaticString("group"), ) + \
( webservice.WSPathStaticString(gt), ) + \
(WSPathExistingDirName(dm,"<group dir>") ,) + \
(WSPathArbitraryString("<group name>"),) + \
( webservice.WSPathStaticString(c), )
get_desc = "Get a list of all %ss in the named group" % c
fn = (lambda z: lambda x,y: self.group_op_start(x,y,z))(c)
reg(fn, "GET", path1, get_desc)
path2 = path1 + (WSPathExistingDirName(dm,"<member dir>") ,) + \
(WSPathArbitraryString("<member name>"),)
get_desc = "Test if a %s is in the named group (no recursion)" % c
reg(lambda x,y: self.member_op_start(x,y,""), "GET", path2,get_desc)
put_desc = "Add a %s to the named group" % c
reg(lambda x,y: self.member_op_start(x,y,"add"),"PUT",path2,put_desc)
del_desc = "Delete a %s of the named group" % c
reg(lambda x,y: self.member_op_start(x,y,"del"),
"DELETE",path2,del_desc)
for gt in addrgrouptypes:
basepath = ( webservice.WSPathStaticString("group"), ) + \
( webservice.WSPathStaticString(gt), ) + \
(WSPathExistingDirName(dm,"<group dir>") ,) + \
(WSPathArbitraryString("<group name>"),)
get_desc = "Get a list of all subgroups in the named group"
fn = (lambda z: lambda x,y: self.group_op_start(x,y,z))("subgroup")
reg(fn, "GET", basepath+(webservice.WSPathStaticString("subgroup"),),
get_desc)
get_desc = "Get a list of all addresses in the named group"
fn = (lambda z: lambda x,y: self.group_op_start(x,y,z))("principal")
reg(fn, "GET", basepath+(webservice.WSPathStaticString("address"),),
get_desc)
sgmpath = basepath + ( webservice.WSPathStaticString("subgroup"), ) +\
(WSPathExistingDirName(dm,"<member dir>") ,) +\
(WSPathArbitraryString("<member name>"),)
desc = "Test if a subgroup is in the named group (no recursion)"
reg(lambda x,y: self.member_op_start(x,y,""), "GET", sgmpath, desc)
desc = "Add a subgroup to the named group"
reg(lambda x,y: self.member_op_start(x,y,"add"),"PUT", sgmpath, desc)
desc = "Delete a subgroup of the named group"
reg(lambda x,y: self.member_op_start(x,y,"del"),"DELETE",sgmpath,desc)
if gt == 'nwaddr':
member_desc = "an IP address or address block"
else:
member_desc = "a MAC address"
ampath = basepath + ( webservice.WSPathStaticString("address"), ) +\
(WSPathArbitraryString("<member name>"),)
desc = "Test if %s is in the named group (no recursion)" %member_desc
reg(lambda x,y: self.member_op_start(x,y,""), "GET", ampath, desc)
desc = "Add %s to the named group" %member_desc
reg(lambda x,y: self.member_op_start(x,y,"add"),"PUT", ampath, desc)
desc = "Delete %s of the named group" %member_desc
reg(lambda x,y: self.member_op_start(x,y,"del"),"DELETE",ampath,desc)
addrpath = ( webservice.WSPathStaticString(gt), ) + \
(WSPathArbitraryString("<address>"),) + \
( webservice.WSPathStaticString("group"), )
desc = "Get all groups for %s" %member_desc
reg(self.get_addr_groups_op, "GET", addrpath, desc)
|
gpl-3.0
| 8,808,889,502,028,325,000 | 45.706186 | 96 | 0.539676 | false |
laurentgo/pants
|
src/python/pants/backend/jvm/tasks/classpath_products.py
|
1
|
3952
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.goal.products import UnionProducts
def _not_excluded_filter(exclude_patterns):
def not_excluded(path_tuple):
conf, path = path_tuple
return not any(excluded in path for excluded in exclude_patterns)
return not_excluded
class ClasspathProducts(object):
def __init__(self):
self._classpaths = UnionProducts()
self._exclude_patterns = UnionProducts()
self._buildroot = get_buildroot()
def add_for_targets(self, targets, classpath_elements):
"""Adds classpath elements to the products of all the provided targets."""
for target in targets:
self.add_for_target(target, classpath_elements)
def add_for_target(self, target, classpath_elements):
"""Adds classpath elements to the products of the provided target."""
self._validate_classpath_tuples(classpath_elements, target)
self._classpaths.add_for_target(target, classpath_elements)
def add_excludes_for_targets(self, targets):
"""Add excludes from the provided targets. Does not look up transitive excludes."""
for target in targets:
self._add_excludes_for_target(target)
def get_for_target(self, target):
"""Gets the transitive classpath products for the given target, in order, respecting target
excludes."""
return self.get_for_targets([target])
def get_for_targets(self, targets):
"""Gets the transitive classpath products for the given targets, in order, respecting target
excludes."""
classpath_tuples = self._classpaths.get_for_targets(targets)
filtered_classpath_tuples = self._filter_by_excludes(classpath_tuples, targets)
return filtered_classpath_tuples
def _filter_by_excludes(self, classpath_tuples, root_targets):
exclude_patterns = self._exclude_patterns.get_for_targets(root_targets)
filtered_classpath_tuples = filter(_not_excluded_filter(exclude_patterns),
classpath_tuples)
return filtered_classpath_tuples
def _add_excludes_for_target(self, target):
# TODO(nhoward): replace specific ivy based exclude filterings in the jar object refactor
# creates strings from excludes that will match classpath entries generated by ivy
# eg exclude(org='org.example', name='lib') => 'jars/org.example/lib'
# exclude(org='org.example') => 'jars/org.example/'
if target.is_exported:
self._exclude_patterns.add_for_target(target,
[os.path.join('jars',
target.provides.org,
target.provides.name)])
if isinstance(target, JvmTarget) and target.excludes:
self._exclude_patterns.add_for_target(target,
[os.path.join('jars', e.org, e.name or '')
for e in target.excludes])
def _validate_classpath_tuples(self, classpath, target):
"""Validates that all files are located within the working copy, to simplify relativization."""
for classpath_tuple in classpath:
self._validate_path_in_buildroot(classpath_tuple, target)
def _validate_path_in_buildroot(self, classpath_tuple, target):
conf, path = classpath_tuple
if os.path.relpath(path, self._buildroot).startswith(os.pardir):
raise TaskError(
'Classpath entry {} for target {} is located outside the buildroot.'
.format(path, target.address.spec))
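# Illustrative usage sketch (the target and jar path below are hypothetical):
#   products = ClasspathProducts()
#   products.add_for_target(jvm_target, [('default', os.path.join(get_buildroot(), 'libs/foo.jar'))])
#   products.add_excludes_for_targets([jvm_target])
#   products.get_for_target(jvm_target)  # ordered (conf, path) tuples, with excludes applied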
|
apache-2.0
| -8,199,300,134,605,323,000 | 44.425287 | 99 | 0.672824 | false |
MartinThoma/PyMySQL
|
pymysql/connections.py
|
1
|
53902
|
# Python implementation of the MySQL client-server protocol
# http://dev.mysql.com/doc/internals/en/client-server-protocol.html
# Error codes:
# http://dev.mysql.com/doc/refman/5.5/en/error-messages-client.html
from __future__ import print_function
from ._compat import PY2, range_type, text_type, str_type, JYTHON, IRONPYTHON
import errno
from functools import partial
import hashlib
import io
import os
import socket
import struct
import sys
import traceback
import warnings
from .charset import MBLENGTH, charset_by_name, charset_by_id
from .constants import CLIENT, COMMAND, FIELD_TYPE, SERVER_STATUS
from .converters import (
escape_item, encoders, decoders, escape_string, through)
from .cursors import Cursor
from .optionfile import Parser
from .util import byte2int, int2byte
from . import err
try:
import ssl
SSL_ENABLED = True
except ImportError:
ssl = None
SSL_ENABLED = False
try:
import getpass
DEFAULT_USER = getpass.getuser()
del getpass
except ImportError:
DEFAULT_USER = None
DEBUG = False
_py_version = sys.version_info[:2]
# socket.makefile() in Python 2 is not usable because it is very inefficient and
# has bad timeout behavior.
# XXX: ._socketio doesn't work under IronPython.
if _py_version == (2, 7) and not IRONPYTHON:
# read method of file-like returned by sock.makefile() is very slow.
# So we copy io-based one from Python 3.
from ._socketio import SocketIO
def _makefile(sock, mode):
return io.BufferedReader(SocketIO(sock, mode))
elif _py_version == (2, 6):
# Python 2.6 doesn't have fast io module.
# So we make original one.
class SockFile(object):
def __init__(self, sock):
self._sock = sock
def read(self, n):
read = self._sock.recv(n)
if len(read) == n:
return read
while True:
data = self._sock.recv(n-len(read))
if not data:
return read
read += data
if len(read) == n:
return read
def _makefile(sock, mode):
assert mode == 'rb'
return SockFile(sock)
else:
# socket.makefile in Python 3 is nice.
def _makefile(sock, mode):
return sock.makefile(mode)
TEXT_TYPES = set([
FIELD_TYPE.BIT,
FIELD_TYPE.BLOB,
FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB,
FIELD_TYPE.STRING,
FIELD_TYPE.TINY_BLOB,
FIELD_TYPE.VAR_STRING,
FIELD_TYPE.VARCHAR,
FIELD_TYPE.GEOMETRY])
sha_new = partial(hashlib.new, 'sha1')
NULL_COLUMN = 251
UNSIGNED_CHAR_COLUMN = 251
UNSIGNED_SHORT_COLUMN = 252
UNSIGNED_INT24_COLUMN = 253
UNSIGNED_INT64_COLUMN = 254
DEFAULT_CHARSET = 'latin1'
MAX_PACKET_LEN = 2**24-1
def dump_packet(data): # pragma: no cover
def is_ascii(data):
if 65 <= byte2int(data) <= 122:
if isinstance(data, int):
return chr(data)
return data
return '.'
try:
print("packet length:", len(data))
print("method call[1]:", sys._getframe(1).f_code.co_name)
print("method call[2]:", sys._getframe(2).f_code.co_name)
print("method call[3]:", sys._getframe(3).f_code.co_name)
print("method call[4]:", sys._getframe(4).f_code.co_name)
print("method call[5]:", sys._getframe(5).f_code.co_name)
print("-" * 88)
except ValueError:
pass
dump_data = [data[i:i+16] for i in range_type(0, min(len(data), 256), 16)]
for d in dump_data:
print(' '.join(map(lambda x: "{:02X}".format(byte2int(x)), d)) +
' ' * (16 - len(d)) + ' ' * 2 +
' '.join(map(lambda x: "{}".format(is_ascii(x)), d)))
print("-" * 88)
print()
def _scramble(password, message):
if not password:
return b''
if DEBUG: print('password=' + str(password))
stage1 = sha_new(password).digest()
stage2 = sha_new(stage1).digest()
s = sha_new()
s.update(message)
s.update(stage2)
result = s.digest()
return _my_crypt(result, stage1)
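# Equivalently: token = SHA1(message + SHA1(SHA1(password))) XOR SHA1(password),
# which is the mysql_native_password challenge/response computed above.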
def _my_crypt(message1, message2):
length = len(message1)
result = b''
for i in range_type(length):
x = (struct.unpack('B', message1[i:i+1])[0] ^
struct.unpack('B', message2[i:i+1])[0])
result += struct.pack('B', x)
return result
# old_passwords support ported from libmysql/password.c
SCRAMBLE_LENGTH_323 = 8
class RandStruct_323(object):
def __init__(self, seed1, seed2):
self.max_value = 0x3FFFFFFF
self.seed1 = seed1 % self.max_value
self.seed2 = seed2 % self.max_value
def my_rnd(self):
self.seed1 = (self.seed1 * 3 + self.seed2) % self.max_value
self.seed2 = (self.seed1 + self.seed2 + 33) % self.max_value
return float(self.seed1) / float(self.max_value)
def _scramble_323(password, message):
hash_pass = _hash_password_323(password)
hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323])
hash_pass_n = struct.unpack(">LL", hash_pass)
hash_message_n = struct.unpack(">LL", hash_message)
rand_st = RandStruct_323(hash_pass_n[0] ^ hash_message_n[0],
hash_pass_n[1] ^ hash_message_n[1])
outbuf = io.BytesIO()
for _ in range_type(min(SCRAMBLE_LENGTH_323, len(message))):
outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64))
extra = int2byte(int(rand_st.my_rnd() * 31))
out = outbuf.getvalue()
outbuf = io.BytesIO()
for c in out:
outbuf.write(int2byte(byte2int(c) ^ byte2int(extra)))
return outbuf.getvalue()
def _hash_password_323(password):
nr = 1345345333
add = 7
nr2 = 0x12345671
    # x is an int in py3, a single-char string in py2.7
for c in [byte2int(x) for x in password if x not in (' ', '\t', 32, 9)]:
nr ^= (((nr & 63) + add) * c) + (nr << 8) & 0xFFFFFFFF
nr2 = (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF
add = (add + c) & 0xFFFFFFFF
r1 = nr & ((1 << 31) - 1) # kill sign bits
r2 = nr2 & ((1 << 31) - 1)
return struct.pack(">LL", r1, r2)
def pack_int24(n):
return struct.pack('<I', n)[:3]
# https://dev.mysql.com/doc/internals/en/integer.html#packet-Protocol::LengthEncodedInteger
def lenenc_int(i):
if (i < 0):
raise ValueError("Encoding %d is less than 0 - no representation in LengthEncodedInteger" % i)
elif (i < 0xfb):
return int2byte(i)
elif (i < (1 << 16)):
return b'\xfc' + struct.pack('<H', i)
elif (i < (1 << 24)):
return b'\xfd' + struct.pack('<I', i)[:3]
elif (i < (1 << 64)):
return b'\xfe' + struct.pack('<Q', i)
else:
raise ValueError("Encoding %x is larger than %x - no representation in LengthEncodedInteger" % (i, (1 << 64)))
class MysqlPacket(object):
"""Representation of a MySQL response packet.
Provides an interface for reading/parsing the packet results.
"""
__slots__ = ('_position', '_data')
def __init__(self, data, encoding):
self._position = 0
self._data = data
def get_all_data(self):
return self._data
def read(self, size):
"""Read the first 'size' bytes in packet and advance cursor past them."""
result = self._data[self._position:(self._position+size)]
if len(result) != size:
error = ('Result length not requested length:\n'
'Expected=%s. Actual=%s. Position: %s. Data Length: %s'
% (size, len(result), self._position, len(self._data)))
if DEBUG:
print(error)
self.dump()
raise AssertionError(error)
self._position += size
return result
def read_all(self):
"""Read all remaining data in the packet.
(Subsequent read() will return errors.)
"""
result = self._data[self._position:]
self._position = None # ensure no subsequent read()
return result
def advance(self, length):
"""Advance the cursor in data buffer 'length' bytes."""
new_position = self._position + length
if new_position < 0 or new_position > len(self._data):
raise Exception('Invalid advance amount (%s) for cursor. '
'Position=%s' % (length, new_position))
self._position = new_position
def rewind(self, position=0):
"""Set the position of the data buffer cursor to 'position'."""
if position < 0 or position > len(self._data):
raise Exception("Invalid position to rewind cursor to: %s." % position)
self._position = position
def get_bytes(self, position, length=1):
"""Get 'length' bytes starting at 'position'.
Position is start of payload (first four packet header bytes are not
included) starting at index '0'.
No error checking is done. If requesting outside end of buffer
an empty string (or string shorter than 'length') may be returned!
"""
return self._data[position:(position+length)]
if PY2:
def read_uint8(self):
result = ord(self._data[self._position])
self._position += 1
return result
else:
def read_uint8(self):
result = self._data[self._position]
self._position += 1
return result
def read_uint16(self):
result = struct.unpack_from('<H', self._data, self._position)[0]
self._position += 2
return result
def read_uint24(self):
low, high = struct.unpack_from('<HB', self._data, self._position)
self._position += 3
return low + (high << 16)
def read_uint32(self):
result = struct.unpack_from('<I', self._data, self._position)[0]
self._position += 4
return result
def read_uint64(self):
result = struct.unpack_from('<Q', self._data, self._position)[0]
self._position += 8
return result
def read_string(self):
end_pos = self._data.find(b'\0', self._position)
if end_pos < 0:
return None
result = self._data[self._position:end_pos]
self._position = end_pos + 1
return result
def read_length_encoded_integer(self):
"""Read a 'Length Coded Binary' number from the data buffer.
Length coded numbers can be anywhere from 1 to 9 bytes depending
on the value of the first byte.
"""
c = self.read_uint8()
if c == NULL_COLUMN:
return None
if c < UNSIGNED_CHAR_COLUMN:
return c
elif c == UNSIGNED_SHORT_COLUMN:
return self.read_uint16()
elif c == UNSIGNED_INT24_COLUMN:
return self.read_uint24()
elif c == UNSIGNED_INT64_COLUMN:
return self.read_uint64()
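    # First-byte dispatch used above: values below 0xfb are the integer itself,
    # NULL_COLUMN (0xfb) means SQL NULL, and 0xfc/0xfd/0xfe prefix 2-, 3- and
    # 8-byte little-endian integers respectively.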
def read_length_coded_string(self):
"""Read a 'Length Coded String' from the data buffer.
A 'Length Coded String' consists first of a length coded
(unsigned, positive) integer represented in 1-9 bytes followed by
that many bytes of binary data. (For example "cat" would be "3cat".)
"""
length = self.read_length_encoded_integer()
if length is None:
return None
return self.read(length)
def read_struct(self, fmt):
s = struct.Struct(fmt)
result = s.unpack_from(self._data, self._position)
self._position += s.size
return result
def is_ok_packet(self):
return self._data[0:1] == b'\0'
def is_auth_switch_request(self):
# http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
return self._data[0:1] == b'\xfe'
def is_eof_packet(self):
# http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-EOF_Packet
# Caution: \xFE may be LengthEncodedInteger.
# If \xFE is LengthEncodedInteger header, 8bytes followed.
return len(self._data) < 9 and self._data[0:1] == b'\xfe'
def is_resultset_packet(self):
field_count = ord(self._data[0:1])
return 1 <= field_count <= 250
def is_load_local_packet(self):
return self._data[0:1] == b'\xfb'
def is_error_packet(self):
return self._data[0:1] == b'\xff'
def check_error(self):
if self.is_error_packet():
self.rewind()
self.advance(1) # field_count == error (we already know that)
errno = self.read_uint16()
if DEBUG: print("errno =", errno)
err.raise_mysql_exception(self._data)
def dump(self):
dump_packet(self._data)
class FieldDescriptorPacket(MysqlPacket):
"""A MysqlPacket that represents a specific column's metadata in the result.
Parsing is automatically done and the results are exported via public
attributes on the class such as: db, table_name, name, length, type_code.
"""
def __init__(self, data, encoding):
MysqlPacket.__init__(self, data, encoding)
self.__parse_field_descriptor(encoding)
def __parse_field_descriptor(self, encoding):
"""Parse the 'Field Descriptor' (Metadata) packet.
This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0).
"""
self.catalog = self.read_length_coded_string()
self.db = self.read_length_coded_string()
self.table_name = self.read_length_coded_string().decode(encoding)
self.org_table = self.read_length_coded_string().decode(encoding)
self.name = self.read_length_coded_string().decode(encoding)
self.org_name = self.read_length_coded_string().decode(encoding)
self.charsetnr, self.length, self.type_code, self.flags, self.scale = (
self.read_struct('<xHIBHBxx'))
# 'default' is a length coded binary and is still in the buffer?
# not used for normal result sets...
def description(self):
"""Provides a 7-item tuple compatible with the Python PEP249 DB Spec."""
return (
self.name,
self.type_code,
None, # TODO: display_length; should this be self.length?
self.get_column_length(), # 'internal_size'
self.get_column_length(), # 'precision' # TODO: why!?!?
self.scale,
self.flags % 2 == 0)
def get_column_length(self):
if self.type_code == FIELD_TYPE.VAR_STRING:
mblen = MBLENGTH.get(self.charsetnr, 1)
return self.length // mblen
return self.length
def __str__(self):
return ('%s %r.%r.%r, type=%s, flags=%x'
% (self.__class__, self.db, self.table_name, self.name,
self.type_code, self.flags))
class OKPacketWrapper(object):
"""
OK Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_ok_packet():
raise ValueError('Cannot create ' + str(self.__class__.__name__) +
' object from invalid packet type')
self.packet = from_packet
self.packet.advance(1)
self.affected_rows = self.packet.read_length_encoded_integer()
self.insert_id = self.packet.read_length_encoded_integer()
self.server_status, self.warning_count = self.read_struct('<HH')
self.message = self.packet.read_all()
self.has_next = self.server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS
def __getattr__(self, key):
return getattr(self.packet, key)
class EOFPacketWrapper(object):
"""
EOF Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_eof_packet():
raise ValueError(
"Cannot create '{0}' object from invalid packet type".format(
self.__class__))
self.packet = from_packet
self.warning_count, self.server_status = self.packet.read_struct('<xhh')
if DEBUG: print("server_status=", self.server_status)
self.has_next = self.server_status & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS
def __getattr__(self, key):
return getattr(self.packet, key)
class LoadLocalPacketWrapper(object):
"""
Load Local Packet Wrapper. It uses an existing packet object, and wraps
around it, exposing useful variables while still providing access
to the original packet objects variables and methods.
"""
def __init__(self, from_packet):
if not from_packet.is_load_local_packet():
raise ValueError(
"Cannot create '{0}' object from invalid packet type".format(
self.__class__))
self.packet = from_packet
self.filename = self.packet.get_all_data()[1:]
if DEBUG: print("filename=", self.filename)
def __getattr__(self, key):
return getattr(self.packet, key)
class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
socket = None
_auth_plugin_name = ''
def __init__(self, host=None, user=None, password="",
database=None, port=0, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=None,
autocommit=False, db=None, passwd=None, local_infile=False,
max_allowed_packet=16*1024*1024, defer_connect=False,
auth_plugin_map={}):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK. (default: 3306)
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
        compress: Not supported
named_pipe: Not supported
autocommit: Autocommit mode. None means use server default. (default: False)
local_infile: Boolean to enable the use of LOAD DATA LOCAL command. (default: False)
max_allowed_packet: Max size of packet sent to server in bytes. (default: 16MB)
            Only used to limit the size of "LOAD DATA LOCAL INFILE" data packets below the default (16KB).
        defer_connect: Don't explicitly connect on construction - wait for connect call.
(default: False)
auth_plugin_map: A dict of plugin names to a class that processes that plugin.
The class will take the Connection object as the argument to the constructor.
The class needs an authenticate method taking an authentication packet as
an argument. For the dialog plugin, a prompt(echo, prompt) method can be used
(if no authenticate method) for returning a string from the user. (experimental)
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
if no_delay is not None:
warnings.warn("no_delay option is deprecated", DeprecationWarning)
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if local_infile:
client_flag |= CLIENT.LOCAL_FILES
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
self.ctx = self._create_ssl_ctx(ssl)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = Parser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, arg):
if arg:
return arg
try:
return cfg.get(read_default_group, key)
except Exception:
return arg
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host or "localhost"
self.port = port or 3306
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
self.max_allowed_packet = max_allowed_packet
self._auth_plugin_map = auth_plugin_map
if defer_connect:
self.socket = None
else:
self.connect()
def _create_ssl_ctx(self, sslp):
if isinstance(sslp, ssl.SSLContext):
return sslp
ca = sslp.get('ca')
capath = sslp.get('capath')
hasnoca = ca is None and capath is None
ctx = ssl.create_default_context(cafile=ca, capath=capath)
ctx.check_hostname = not hasnoca and sslp.get('check_hostname', True)
ctx.verify_mode = ssl.CERT_NONE if hasnoca else ssl.CERT_REQUIRED
if 'cert' in sslp:
ctx.load_cert_chain(sslp['cert'], keyfile=sslp.get('key'))
if 'cipher' in sslp:
ctx.set_ciphers(sslp['cipher'])
ctx.options |= ssl.OP_NO_SSLv2
ctx.options |= ssl.OP_NO_SSLv3
return ctx
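    # The "ssl" connection argument consumed above is a dict; for example
    # (paths are hypothetical):
    #   {'ca': '/path/ca.pem', 'cert': '/path/client-cert.pem',
    #    'key': '/path/client-key.pem', 'check_hostname': True}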
def close(self):
"""Send the quit message and close the socket"""
if self.socket is None:
raise err.Error("Already closed")
send_data = struct.pack('<iB', 1, COMMAND.COM_QUIT)
try:
self._write_bytes(send_data)
except Exception:
pass
finally:
sock = self.socket
self.socket = None
self._rfile = None
sock.close()
@property
def open(self):
return self.socket is not None
def __del__(self):
if self.socket:
try:
self.socket.close()
except:
pass
self.socket = None
self._rfile = None
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
def _read_ok_packet(self):
pkt = self._read_packet()
if not pkt.is_ok_packet():
raise err.OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
return ok
def _send_autocommit_mode(self):
"""Set whether or not to commit after every execute()"""
self._execute_command(COMMAND.COM_QUERY, "SET AUTOCOMMIT = %s" %
self.escape(self.autocommit_mode))
self._read_ok_packet()
def begin(self):
"""Begin transaction."""
self._execute_command(COMMAND.COM_QUERY, "BEGIN")
self._read_ok_packet()
def commit(self):
"""Commit changes to stable storage"""
self._execute_command(COMMAND.COM_QUERY, "COMMIT")
self._read_ok_packet()
def rollback(self):
"""Roll back the current transaction"""
self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
self._read_ok_packet()
def show_warnings(self):
"""SHOW WARNINGS"""
self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
result.read()
return result.rows
def select_db(self, db):
'''Set current db'''
self._execute_command(COMMAND.COM_INIT_DB, db)
self._read_ok_packet()
def escape(self, obj, mapping=None):
"""Escape whatever value you pass to it"""
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset, mapping=mapping)
def literal(self, obj):
'''Alias for escape()'''
return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
"""Create a new cursor to execute queries with"""
if cursor:
return cursor(self)
return self.cursorclass(self)
def __enter__(self):
"""Context manager that returns a Cursor"""
return self.cursor()
def __exit__(self, exc, value, traceback):
"""On successful exit, commit. On exception, rollback"""
if exc:
self.rollback()
else:
self.commit()
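    # Illustrative use of the context manager above (connection kwargs are hypothetical):
    #   with connect(host='localhost', user='root', db='test') as cursor:
    #       cursor.execute("SELECT 1")
    #   # commits on a clean exit, rolls back if the block raised an exception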
# The following methods are INTERNAL USE ONLY (called from Cursor)
def query(self, sql, unbuffered=False):
# if DEBUG:
# print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
if PY2:
sql = sql.encode(self.encoding)
else:
sql = sql.encode(self.encoding, 'surrogateescape')
self._execute_command(COMMAND.COM_QUERY, sql)
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def next_result(self, unbuffered=False):
self._affected_rows = self._read_query_result(unbuffered=unbuffered)
return self._affected_rows
def affected_rows(self):
return self._affected_rows
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
return self._read_ok_packet()
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self.socket is None:
if reconnect:
self.connect()
reconnect = False
else:
raise err.Error("Already closed")
try:
self._execute_command(COMMAND.COM_PING, "")
return self._read_ok_packet()
except Exception:
if reconnect:
self.connect()
return self.ping(False)
else:
raise
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
self._read_packet()
self.charset = charset
self.encoding = encoding
def connect(self, sock=None):
try:
if sock is None:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.connect_timeout)
sock.connect(self.unix_socket)
self.host_info = "Localhost via UNIX socket"
if DEBUG: print('connected using unix_socket')
else:
while True:
try:
sock = socket.create_connection(
(self.host, self.port), self.connect_timeout)
break
except (OSError, IOError) as e:
if e.errno == errno.EINTR:
continue
raise
self.host_info = "socket %s:%d" % (self.host, self.port)
if DEBUG: print('connected using socket')
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.settimeout(None)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.socket = sock
self._rfile = _makefile(sock, 'rb')
self._next_seq_id = 0
self._get_server_information()
self._request_authentication()
if self.sql_mode is not None:
c = self.cursor()
c.execute("SET sql_mode=%s", (self.sql_mode,))
if self.init_command is not None:
c = self.cursor()
c.execute(self.init_command)
c.close()
self.commit()
if self.autocommit_mode is not None:
self.autocommit(self.autocommit_mode)
except BaseException as e:
self._rfile = None
if sock is not None:
try:
sock.close()
except:
pass
if isinstance(e, (OSError, IOError, socket.error)):
exc = err.OperationalError(
2003,
"Can't connect to MySQL server on %r (%s)" % (
self.host, e))
# Keep original exception and traceback to investigate error.
exc.original_exception = e
exc.traceback = traceback.format_exc()
if DEBUG: print(exc.traceback)
raise exc
# If e is neither DatabaseError or IOError, It's a bug.
# But raising AssertionError hides original error.
# So just reraise it.
raise
def write_packet(self, payload):
"""Writes an entire "mysql packet" in its entirety to the network
        adding its length and sequence number.
"""
        # Internal note: when you build a packet manually and call _write_bytes()
# directly, you should set self._next_seq_id properly.
data = pack_int24(len(payload)) + int2byte(self._next_seq_id) + payload
if DEBUG: dump_packet(data)
self._write_bytes(data)
self._next_seq_id = (self._next_seq_id + 1) % 256
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
while True:
packet_header = self._read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
if packet_number != self._next_seq_id:
raise err.InternalError("Packet sequence number wrong - got %d expected %d" %
(packet_number, self._next_seq_id))
self._next_seq_id = (self._next_seq_id + 1) % 256
recv_data = self._read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 0xffffff:
continue
if bytes_to_read < MAX_PACKET_LEN:
break
packet = packet_type(buff, self.encoding)
packet.check_error()
return packet
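    # Wire format handled by _read_packet(): each packet is a 3-byte little-endian
    # payload length plus a 1-byte sequence id, followed by the payload; a payload
    # of exactly 0xffffff bytes means the logical packet continues in the next one.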
def _read_bytes(self, num_bytes):
while True:
try:
data = self._rfile.read(num_bytes)
break
except (IOError, OSError) as e:
if e.errno == errno.EINTR:
continue
raise err.OperationalError(
2013,
"Lost connection to MySQL server during query (%s)" % (e,))
if len(data) < num_bytes:
raise err.OperationalError(
2013, "Lost connection to MySQL server during query")
return data
def _write_bytes(self, data):
try:
self.socket.sendall(data)
except IOError as e:
raise err.OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
result.read()
self._result = result
if result.server_status is not None:
self.server_status = result.server_status
return result.affected_rows
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
def _execute_command(self, command, sql):
if not self.socket:
raise err.InterfaceError("(0, '')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None:
if self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
self._result._finish_unbuffered_query()
while self._result.has_next:
self.next_result()
self._result = None
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
packet_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
# tiny optimization: build first packet manually instead of
        # calling self.write_packet()
prelude = struct.pack('<iB', packet_size, command)
packet = prelude + sql[:packet_size-1]
self._write_bytes(packet)
if DEBUG: dump_packet(packet)
self._next_seq_id = 1
if packet_size < MAX_PACKET_LEN:
return
sql = sql[packet_size-1:]
while True:
packet_size = min(MAX_PACKET_LEN, len(sql))
self.write_packet(sql[:packet_size])
sql = sql[packet_size:]
if not sql and packet_size < MAX_PACKET_LEN:
break
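    # e.g. a command body larger than MAX_PACKET_LEN is split by the loop above
    # into full-size packets followed by one final packet shorter than
    # MAX_PACKET_LEN (possibly empty).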
def _request_authentication(self):
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
if int(self.server_version.split('.', 1)[0]) >= 5:
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
if self.ssl and self.server_capabilities & CLIENT.SSL:
self.write_packet(data_init)
self.socket = self.ctx.wrap_socket(self.socket, server_hostname=self.host)
self._rfile = _makefile(self.socket, 'rb')
data = data_init + self.user + b'\0'
authresp = b''
if self._auth_plugin_name in ('', 'mysql_native_password'):
authresp = _scramble(self.password.encode('latin1'), self.salt)
if self.server_capabilities & CLIENT.PLUGIN_AUTH_LENENC_CLIENT_DATA:
data += lenenc_int(len(authresp)) + authresp
elif self.server_capabilities & CLIENT.SECURE_CONNECTION:
data += struct.pack('B', len(authresp)) + authresp
else: # pragma: no cover - not testing against servers without secure auth (>=5.0)
data += authresp + b'\0'
if self.db and self.server_capabilities & CLIENT.CONNECT_WITH_DB:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + b'\0'
if self.server_capabilities & CLIENT.PLUGIN_AUTH:
name = self._auth_plugin_name
if isinstance(name, text_type):
name = name.encode('ascii')
data += name + b'\0'
self.write_packet(data)
auth_packet = self._read_packet()
        # if the authentication method isn't accepted, the first byte of the
        # response will be the octet 254 (an auth switch request)
if auth_packet.is_auth_switch_request():
# https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchRequest
auth_packet.read_uint8() # 0xfe packet identifier
plugin_name = auth_packet.read_string()
if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
auth_packet = self._process_auth(plugin_name, auth_packet)
else:
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
self.write_packet(data)
auth_packet = self._read_packet()
def _process_auth(self, plugin_name, auth_packet):
plugin_class = self._auth_plugin_map.get(plugin_name)
if not plugin_class:
plugin_class = self._auth_plugin_map.get(plugin_name.decode('ascii'))
if plugin_class:
try:
handler = plugin_class(self)
return handler.authenticate(auth_packet)
except AttributeError:
if plugin_name != b'dialog':
raise err.OperationalError(2059, "Authentication plugin '%s'" \
" not loaded: - %r missing authenticate method" % (plugin_name, plugin_class))
except TypeError:
raise err.OperationalError(2059, "Authentication plugin '%s'" \
" not loaded: - %r cannot be constructed with connection object" % (plugin_name, plugin_class))
else:
handler = None
if plugin_name == b"mysql_native_password":
# https://dev.mysql.com/doc/internals/en/secure-password-authentication.html#packet-Authentication::Native41
data = _scramble(self.password.encode('latin1'), auth_packet.read_all()) + b'\0'
elif plugin_name == b"mysql_old_password":
# https://dev.mysql.com/doc/internals/en/old-password-authentication.html
data = _scramble_323(self.password.encode('latin1'), auth_packet.read_all()) + b'\0'
elif plugin_name == b"mysql_clear_password":
# https://dev.mysql.com/doc/internals/en/clear-text-authentication.html
data = self.password.encode('latin1') + b'\0'
elif plugin_name == b"dialog":
pkt = auth_packet
while True:
flag = pkt.read_uint8()
echo = (flag & 0x06) == 0x02
last = (flag & 0x01) == 0x01
prompt = pkt.read_all()
if prompt == b"Password: ":
self.write_packet(self.password.encode('latin1') + b'\0')
elif handler:
resp = 'no response - TypeError within plugin.prompt method'
try:
resp = handler.prompt(echo, prompt)
self.write_packet(resp + b'\0')
except AttributeError:
raise err.OperationalError(2059, "Authentication plugin '%s'" \
" not loaded: - %r missing prompt method" % (plugin_name, handler))
except TypeError:
raise err.OperationalError(2061, "Authentication plugin '%s'" \
" %r didn't respond with string. Returned '%r' to prompt %r" % (plugin_name, handler, resp, prompt))
else:
raise err.OperationalError(2059, "Authentication plugin '%s' (%r) not configured" % (plugin_name, handler))
pkt = self._read_packet()
pkt.check_error()
if pkt.is_ok_packet() or last:
break
return pkt
else:
raise err.OperationalError(2059, "Authentication plugin '%s' not configured" % plugin_name)
self.write_packet(data)
pkt = self._read_packet()
pkt.check_error()
return pkt
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
def _get_server_information(self):
i = 0
packet = self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(b'\0', i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
i += salt_len
i+=1
# AUTH PLUGIN NAME may appear here.
if self.server_capabilities & CLIENT.PLUGIN_AUTH and len(data) >= i:
# Due to Bug#59453 the auth-plugin-name is missing the terminating
# NUL-char in versions prior to 5.5.10 and 5.6.2.
# ref: https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
            # we don't use version checks here because MariaDB has fixed this
            # and reports versions earlier than those two.
server_end = data.find(b'\0', i)
if server_end < 0: # pragma: no cover - very specific upstream bug
# not found \0 and last field so take it all
self._auth_plugin_name = data[i:].decode('latin1')
else:
self._auth_plugin_name = data[i:server_end].decode('latin1')
def get_server_info(self):
return self.server_version
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class MySQLResult(object):
def __init__(self, connection):
"""
:type connection: Connection
"""
self.connection = connection
self.affected_rows = None
self.insert_id = None
self.server_status = None
self.warning_count = 0
self.message = None
self.field_count = 0
self.description = None
self.rows = None
self.has_next = None
self.unbuffered_active = False
def __del__(self):
if self.unbuffered_active:
self._finish_unbuffered_query()
def read(self):
try:
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
elif first_packet.is_load_local_packet():
self._read_load_local_packet(first_packet)
else:
self._read_result_packet(first_packet)
finally:
self.connection = None
def init_unbuffered_query(self):
self.unbuffered_active = True
first_packet = self.connection._read_packet()
if first_packet.is_ok_packet():
self._read_ok_packet(first_packet)
self.unbuffered_active = False
self.connection = None
else:
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
# Apparently, MySQLdb picks this number because it's the maximum
# value of a 64bit unsigned integer. Since we're emulating MySQLdb,
# we set it to this instead of None, which would be preferred.
self.affected_rows = 18446744073709551615
def _read_ok_packet(self, first_packet):
ok_packet = OKPacketWrapper(first_packet)
self.affected_rows = ok_packet.affected_rows
self.insert_id = ok_packet.insert_id
self.server_status = ok_packet.server_status
self.warning_count = ok_packet.warning_count
self.message = ok_packet.message
self.has_next = ok_packet.has_next
def _read_load_local_packet(self, first_packet):
load_packet = LoadLocalPacketWrapper(first_packet)
sender = LoadLocalFile(load_packet.filename, self.connection)
try:
sender.send_data()
except:
self.connection._read_packet() # skip ok packet
raise
ok_packet = self.connection._read_packet()
if not ok_packet.is_ok_packet(): # pragma: no cover - upstream induced protocol error
raise err.OperationalError(2014, "Commands Out of Sync")
self._read_ok_packet(ok_packet)
def _check_packet_is_eof(self, packet):
if packet.is_eof_packet():
eof_packet = EOFPacketWrapper(packet)
self.warning_count = eof_packet.warning_count
self.has_next = eof_packet.has_next
return True
return False
def _read_result_packet(self, first_packet):
self.field_count = first_packet.read_length_encoded_integer()
self._get_descriptions()
self._read_rowdata_packet()
def _read_rowdata_packet_unbuffered(self):
# Check if in an active query
if not self.unbuffered_active:
return
# EOF
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None
self.rows = None
return
row = self._read_row_from_packet(packet)
self.affected_rows = 1
        self.rows = (row,) # rows should be a tuple of rows for MySQL-python compatibility.
return row
def _finish_unbuffered_query(self):
# After much reading on the MySQL protocol, it appears that there is,
# in fact, no way to stop MySQL from sending all the data after
# executing a query, so we just spin, and wait for an EOF packet.
while self.unbuffered_active:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.unbuffered_active = False
self.connection = None # release reference to kill cyclic reference.
def _read_rowdata_packet(self):
"""Read a rowdata packet for each data row in the result set."""
rows = []
while True:
packet = self.connection._read_packet()
if self._check_packet_is_eof(packet):
self.connection = None # release reference to kill cyclic reference.
break
rows.append(self._read_row_from_packet(packet))
self.affected_rows = len(rows)
self.rows = tuple(rows)
def _read_row_from_packet(self, packet):
row = []
for encoding, converter in self.converters:
try:
data = packet.read_length_coded_string()
except IndexError:
# No more columns in this row
# See https://github.com/PyMySQL/PyMySQL/pull/434
break
if data is not None:
if encoding is not None:
data = data.decode(encoding)
if DEBUG: print("DEBUG: DATA = ", data)
if converter is not None:
data = converter(data)
row.append(data)
return tuple(row)
def _get_descriptions(self):
"""Read a column descriptor packet for each column in the result."""
self.fields = []
self.converters = []
use_unicode = self.connection.use_unicode
description = []
for i in range_type(self.field_count):
field = self.connection._read_packet(FieldDescriptorPacket)
self.fields.append(field)
description.append(field.description())
field_type = field.type_code
if use_unicode:
if field_type in TEXT_TYPES:
charset = charset_by_id(field.charsetnr)
if charset.is_binary:
# TEXTs with charset=binary means BINARY types.
encoding = None
else:
encoding = charset.encoding
else:
encoding = 'ascii'
else:
encoding = None
converter = self.connection.decoders.get(field_type)
if converter is through:
converter = None
if DEBUG: print("DEBUG: field={}, converter={}".format(field, converter))
self.converters.append((encoding, converter))
eof_packet = self.connection._read_packet()
assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
self.description = tuple(description)
class LoadLocalFile(object):
def __init__(self, filename, connection):
self.filename = filename
self.connection = connection
def send_data(self):
"""Send data packets from the local file to the server"""
if not self.connection.socket:
raise err.InterfaceError("(0, '')")
conn = self.connection
try:
with open(self.filename, 'rb') as open_file:
packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
while True:
chunk = open_file.read(packet_size)
if not chunk:
break
conn.write_packet(chunk)
except IOError:
raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
finally:
# send the empty packet to signify we are done sending data
conn.write_packet(b'')
|
mit
| -927,726,751,287,593,100 | 35.793174 | 134 | 0.575841 | false |
elbeardmorez/quodlibet
|
quodlibet/quodlibet/player/xinebe/player.py
|
1
|
12177
|
# -*- coding: utf-8 -*-
# Copyright 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import GLib
from quodlibet import _
from quodlibet import config
from quodlibet.player import PlayerError
from quodlibet.player._base import BasePlayer
from quodlibet.util.string import decode
from . import cdefs
from .cdefs import XINE_PARAM_SPEED, XINE_PARAM_GAPLESS_SWITCH, xine_dispose, \
XINE_SPEED_PAUSE, xine_play, xine_close, xine_set_param, xine_get_param, \
xine_get_status, xine_open, xine_stop, XINE_PARAM_EARLY_FINISHED_EVENT, \
XINE_META_INFO_ARTIST, xine_ui_message_data_t, XINE_EVENT_UI_SET_TITLE, \
XINE_PARAM_AUDIO_AMP_MUTE, XINE_PARAM_AUDIO_AMP_LEVEL, xine_new, \
XINE_EVENT_UI_PLAYBACK_FINISHED, xine_event_dispose_queue, xine_init, \
XINE_PARAM_IGNORE_VIDEO, XINE_PARAM_IGNORE_SPU, xine_config_load, \
xine_check_version, xine_get_homedir, xine_list_input_plugins, xine_exit, \
xine_open_audio_driver, xine_close_audio_driver, XINE_STATUS_PLAY, \
XINE_SPEED_NORMAL, xine_get_pos_length, XINE_MSG_NO_ERROR, \
XINE_EVENT_UI_MESSAGE, xine_get_meta_info, XINE_META_INFO_ALBUM, \
XINE_META_INFO_TITLE, xine_stream_new, xine_get_version_string, \
xine_event_new_queue, xine_event_create_listener_thread
class XineHandle(object):
def __init__(self):
_xine = xine_new()
xine_config_load(_xine, xine_get_homedir() + b"/.xine/config")
xine_init(_xine)
self._xine = _xine
def list_input_plugins(self):
"""
Returns:
List[text_type]
"""
plugins = []
for plugin in xine_list_input_plugins(self._xine):
if not plugin:
break
plugins.append(decode(plugin))
return plugins
def exit(self):
xine_exit(self._xine)
def open_audio_driver(self, identifier, data):
return xine_open_audio_driver(self._xine, identifier, data)
def close_audio_driver(self, driver):
xine_close_audio_driver(self._xine, driver)
def stream_new(self, audio_port, video_port):
return xine_stream_new(self._xine, audio_port, video_port)
class XinePlaylistPlayer(BasePlayer):
"""Xine playlist player."""
_paused = True
def __init__(self, driver, librarian):
"""May raise PlayerError"""
super(XinePlaylistPlayer, self).__init__()
self.name = "xine"
self.version_info = "xine-lib: " + decode(xine_get_version_string())
self._volume = 1.0
self._handle = XineHandle()
self._supports_gapless = xine_check_version(1, 1, 1) == 1
self._event_queue = None
self._new_stream(driver)
self._librarian = librarian
self._destroyed = False
def _new_stream(self, driver):
assert driver is None or isinstance(driver, bytes)
self._audio_port = self._handle.open_audio_driver(driver, None)
if not self._audio_port:
raise PlayerError(
_("Unable to create audio output"),
_("The audio device %r was not found. Check your Xine "
"settings in ~/.quodlibet/config.") % driver)
self._stream = self._handle.stream_new(self._audio_port, None)
xine_set_param(self._stream, XINE_PARAM_IGNORE_VIDEO, 1)
xine_set_param(self._stream, XINE_PARAM_IGNORE_SPU, 1)
self.update_eq_values()
if self._supports_gapless:
xine_set_param(self._stream, XINE_PARAM_EARLY_FINISHED_EVENT, 1)
if self._event_queue:
xine_event_dispose_queue(self._event_queue)
self._event_queue = xine_event_new_queue(self._stream)
xine_event_create_listener_thread(self._event_queue,
self._event_listener, None)
def _destroy(self):
self._destroyed = True
if self._stream:
xine_close(self._stream)
xine_dispose(self._stream)
self._stream = None
if self._event_queue:
xine_event_dispose_queue(self._event_queue)
if self._audio_port:
self._handle.close_audio_driver(self._audio_port)
self._handle.exit()
def _playback_finished(self):
if self._destroyed:
return False
self._source.next_ended()
self._end(False, None, gapless=True)
return False
def _update_metadata(self):
if self._destroyed:
return False
if not self.song or not self.song.multisong:
return False
if self.info is self.song:
self.info = type(self.song)(self.song["~filename"])
self.info.multisong = False
changed = False
meta = [
(XINE_META_INFO_TITLE, 'title'),
(XINE_META_INFO_ARTIST, 'artist'),
(XINE_META_INFO_ALBUM, 'album'),
]
for info, name in meta:
text = xine_get_meta_info(self._stream, info)
if not text:
continue
text = text.decode('UTF-8', 'replace')
if self.info.get(name) != text:
self.info[name] = text
changed = True
if changed:
self.emit('song-started', self.info)
if self._librarian is not None:
self._librarian.changed([self.song])
return False
def _event_listener(self, user_data, event):
event = event.contents
if event.type == XINE_EVENT_UI_PLAYBACK_FINISHED:
GLib.idle_add(self._playback_finished,
priority=GLib.PRIORITY_HIGH)
elif event.type == XINE_EVENT_UI_SET_TITLE:
GLib.idle_add(self._update_metadata,
priority=GLib.PRIORITY_HIGH)
elif event.type == XINE_EVENT_UI_MESSAGE:
from ctypes import POINTER, cast, string_at, addressof
msg = cast(event.data, POINTER(xine_ui_message_data_t)).contents
if msg.type != XINE_MSG_NO_ERROR:
if msg.explanation:
message = string_at(addressof(msg) + msg.explanation)
else:
message = "xine error %s" % msg.type
message = message.decode("utf-8", errors="replace")
GLib.idle_add(self._error, PlayerError(message))
return True
def do_get_property(self, property):
if property.name == 'volume':
return self._volume
elif property.name == 'seekable':
if self.song is None:
return False
return True
elif property.name == 'mute':
if not self._destroyed:
return xine_get_param(self._stream, XINE_PARAM_AUDIO_AMP_MUTE)
return False
else:
raise AttributeError
def do_set_property(self, property, v):
if property.name == 'volume':
self._volume = v
v = self.calc_replaygain_volume(v)
v = min(100, int(v * 100))
if not self._destroyed:
xine_set_param(self._stream, XINE_PARAM_AUDIO_AMP_LEVEL, v)
elif property.name == 'mute':
if not self._destroyed:
xine_set_param(self._stream, XINE_PARAM_AUDIO_AMP_MUTE, v)
else:
raise AttributeError
def get_position(self):
"""Return the current playback position in milliseconds,
or 0 if no song is playing."""
pos_stream, pos_time, length_time = xine_get_pos_length(self._stream)
return pos_time
def _stop(self):
xine_stop(self._stream)
def _pause(self):
xine_set_param(self._stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE)
def _play(self):
if (xine_get_param(self._stream, XINE_PARAM_SPEED) !=
XINE_SPEED_NORMAL):
xine_set_param(self._stream, XINE_PARAM_SPEED, XINE_SPEED_NORMAL)
if xine_get_status(self._stream) != XINE_STATUS_PLAY:
xine_play(self._stream, 0, 0)
@property
def paused(self):
return self._paused
@paused.setter
def paused(self, paused):
if paused == self._paused:
return
self._paused = paused
self.emit((paused and 'paused') or 'unpaused')
if self._paused != paused:
return
if self.song:
if paused:
if not self.song.is_file:
xine_close(self._stream)
xine_open(self._stream, self.song("~uri"))
else:
self._pause()
else:
self._play()
def _error(self, player_error=None):
if self._destroyed:
return False
if self.error:
return False
self.error = True
self.paused = True
if player_error:
self.emit('error', self.song, player_error)
def seek(self, pos):
"""Seek to a position in the song, in milliseconds."""
if xine_get_param(self._stream, XINE_PARAM_SPEED) == XINE_SPEED_PAUSE:
xine_play(self._stream, 0, int(pos))
xine_set_param(self._stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE)
else:
xine_play(self._stream, 0, int(pos))
self.emit('seek', self.song, pos)
def _end(self, stopped, next_song=None, gapless=False):
# We need to set self.song to None before calling our signal
# handlers. Otherwise, if they try to end the song they're given
# (e.g. by removing it), then we get in an infinite loop.
song = self.song
self.song = self.info = None
self.emit('song-ended', song, stopped)
# reset error state
self.error = False
current = self._source.current if next_song is None else next_song
# Then, set up the next song.
self.song = self.info = current
self.emit('song-started', self.song)
if self.song is not None:
self.volume = self.volume
if gapless and self._supports_gapless:
xine_set_param(self._stream, XINE_PARAM_GAPLESS_SWITCH, 1)
xine_open(self._stream, self.song("~uri").encode("ascii"))
if self._paused:
self._pause()
else:
if song is None:
self.emit("unpaused")
self._play()
if gapless and self._supports_gapless:
xine_set_param(self._stream, XINE_PARAM_GAPLESS_SWITCH, 0)
else:
self.paused = True
xine_stop(self._stream)
# seekable might change if we change to None, so notify just in case
self.notify("seekable")
def setup(self, playlist, song, seek_pos):
super(XinePlaylistPlayer, self).setup(playlist, song, seek_pos)
# xine's declining to seek so soon after startup; try again in 100ms
if seek_pos:
GLib.timeout_add(100, self.seek, seek_pos)
@property
def eq_bands(self):
# These are taken straight from Xine's API
return [30, 60, 125, 250, 500, 1000, 2000, 4000, 8000, 16000]
def update_eq_values(self):
bands = self.eq_bands
need_eq = any(self._eq_values)
for band, val in enumerate(self._eq_values):
param = getattr(cdefs, 'XINE_PARAM_EQ_%dHZ' % bands[band])
# between 1..200; 100 is the default gain; 0 means no EQ filter
# only negative gain seems to work
val = (int(val * 100 / 24.0) + 100) or 1
val *= int(need_eq)
xine_set_param(self._stream, param, val)
def can_play_uri(self, uri):
for plugin in self._handle.list_input_plugins():
if uri.startswith(plugin.lower()):
return True
return False
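    # e.g. an "http://..." URI is reported as playable when xine lists an
    # "http" input plugin (the check above is a simple prefix match).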
def init(librarian):
"""May raise PlayerError"""
try:
driver = config.getbytes("settings", "xine_driver")
except:
driver = None
return XinePlaylistPlayer(driver, librarian)
|
gpl-2.0
| -3,936,165,923,608,529,000 | 34.920354 | 79 | 0.581506 | false |
mihaip/readerisdead
|
reader_archive/reader_archive.py
|
1
|
21719
|
import argparse
import itertools
import json
import logging
import os.path
import urllib
import urllib2
import sys
import xml.etree.cElementTree as ET
import base.api
import base.atom
import base.log
import base.tag_helper
import base.url_fetcher
import base.worker
def main():
base.log.init()
base.atom.init()
parser = argparse.ArgumentParser(
description='Comprehensive archive of a Google Reader account')
# Credentials
  parser.add_argument('--use_client_login', action='store_true',
help='Instead of OAuth, use ClientLogin for '
'authentication. You will be prompted for a '
'username and password')
parser.add_argument('--oauth_refresh_token', default='',
help='A previously obtained refresh token (used to bypass '
'OAuth setup')
parser.add_argument('--account', default='',
help='Google Account to save the archive for. Omit to '
'specify via standard input')
parser.add_argument('--password', default='',
help='Password for the account. Omit to specify via '
'standard input')
# Output options
parser.add_argument('--output_directory', default='./',
help='Directory where to place archive data.')
# Fetching options
parser.add_argument('--stream_items_chunk_size', type=int, default=10000,
help='Number of items refs to request per stream items '
'API call (higher is more efficient)')
parser.add_argument('--max_items_per_stream', type=int, default=0,
help='If non-zero, will cap the number of items that are '
'fetched per feed or tag')
parser.add_argument('--item_bodies_chunk_size', type=int, default=250,
help='Number of items refs per request for fetching their '
'bodies (higher is more efficient)')
parser.add_argument('--comments_chunk_size', type=int, default=250,
help='Number of items per request for fetching comments '
'on shared items (higher is more efficient)')
parser.add_argument('--max_streams', type=int, default=0,
help='Maximum number of streams to archive (0 for no '
'limit, only meant to be used for development)')
parser.add_argument('--parallelism', type=int, default=10,
help='Number of requests to make in parallel.')
parser.add_argument('--http_retry_count', type=int, default=1,
help='Number of retries to make in the case of HTTP '
'request errors.')
# Miscellaneous.
parser.add_argument('--additional_item_refs_file_path', default='',
help='Path to JSON file listing additional tag item refs '
'to fetch')
args = parser.parse_args()
output_directory = base.paths.normalize(args.output_directory)
base.paths.ensure_exists(output_directory)
def output_sub_directory(name):
directory_path = os.path.join(output_directory, name)
base.paths.ensure_exists(directory_path)
return directory_path
api_responses_directory = output_sub_directory('_raw_data')
streams_directory = output_sub_directory('streams')
data_directory = output_sub_directory('data')
items_directory = output_sub_directory('items')
comments_directory = output_sub_directory('comments')
if args.use_client_login:
authenticated_url_fetcher = base.url_fetcher.ClientLoginUrlFetcher(
args.account, args.password)
else:
authenticated_url_fetcher = base.url_fetcher.OAuthUrlFetcher(
args.oauth_refresh_token)
api = base.api.Api(
authenticated_url_fetcher=authenticated_url_fetcher,
http_retry_count=args.http_retry_count,
cache_directory=api_responses_directory)
user_info = api.fetch_user_info()
logging.info(
'Created API instance for %s (%s)', user_info.user_id, user_info.email)
logging.info('Saving preferences')
_save_preferences(api, data_directory)
logging.info('Gathering streams to fetch')
stream_ids = _get_stream_ids(api, user_info.user_id, data_directory)
if args.max_streams and len(stream_ids) > args.max_streams:
stream_ids = stream_ids[:args.max_streams]
logging.info('%d streams to fetch, gathering item refs:', len(stream_ids))
item_ids, item_refs_total = _fetch_and_save_item_refs(
stream_ids, api, args, streams_directory, user_info.user_id)
logging.info('%s unique item refs (%s total), grouping by chunk.',
'{:,}'.format(len(item_ids)),
'{:,}'.format(item_refs_total))
logging.info('Grouped item refs, getting item bodies:')
item_ids_chunks = _chunk_item_ids(item_ids, args.item_bodies_chunk_size)
item_bodies_to_fetch = len(item_ids)
fetched_item_bodies = [0]
missing_item_bodies = set()
def report_item_bodies_progress(requested_item_ids, found_item_ids):
if found_item_ids is None:
missing_item_bodies.update(set(requested_item_ids).difference(
base.api.not_found_items_ids_to_ignore))
return
fetched_item_bodies[0] += len(found_item_ids)
missing_item_bodies.update(
set(requested_item_ids).difference(set(found_item_ids)).difference(
base.api.not_found_items_ids_to_ignore))
logging.info(' Fetched %s/%s item bodies (%s could not be loaded)',
'{:,}'.format(fetched_item_bodies[0]),
'{:,}'.format(item_bodies_to_fetch),
'{:,}'.format(len(missing_item_bodies)))
base.worker.do_work(
lambda: FetchWriteItemBodiesWorker(api, items_directory),
item_ids_chunks,
args.parallelism,
report_progress=report_item_bodies_progress)
if missing_item_bodies:
logging.warn('Item bodies could not be loaded for: %s',
', '.join([i.compact_form() for i in missing_item_bodies]))
broadcast_stream_ids = [
stream_id for stream_id in stream_ids
if stream_id.startswith('user/') and
stream_id.endswith('/state/com.google/broadcast')
]
logging.info(
'Fetching comments from %d shared item streams.',
len(broadcast_stream_ids))
encoded_sharers = api.fetch_encoded_sharers()
remaining_broadcast_stream_ids = [len(broadcast_stream_ids)]
def report_comments_progress(_, comments_by_item_id):
if comments_by_item_id is None:
return
remaining_broadcast_stream_ids[0] -= 1
comment_count = sum((len(c) for c in comments_by_item_id.values()), 0)
logging.info(' Fetched %s comments, %s shared items streams left.',
'{:,}'.format(comment_count),
'{:,}'.format(remaining_broadcast_stream_ids[0]))
all_comments = {}
comments_for_broadcast_streams = base.worker.do_work(
lambda: FetchCommentsWorker(
api, encoded_sharers, args.comments_chunk_size),
broadcast_stream_ids,
args.parallelism,
report_progress=report_comments_progress)
total_comment_count = 0
for comments_for_broadcast_stream in comments_for_broadcast_streams:
if not comments_for_broadcast_stream:
continue
for item_id, comments in comments_for_broadcast_stream.iteritems():
total_comment_count += len(comments)
all_comments.setdefault(item_id, []).extend(comments)
logging.info('Writing %s comments from %s items.',
'{:,}'.format(total_comment_count),
'{:,}'.format(len(all_comments)))
for item_id, comments in all_comments.items():
item_comments_file_path = os.path.join(base.paths.item_id_to_file_path(
comments_directory, item_id), item_id.compact_form())
base.paths.ensure_exists(os.path.dirname(item_comments_file_path))
with open(item_comments_file_path, 'w') as item_comments_file:
item_comments_file.write(json.dumps([c.to_json() for c in comments]))
with open(os.path.join(output_directory, 'README'), 'w') as readme_file:
readme_file.write('See https://github.com/mihaip/readerisdead/'
'wiki/reader_archive-Format.\n')
def _save_preferences(api, data_directory):
def save(preferences_json, file_name):
file_path = os.path.join(data_directory, file_name)
with open(file_path, 'w') as file:
file.write(json.dumps(preferences_json))
save(api.fetch_preferences(), 'preferences.json')
save(api.fetch_stream_preferences(), 'stream-preferences.json')
save(
[g.to_json() for g in api.fetch_sharing_groups()], 'sharing-groups.json')
save(api.fetch_sharing_acl().to_json(), 'sharing-acl.json')
save(api.fetch_user_info().to_json(), 'user-info.json')
def _get_stream_ids(api, user_id, data_directory):
def save_items(items, file_name):
file_path = os.path.join(data_directory, file_name)
with open(file_path, 'w') as file:
file.write(json.dumps([i.to_json() for i in items]))
stream_ids = set()
tags = api.fetch_tags()
tag_stream_ids = set([t.stream_id for t in tags])
for system_tag in base.tag_helper.TagHelper(user_id).system_tags():
if system_tag.stream_id not in tag_stream_ids:
tags.append(system_tag)
tag_stream_ids.add(system_tag.stream_id)
stream_ids.update([tag.stream_id for tag in tags])
save_items(tags, 'tags.json')
subscriptions = api.fetch_subscriptions()
stream_ids.update([sub.stream_id for sub in subscriptions])
save_items(subscriptions, 'subscriptions.json')
friends = api.fetch_friends()
stream_ids.update([
f.stream_id for f in friends if f.stream_id and f.is_following])
save_items(friends, 'friends.json')
bundles = api.fetch_bundles()
for bundle in bundles:
stream_ids.update([f.stream_id for f in bundle.feeds])
save_items(bundles, 'bundles.json')
recommendations = api.fetch_recommendations()
stream_ids.update([r.stream_id for r in recommendations])
save_items(recommendations, 'recommendations.json')
stream_ids.add(base.api.EXPLORE_STREAM_ID)
stream_ids = list(stream_ids)
# Start the fetch with user streams, since those tend to have more items and
# are thus the long pole.
stream_ids.sort(reverse=True)
return stream_ids
def _load_additional_item_refs(
additional_item_refs_file_path, stream_ids, item_refs_responses, user_id):
logging.info('Adding additional item refs.')
compact_item_ids_by_stream_id = {}
item_refs_responses_by_stream_id = {}
for stream_id, item_refs in itertools.izip(stream_ids, item_refs_responses):
compact_item_ids_by_stream_id[stream_id] = set(
item_ref.item_id.compact_form() for item_ref in item_refs)
item_refs_responses_by_stream_id[stream_id] = item_refs
# The JSON file stores item IDs in hex, but with a leading 0x. Additionally,
# timestamps are in microseconds, but they're stored as strings.
def item_ref_from_json(item_ref_json):
return base.api.ItemRef(
item_id=base.api.item_id_from_compact_form(item_ref_json['id'][2:]),
timestamp_usec=int(item_ref_json['timestampUsec']))
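# For instance (hypothetical entry, not from the original file):
# {'id': '0xdeadbeef00000001', 'timestampUsec': '1285123456000000'} becomes
# an ItemRef whose item_id is parsed from 'deadbeef00000001' (the leading
# '0x' is stripped) and whose timestamp_usec is the int 1285123456000000.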
with open(additional_item_refs_file_path) as additional_item_refs_file:
additional_item_refs = json.load(additional_item_refs_file)
for stream_id, item_refs_json in additional_item_refs.iteritems():
if not stream_id.startswith('user/%s/' % user_id) or \
'state/com.google/touch' in stream_id or \
'state/com.google/recommendations-' in stream_id:
# Ignore tags from other users and those added by
# https://github.com/mihaip/google-reader-touch. Also ignore the
# recommendations tags, the items that they refer to aren't actually
# items in the Reader backend.
continue
if stream_id not in item_refs_responses_by_stream_id:
logging.info(' Stream %s (%s items) is new.',
stream_id, '{:,}'.format(len(item_refs_json)))
stream_ids.append(stream_id)
item_refs_responses.append(
[item_ref_from_json(i) for i in item_refs_json])
else:
new_item_refs = []
already_known_item_ref_count = 0
known_item_ids = compact_item_ids_by_stream_id[stream_id]
for item_ref_json in item_refs_json:
if item_ref_json['id'] == '0x859df8b8d14b566e':
# Skip this item, it seems to cause persistent 500s
continue
if item_ref_json['id'][2:] not in known_item_ids:
new_item_refs.append(item_ref_from_json(item_ref_json))
else:
already_known_item_ref_count += 1
if new_item_refs:
logging.info(' Got an additional %s item refs for %s '
'(%s were already known)',
'{:,}'.format(len(new_item_refs)),
stream_id,
'{:,}'.format(already_known_item_ref_count))
item_refs_responses_by_stream_id[stream_id].extend(new_item_refs)
def _fetch_and_save_item_refs(
stream_ids, api, args, streams_directory, user_id):
fetched_stream_ids = [0]
def report_item_refs_progress(stream_id, item_refs):
if item_refs is None:
logging.error(' Could not load item refs from %s', stream_id)
return
fetched_stream_ids[0] += 1
logging.info(' Loaded %s item refs from %s, %d streams left.',
'{:,}'.format(len(item_refs)),
stream_id,
len(stream_ids) - fetched_stream_ids[0])
item_refs_responses = base.worker.do_work(
lambda: FetchItemRefsWorker(
api, args.stream_items_chunk_size, args.max_items_per_stream),
stream_ids,
args.parallelism,
report_progress=report_item_refs_progress)
if args.additional_item_refs_file_path:
_load_additional_item_refs(
base.paths.normalize(args.additional_item_refs_file_path),
stream_ids,
item_refs_responses,
user_id)
logging.info('Saving item refs for %d streams',
len([i for i in item_refs_responses if i is not None]))
item_ids = set()
item_refs_total = 0
for stream_id, item_refs in itertools.izip(stream_ids, item_refs_responses):
if not item_refs:
continue
item_ids.update([item_ref.item_id for item_ref in item_refs])
item_refs_total += len(item_refs)
if stream_id == base.api.EXPLORE_STREAM_ID:
base.api.not_found_items_ids_to_ignore.update(
[i.item_id for i in item_refs])
stream = base.api.Stream(stream_id=stream_id, item_refs=item_refs)
stream_file_name = base.paths.stream_id_to_file_name(stream_id) + '.json'
stream_file_path = os.path.join(streams_directory, stream_file_name)
with open(stream_file_path, 'w') as stream_file:
stream_file.write(json.dumps(stream.to_json()))
return list(item_ids), item_refs_total
def _chunk_item_ids(item_ids, chunk_size):
# We have two different chunking goals:
# - Fetch items in large-ish chunks (ideally 250), to minimize HTTP request
# overhead per item
# - Write items in small-ish chunks (ideally around 10) per file, since having
# a file per item is too annoying to deal with from a file-system
# perspective. We also need the chunking into files to be deterministic, so
# that from an item ID we know what file to look for it in.
# We therefore first chunk the IDs by file path, and then group those chunks
# into ID chunks that we fetch.
# We write the file chunks immediately after fetching to decrease the
# in-memory working set of the script.
item_ids_by_path = {}
for item_id in item_ids:
item_id_file_path = base.paths.item_id_to_file_path('', item_id)
item_ids_by_path.setdefault(item_id_file_path, []).append(item_id)
current_item_ids_chunk = []
item_ids_chunks = [current_item_ids_chunk]
for item_ids_for_file_path in item_ids_by_path.values():
if len(current_item_ids_chunk) + len(item_ids_for_file_path) > chunk_size:
current_item_ids_chunk = []
item_ids_chunks.append(current_item_ids_chunk)
current_item_ids_chunk.extend(item_ids_for_file_path)
return item_ids_chunks
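# Rough sketch of the behaviour (illustrative numbers only): with
# chunk_size=250 and, say, 1,000 item IDs spread over ~100 file paths of
# roughly 10 IDs each, the loop above packs whole per-file groups into
# fetch chunks, so each chunk holds close to 250 IDs and every file path's
# IDs stay together in a single chunk.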
class FetchItemRefsWorker(base.worker.Worker):
_PROGRESS_REPORT_INTERVAL = 50000
def __init__(self, api, chunk_size, max_items_per_stream):
self._api = api
self._chunk_size = chunk_size
self._max_items_per_stream = max_items_per_stream
def work(self, stream_id):
result = []
continuation_token = None
next_progress_report = FetchItemRefsWorker._PROGRESS_REPORT_INTERVAL
while True:
try:
item_refs, continuation_token = self._api.fetch_item_refs(
stream_id,
count=self._chunk_size,
continuation_token=continuation_token)
except urllib2.HTTPError as e:
if e.code == 400 and 'Permission denied' in e.read():
logging.warn(' Permission denied when getting items for the stream '
'%s, it\'s most likely private now.', stream_id)
return None
else:
raise
result.extend(item_refs)
if len(result) >= next_progress_report:
logging.debug(' %s item refs fetched so far from %s',
'{:,}'.format(len(result)), stream_id)
next_progress_report += FetchItemRefsWorker._PROGRESS_REPORT_INTERVAL
if not continuation_token or (self._max_items_per_stream and
len(result) >= self._max_items_per_stream):
break
return result
class FetchWriteItemBodiesWorker(base.worker.Worker):
def __init__(self, api, items_directory):
self._api = api
self._items_directory = items_directory
def work(self, item_ids):
if not item_ids:
return 0
item_bodies_by_id = self._fetch_item_bodies(item_ids)
if not item_bodies_by_id:
return []
item_bodies_by_file_path = self._group_item_bodies(
item_bodies_by_id.values())
for file_path, item_bodies in item_bodies_by_file_path.items():
self._write_item_bodies(file_path, item_bodies)
return item_bodies_by_id.keys()
def _fetch_item_bodies(self, item_ids):
def fetch(hifi=True):
result = self._api.fetch_item_bodies(
item_ids,
format='atom-hifi' if hifi else 'atom',
# Turn off authentication in order to make the request cheaper/
# faster. Item bodies are not ACLed, we already have per-user tags
# via the stream item ref fetches, and will be fetching comments
# for shared items separately.
authenticated=False)
return result
try:
try:
return fetch()
except urllib2.HTTPError as e:
if e.code == 500:
logging.info(' 500 response when fetching %d items, retrying with '
'high-fidelity output turned off', len(item_ids))
return fetch(hifi=False)
else:
logging.error(' HTTP error %d when fetching items %s: %s',
e.code, ','.join([i.compact_form() for i in item_ids]), e.read())
return None
except ET.ParseError as e:
logging.info(' XML parse error when fetching %d items, retrying '
'with high-fidelity turned off', len(item_ids))
return fetch(hifi=False)
except urllib2.HTTPError as e:
if e.code == 500 and len(item_ids) > 1:
logging.info(' 500 response even with high-fidelity output turned '
'off, splitting %d chunk into two to find problematic items',
len(item_ids))
return self._fetch_item_bodies_split(item_ids)
else:
logging.error(' HTTP error %d when fetching %s items%s',
e.code, ','.join([i.compact_form() for i in item_ids]),
(': %s' % e.read()) if e.code != 500 else '')
return None
except:
logging.error(' Exception when fetching items %s',
','.join([i.compact_form() for i in item_ids]), exc_info=True)
return None
def _fetch_item_bodies_split(self, item_ids):
split_point = int(len(item_ids)/2)
first_chunk = item_ids[0:split_point]
second_chunk = item_ids[split_point:]
result = {}
if first_chunk:
first_chunk_result = self._fetch_item_bodies(first_chunk)
if first_chunk_result:
result.update(first_chunk_result)
if second_chunk:
second_chunk_result = self._fetch_item_bodies(second_chunk)
if second_chunk_result:
result.update(second_chunk_result)
return result
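# Sketch of the recursion (matching the code above, sizes illustrative): a
# failing chunk of 250 IDs is retried as two chunks of 125; any half that
# still returns a 500 is split again, so a single bad item eventually ends
# up isolated in a chunk of one and only that item's body is lost.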
def _group_item_bodies(self, item_bodies):
item_bodies_by_path = {}
for entry in item_bodies:
item_id_file_path = base.paths.item_id_to_file_path(
self._items_directory, entry.item_id)
item_bodies_by_path.setdefault(item_id_file_path, []).append(entry)
return item_bodies_by_path
def _write_item_bodies(self, file_path, item_bodies):
base.paths.ensure_exists(os.path.dirname(file_path))
feed_element = ET.Element('{%s}feed' % base.atom.ATOM_NS)
for entry in item_bodies:
feed_element.append(entry.element)
with open(file_path, 'w') as items_file:
ET.ElementTree(feed_element).write(
items_file,
xml_declaration=True,
encoding='utf-8')
class FetchCommentsWorker(base.worker.Worker):
def __init__(self, api, encoded_sharers, chunk_size):
self._api = api
self._encoded_sharers = encoded_sharers
self._chunk_size = chunk_size
def work(self, broadcast_stream_id):
result = {}
continuation_token = None
while True:
comments_by_item_id, continuation_token = self._api.fetch_comments(
broadcast_stream_id,
encoded_sharers=self._encoded_sharers,
count=self._chunk_size,
continuation_token=continuation_token)
result.update(comments_by_item_id)
if not continuation_token:
break
return result
if __name__ == '__main__':
main()
|
apache-2.0
| 8,641,984,040,435,914,000 | 39.672285 | 81 | 0.648833 | false |
luzheqi1987/nova-annotation
|
nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py
|
1
|
8260
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid as uuid_lib
from lxml import etree
from oslo.config import cfg
from oslo.utils import timeutils
from webob import exc
from nova.api.openstack.compute.contrib import cloudpipe as cloudpipe_v2
from nova.api.openstack.compute.plugins.v3 import cloudpipe as cloudpipe_v21
from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
project_id = str(uuid_lib.uuid4().hex)
uuid = str(uuid_lib.uuid4())
def fake_vpn_instance():
return {
'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': uuid, 'project_id': project_id,
}
def compute_api_get_all_empty(context, search_opts=None):
return []
def compute_api_get_all(context, search_opts=None):
return [fake_vpn_instance()]
def utils_vpn_ping(addr, port, timeout=0.05, session_id=None):
return True
class CloudpipeTestV21(test.NoDBTestCase):
cloudpipe = cloudpipe_v21
url = '/v2/fake/os-cloudpipe'
def setUp(self):
super(CloudpipeTestV21, self).setUp()
self.controller = self.cloudpipe.CloudpipeController()
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all_empty)
self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
def test_cloudpipe_list_no_network(self):
def fake_get_nw_info_for_instance(instance):
return {}
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
fake_get_nw_info_for_instance)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req)
response = {'cloudpipes': [{'project_id': project_id,
'instance_id': uuid,
'created_at': '1981-10-20T00:00:00Z'}]}
self.assertEqual(res_dict, response)
def test_cloudpipe_list(self):
def network_api_get(context, network_id):
self.assertEqual(context.project_id, project_id)
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
def fake_get_nw_info_for_instance(instance):
return fake_network.fake_get_instance_nw_info(self.stubs)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
fake_get_nw_info_for_instance)
self.stubs.Set(self.controller.network_api, "get",
network_api_get)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req)
response = {'cloudpipes': [{'project_id': project_id,
'internal_ip': '192.168.1.100',
'public_ip': '127.0.0.1',
'public_port': 22,
'state': 'running',
'instance_id': uuid,
'created_at': '1981-10-20T00:00:00Z'}]}
self.assertThat(res_dict, matchers.DictMatches(response))
def test_cloudpipe_create(self):
def launch_vpn_instance(context):
return ([fake_vpn_instance()], 'fake-reservation')
self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
launch_vpn_instance)
body = {'cloudpipe': {'project_id': project_id}}
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.create(req, body=body)
response = {'instance_id': uuid}
self.assertEqual(res_dict, response)
def test_cloudpipe_create_no_networks(self):
def launch_vpn_instance(context):
raise exception.NoMoreNetworks
self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
launch_vpn_instance)
body = {'cloudpipe': {'project_id': project_id}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body=body)
def test_cloudpipe_create_already_running(self):
def launch_vpn_instance(*args, **kwargs):
self.fail("Method should not have been called")
self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
launch_vpn_instance)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
body = {'cloudpipe': {'project_id': project_id}}
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.create(req, body=body)
response = {'instance_id': uuid}
self.assertEqual(res_dict, response)
def test_cloudpipe_create_with_bad_project_id_failed(self):
body = {'cloudpipe': {'project_id': 'bad.project.id'}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
class CloudpipeTestV2(CloudpipeTestV21):
cloudpipe = cloudpipe_v2
def test_cloudpipe_create_with_bad_project_id_failed(self):
pass
class CloudpipesXMLSerializerTestV2(test.NoDBTestCase):
def test_default_serializer(self):
serializer = cloudpipe_v2.CloudpipeTemplate()
exemplar = dict(cloudpipe=dict(instance_id='1234-1234-1234-1234'))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('cloudpipe', tree.tag)
for child in tree:
self.assertIn(child.tag, exemplar['cloudpipe'])
self.assertEqual(child.text, exemplar['cloudpipe'][child.tag])
def test_index_serializer(self):
serializer = cloudpipe_v2.CloudpipesTemplate()
exemplar = dict(cloudpipes=[
dict(
project_id='1234',
public_ip='1.2.3.4',
public_port='321',
instance_id='1234-1234-1234-1234',
created_at=timeutils.isotime(),
state='running'),
dict(
project_id='4321',
public_ip='4.3.2.1',
public_port='123',
state='pending')])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('cloudpipes', tree.tag)
self.assertEqual(len(exemplar['cloudpipes']), len(tree))
for idx, cl_pipe in enumerate(tree):
kp_data = exemplar['cloudpipes'][idx]
for child in cl_pipe:
self.assertIn(child.tag, kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
deserializer = wsgi.XMLDeserializer()
exemplar = dict(cloudpipe=dict(project_id='4321'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<cloudpipe><project_id>4321</project_id></cloudpipe>')
result = deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
|
apache-2.0
| -8,797,628,048,734,600,000 | 38.333333 | 78 | 0.602663 | false |
krieghan/kobold_python
|
kobold/tests/test_swap.py
|
1
|
3135
|
import asyncio
import unittest
from kobold import (
assertions,
compare,
doubles,
swap)
class Host(object):
def subject(self, arg, kwarg=None):
return "original subject"
async def subject_cr(self, arg, kwarg=None):
return "original subject"
class TestInstallProxy(unittest.TestCase):
def setUp(self):
self.safe_swap = swap.SafeSwap()
def tearDown(self):
self.safe_swap.rollback()
def test_proxy(self):
self.safe_swap.install_proxy(Host, 'subject')
host = Host()
returned = host.subject('some_arg', kwarg='some_kwarg')
self.assertEqual('original subject', returned)
self.assertEqual([((host, 'some_arg'), dict(kwarg='some_kwarg'))],
host.subject.calls)
def test_coroutine_proxy(self):
host = Host()
proxy = self.safe_swap.install_proxy(
Host,
'subject_cr')
loop = asyncio.get_event_loop()
result = loop.run_until_complete(
host.subject_cr('1'))
self.assertEqual(
'original subject',
result)
assertions.assert_match(
[((compare.DontCare(), '1'), {})],
proxy.calls)
class TestSwap(unittest.TestCase):
def setUp(self):
self.safe_swap = swap.SafeSwap()
def tearDown(self):
self.safe_swap.rollback()
def test_swap_and_rollback(self):
stub_function = doubles.StubFunction(returns=1)
self.safe_swap.swap(
Host,
'subject',
stub_function)
host = Host()
self.assertEqual(1, host.subject(1, kwarg=2))
self.safe_swap.rollback()
self.assertEqual('original subject', host.subject(arg=1, kwarg=1))
def test_default_original(self):
routable_stub = doubles.RoutableStubFunction()
routable_stub.add_route(
{'kwarg': 1},
stub_type='value',
stub_value='new subject')
self.safe_swap.swap(
Host,
'subject',
routable_stub,
default_original=True)
host = Host()
self.assertEqual(
'new subject',
host.subject('some_arg', kwarg=1))
self.assertEqual(
'original subject',
host.subject('some_arg', kwarg=2))
def test_default_original_coroutine(self):
loop = asyncio.get_event_loop()
routable_stub = doubles.RoutableStubCoroutine()
routable_stub.add_route(
{'kwarg': 1},
stub_type='value',
stub_value='new subject')
self.safe_swap.swap(
Host,
'subject_cr',
routable_stub,
default_original=True)
host = Host()
self.assertEqual(
'new subject',
loop.run_until_complete(
host.subject_cr('some_arg', kwarg=1)))
self.assertEqual(
'original subject',
loop.run_until_complete(
host.subject_cr('some_arg', kwarg=2)))
|
mit
| 5,797,207,600,288,745,000 | 26.5 | 74 | 0.540032 | false |
carlini/cleverhans
|
examples/multigpu_advtrain/utils_cifar.py
|
1
|
5805
|
"""
https://github.com/renmengye/revnet-public/blob/master/resnet/data/cifar_input.py
MIT License
Copyright (c) 2017 Mengye Ren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import cPickle as pkl
import numpy as np
from six.moves import range
import tensorflow as tf
# Global constants describing the CIFAR-10 data set.
IMAGE_HEIGHT = 32
IMAGE_WIDTH = 32
NUM_CLASSES = 10
NUM_CHANNEL = 3
NUM_TRAIN_IMG = 50000
NUM_TEST_IMG = 10000
def unpickle(file):
fo = open(file, 'rb')
dict = pkl.load(fo)
fo.close()
return dict
def read_CIFAR10(data_folder):
""" Reads and parses examples from CIFAR10 data files """
train_img = []
train_label = []
test_img = []
test_label = []
train_file_list = [
"data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4",
"data_batch_5"
]
test_file_list = ["test_batch"]
if "CIFAR10_PATH" in os.environ:
cifar10_path = os.environ["CIFAR10_PATH"]
else:
cifar10_path = 'cifar-10-batches-py'
for i in range(len(train_file_list)):
tmp_dict = unpickle(os.path.join(data_folder, cifar10_path,
train_file_list[i]))
train_img.append(tmp_dict["data"])
train_label.append(tmp_dict["labels"])
tmp_dict = unpickle(
os.path.join(data_folder, cifar10_path, test_file_list[0]))
test_img.append(tmp_dict["data"])
test_label.append(tmp_dict["labels"])
train_img = np.concatenate(train_img)
train_label = np.concatenate(train_label)
test_img = np.concatenate(test_img)
test_label = np.concatenate(test_label)
train_img = np.reshape(
train_img, [NUM_TRAIN_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
test_img = np.reshape(
test_img, [NUM_TEST_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
# change format from [B, C, H, W] to [B, H, W, C] for feeding to Tensorflow
train_img = np.transpose(train_img, [0, 2, 3, 1])
test_img = np.transpose(test_img, [0, 2, 3, 1])
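# After the transpose, train_img has shape (50000, 32, 32, 3) and test_img
# has shape (10000, 32, 32, 3).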
mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)
CIFAR10_data = {}
CIFAR10_data["train_img"] = train_img - mean_img
CIFAR10_data["test_img"] = test_img - mean_img
CIFAR10_data["train_label"] = train_label
CIFAR10_data["test_label"] = test_label
train_img = train_img - mean_img
test_img = test_img - mean_img
train_label = train_label
test_label = test_label
train_label = np.eye(10)[train_label]
test_label = np.eye(10)[test_label]
return train_img, train_label, test_img, test_label
def read_CIFAR100(data_folder):
""" Reads and parses examples from CIFAR100 python data files """
train_img = []
train_label = []
test_img = []
test_label = []
train_file_list = ["cifar-100-python/train"]
test_file_list = ["cifar-100-python/test"]
tmp_dict = unpickle(os.path.join(data_folder, train_file_list[0]))
train_img.append(tmp_dict["data"])
train_label.append(tmp_dict["fine_labels"])
tmp_dict = unpickle(os.path.join(data_folder, test_file_list[0]))
test_img.append(tmp_dict["data"])
test_label.append(tmp_dict["fine_labels"])
train_img = np.concatenate(train_img)
train_label = np.concatenate(train_label)
test_img = np.concatenate(test_img)
test_label = np.concatenate(test_label)
train_img = np.reshape(
train_img, [NUM_TRAIN_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
test_img = np.reshape(
test_img, [NUM_TEST_IMG, NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
# change format from [B, C, H, W] to [B, H, W, C] for feeding to Tensorflow
train_img = np.transpose(train_img, [0, 2, 3, 1])
test_img = np.transpose(test_img, [0, 2, 3, 1])
mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)
CIFAR100_data = {}
CIFAR100_data["train_img"] = train_img - mean_img
CIFAR100_data["test_img"] = test_img - mean_img
CIFAR100_data["train_label"] = train_label
CIFAR100_data["test_label"] = test_label
return CIFAR100_data
def cifar_tf_preprocess(inp, random_crop=True, random_flip=True, whiten=True,
br_sat_con=False):
image_size = 32
image = inp
if random_crop:
image = tf.image.resize_image_with_crop_or_pad(inp, image_size + 4,
image_size + 4)
image = tf.random_crop(image, [image_size, image_size, 3])
if random_flip:
image = tf.image.random_flip_left_right(image)
# Brightness/saturation/constrast provides small gains .2%~.5% on cifar.
if br_sat_con:
image = tf.image.random_brightness(image, max_delta=63. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
if whiten:
image = tf.image.per_image_standardization(image)
return image
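# Minimal usage sketch (assumed names, not part of the original module):
# inp = tf.placeholder(tf.float32, [32, 32, 3], name="raw_image")
# train_image = cifar_tf_preprocess(inp)                      # crop/flip/whiten
# eval_image = cifar_tf_preprocess(inp, random_crop=False,
#                                  random_flip=False)         # whiten only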
|
mit
| 5,248,748,673,369,022,000 | 31.982955 | 81 | 0.681137 | false |
tensorflow/tfx
|
tfx/benchmarks/benchmark_utils.py
|
1
|
1823
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions shared across the different benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
def read_schema(proto_path):
"""Reads a TF Metadata schema from the given text proto file."""
result = schema_pb2.Schema()
with open(proto_path) as fp:
text_format.Parse(fp.read(), result)
return result
def get_dataset(name, base_dir=None):
"""Imports the given dataset and returns an instance of it."""
lib = importlib.import_module("..datasets.%s.dataset" % name, __name__)
return lib.get_dataset(base_dir)
def batched_iterator(records, batch_size):
"""Groups elements in the given list into batches of the given size.
Args:
records: List of elements to batch.
batch_size: Size of each batch.
Yields:
Lists with batch_size elements from records. Every list yielded except the
last will contain exactly batch_size elements.
"""
batch = []
for i, x in enumerate(records):
batch.append(x)
if (i + 1) % batch_size == 0:
yield batch
batch = []
if batch:
yield batch
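# Illustrative example (not in the original module):
# list(batched_iterator(range(5), 2)) yields [[0, 1], [2, 3], [4]] -- every
# batch has batch_size elements except possibly the last one.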
|
apache-2.0
| -6,241,290,098,297,118,000 | 30.982456 | 78 | 0.721338 | false |
felexx90/service.subtitles.subdivx
|
service.py
|
1
|
21861
|
# -*- coding: utf-8 -*-
# Subdivx.com subtitles, based on a mod of Undertext subtitles
# Adaptation: enric_godes@hotmail.com | Please use email address for your
# comments
# Port to XBMC 13 Gotham subtitles infrastructure: cramm, Mar 2014
from __future__ import print_function
from json import loads
import os
from os.path import join as pjoin
import os.path
from pprint import pformat
import re
import shutil
import sys
import tempfile
import time
from unicodedata import normalize
from urllib import FancyURLopener, unquote, quote_plus, urlencode, quote
from urlparse import parse_qs
try:
import xbmc
except ImportError:
if len(sys.argv) > 1 and sys.argv[1] == 'test':
import unittest # NOQA
try:
import mock # NOQA
except ImportError:
print("You need to install the mock Python library to run "
"unit tests.\n")
sys.exit(1)
else:
from xbmc import (LOGDEBUG, LOGINFO, LOGNOTICE, LOGWARNING, LOGERROR,
LOGSEVERE, LOGFATAL, LOGNONE)
import xbmcaddon
import xbmcgui
import xbmcplugin
import xbmcvfs
__addon__ = xbmcaddon.Addon()
__author__ = __addon__.getAddonInfo('author')
__scriptid__ = __addon__.getAddonInfo('id')
__scriptname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__cwd__ = xbmc.translatePath(__addon__.getAddonInfo('path').decode("utf-8"))
__profile__ = xbmc.translatePath(__addon__.getAddonInfo('profile').decode("utf-8"))
MAIN_SUBDIVX_URL = "http://www.subdivx.com/"
SEARCH_PAGE_URL = MAIN_SUBDIVX_URL + \
"index.php?accion=5&masdesc=&oxdown=1&pg=%(page)s&buscar=%(query)s"
INTERNAL_LINK_URL_BASE = "plugin://%s/?"
SUB_EXTS = ['srt', 'sub', 'txt']
HTTP_USER_AGENT = "User-Agent=Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 ( .NET CLR 3.5.30729)"
PAGE_ENCODING = 'latin1'
# ============================
# Regular expression patterns
# ============================
# Subtitle pattern example:
# <div id="menu_titulo_buscador"><a class="titulo_menu_izq" href="http://www.subdivx.com/X6XMjEzMzIyX-iron-man-2-2010.html">Iron Man 2 (2010)</a></div>
# <img src="img/calif5.gif" class="detalle_calif">
# </div><div id="buscador_detalle">
# <div id="buscador_detalle_sub">Para la versión Iron.Man.2.2010.480p.BRRip.XviD.AC3-EVO, sacados de acá. ¡Disfruten!</div><div id="buscador_detalle_sub_datos"><b>Downloads:</b> 4673 <b>Cds:</b> 1 <b>Comentarios:</b> <a rel="nofollow" href="popcoment.php?idsub=MjEzMzIy" onclick="return hs.htmlExpand(this, { objectType: 'iframe' } )">14</a> <b>Formato:</b> SubRip <b>Subido por:</b> <a class="link1" href="http://www.subdivx.com/X9X303157">TrueSword</a> <img src="http://www.subdivx.com/pais/2.gif" width="16" height="12"> <b>el</b> 06/09/2010 </a></div></div>
# <div id="menu_detalle_buscador">
SUBTITLE_RE = re.compile(r'''<a\s+class="titulo_menu_izq2?"\s+
href="http://www.subdivx.com/(?P<subdivx_id>.+?)\.html">
.+?<img\s+src="img/calif(?P<calif>\d)\.gif"\s+class="detalle_calif"\s+name="detalle_calif">
.+?<div\s+id="buscador_detalle_sub">(?P<comment>.*?)</div>
.+?<b>Downloads:</b>(?P<downloads>.+?)
<b>Cds:</b>
.+?<b>Subido\ por:</b>\s*<a.+?>(?P<uploader>.+?)</a>.+?</div></div>''',
re.IGNORECASE | re.DOTALL | re.VERBOSE | re.UNICODE |
re.MULTILINE)
# Named groups:
# 'subdivx_id': ID to fetch the subs files
# 'comment': Translation author comment, may contain filename
# 'downloads': Downloads, used for ratings
DETAIL_PAGE_LINK_RE = re.compile(r'<a rel="nofollow" class="detalle_link" href="http://www.subdivx.com/(?P<id>.*?)"><b>Bajar</b></a>',
re.IGNORECASE | re.DOTALL | re.MULTILINE | re.UNICODE)
DOWNLOAD_LINK_RE = re.compile(r'bajar.php\?id=(?P<id>.*?)&u=(?P<u>[^"\']+?)', re.IGNORECASE |
re.DOTALL | re.MULTILINE | re.UNICODE)
# ==========
# Functions
# ==========
def is_subs_file(fn):
"""Detect if the file has an extension we recognise as subtitle."""
ext = fn.split('.')[-1]
return ext.upper() in [e.upper() for e in SUB_EXTS]
def log(msg, level=LOGDEBUG):
fname = sys._getframe(1).f_code.co_name
s = u"SUBDIVX - %s: %s" % (fname, msg)
xbmc.log(s.encode('utf-8'), level=level)
def get_url(url):
class MyOpener(FancyURLopener):
# version = HTTP_USER_AGENT
version = ''
my_urlopener = MyOpener()
log(u"Fetching %s" % url)
try:
response = my_urlopener.open(url)
content = response.read()
except Exception:
log(u"Failed to fetch %s" % url, level=LOGWARNING)
content = None
return content
def get_all_subs(searchstring, languageshort, file_orig_path):
if languageshort != "es":
return []
subs_list = []
page = 1
while True:
log(u"Trying page %d" % page)
url = SEARCH_PAGE_URL % {'page': page,
'query': quote_plus(searchstring)}
content = get_url(url)
if content is None or not SUBTITLE_RE.search(content):
break
for match in SUBTITLE_RE.finditer(content):
groups = match.groupdict()
subdivx_id = groups['subdivx_id']
dls = re.sub(r'[,.]', '', groups['downloads'])
downloads = int(dls)
descr = groups['comment']
# Remove new lines
descr = re.sub('\n', ' ', descr)
# Remove Google Ads
descr = re.sub(r'<script.+?script>', '', descr,
re.IGNORECASE | re.DOTALL | re.MULTILINE |
re.UNICODE)
# Remove HTML tags
descr = re.sub(r'<[^<]+?>', '', descr)
descr = descr.rstrip(' \t')
# If our actual video file's name appears in the description
# then set sync to True because it has better chances of its
# synchronization to match
_, fn = os.path.split(file_orig_path)
name, _ = os.path.splitext(fn)
sync = re.search(re.escape(name), descr, re.I) is not None
try:
log(u'Subtitles found: (subdivx_id = %s) "%s"' % (subdivx_id,
descr))
except Exception:
pass
item = {
'descr': descr.decode(PAGE_ENCODING),
'sync': sync,
'subdivx_id': subdivx_id.decode(PAGE_ENCODING),
'uploader': groups['uploader'],
'downloads': downloads,
'score': int(groups['calif']),
}
subs_list.append(item)
page += 1
# Put subs with sync=True at the top
subs_list = sorted(subs_list, key=lambda s: s['sync'], reverse=True)
return subs_list
def compute_ratings(subs_list):
"""
Calculate the rating figures (from zero to five) in a relative fashion
based on number of downloads.
This is later converted by XBMC/Kodi in a zero to five stars GUI.
Ideally, we should be able to use a smarter number instead of just the
download count of every subtitle but it seems in Subdivx the 'score' value
has no reliable value and there isn't a user ranking system in place
we could use to deduce the quality of a contribution.
"""
max_dl_count = 0
for sub in subs_list:
dl_cnt = sub.get('downloads', 0)
if dl_cnt > max_dl_count:
max_dl_count = dl_cnt
for sub in subs_list:
if max_dl_count:
sub['rating'] = int((sub['downloads'] / float(max_dl_count)) * 5)
else:
sub['rating'] = 0
log(u"subs_list = %s" % pformat(subs_list))
def append_subtitle(item, filename):
if __addon__.getSetting('show_nick_in_place_of_lang') == 'true':
item_label = item['uploader']
else:
item_label = 'Spanish'
listitem = xbmcgui.ListItem(
label=item_label,
label2=item['descr'],
iconImage=str(item['rating']),
thumbnailImage=''
)
listitem.setProperty("sync", 'true' if item["sync"] else 'false')
listitem.setProperty("hearing_imp",
'true' if item.get("hearing_imp", False) else 'false')
# Below arguments are optional, they can be used to pass any info needed in
# download function. Anything after "action=download&" will be sent to
# addon once user clicks listed subtitle to download
url = INTERNAL_LINK_URL_BASE % __scriptid__
xbmc_url = build_xbmc_item_url(url, item, filename)
# Add it to list, this can be done as many times as needed for all
# subtitles found
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
url=xbmc_url,
listitem=listitem,
isFolder=False)
def build_xbmc_item_url(url, item, filename):
"""Return an internal Kodi pseudo-url for the provided sub search result"""
try:
xbmc_url = url + urlencode((('id', item['subdivx_id']),
('filename', filename)))
except UnicodeEncodeError:
# Well, go back to trying it with its original latin1 encoding
try:
subdivx_id = item['subdivx_id'].encode(PAGE_ENCODING)
xbmc_url = url + urlencode((('id', subdivx_id),
('filename', filename)))
except Exception:
log('Problematic subdivx_id: %s' % subdivx_id)
raise
return xbmc_url
def Search(item):
"""Called when subtitle download is requested from XBMC."""
log(u'item = %s' % pformat(item))
# Do what's needed to get the list of subtitles from service site
# use item["some_property"] that was set earlier.
# Once done, set xbmcgui.ListItem() below and pass it to
# xbmcplugin.addDirectoryItem()
file_original_path = item['file_original_path']
title = item['title']
tvshow = item['tvshow']
season = item['season']
episode = item['episode']
if item['manual_search']:
searchstring = unquote(item['manual_search_string'])
elif tvshow:
searchstring = "%s S%#02dE%#02d" % (tvshow, int(season), int(episode))
else:
searchstring = title
log(u"Search string = %s" % searchstring)
subs_list = get_all_subs(searchstring, "es", file_original_path)
compute_ratings(subs_list)
for sub in subs_list:
append_subtitle(sub, file_original_path)
def _wait_for_extract(workdir, base_filecount, base_mtime, limit):
waittime = 0
filecount = base_filecount
newest_mtime = base_mtime
while (filecount == base_filecount and waittime < limit and
newest_mtime == base_mtime):
# wait 1 second to let the builtin function 'XBMC.Extract' unpack
time.sleep(1)
files = os.listdir(workdir)
filecount = len(files)
# Determine if there is a newer file created (marks that the extraction
# has completed)
for fname in files:
if not is_subs_file(fname):
continue
fname = fname
mtime = os.stat(pjoin(workdir, fname)).st_mtime
if mtime > newest_mtime:
newest_mtime = mtime
waittime += 1
return waittime != limit
def _handle_compressed_subs(workdir, compressed_file):
MAX_UNZIP_WAIT = 15
files = os.listdir(workdir)
filecount = len(files)
max_mtime = 0
# Determine the newest file
for fname in files:
if not is_subs_file(fname):
continue
mtime = os.stat(pjoin(workdir, fname)).st_mtime
if mtime > max_mtime:
max_mtime = mtime
base_mtime = max_mtime
# Wait 2 seconds so that the unpacked files are at least 1 second newer
time.sleep(2)
xbmc.executebuiltin("XBMC.Extract(%s, %s)" % (
compressed_file.encode("utf-8"),
workdir.encode("utf-8")))
retval = False
if _wait_for_extract(workdir, filecount, base_mtime, MAX_UNZIP_WAIT):
files = os.listdir(workdir)
for fname in files:
# There could be more subtitle files, so make
# sure we get the newly created subtitle file
if not is_subs_file(fname):
continue
fname = fname
fpath = pjoin(workdir, fname)
if os.stat(fpath).st_mtime > base_mtime:
# unpacked file is a newly created subtitle file
retval = True
break
if retval:
log(u"Unpacked subtitles file '%s'" % normalize_string(fpath))
else:
log(u"Failed to unpack subtitles", level=LOGSEVERE)
return retval, fpath
def rmgeneric(path, __func__):
try:
__func__(path)
log(u"Removed %s" % normalize_string(path))
except OSError, (errno, strerror):
log(u"Error removing %(path)s, %(error)s" % {'path' : normalize_string(path), 'error': strerror }, level=LOGFATAL)
def removeAll(dir):
if not os.path.isdir(dir):
return
files = os.listdir(dir)
for file in files:
if os.path.isdir(pjoin(dir, file)):
removeAll(file)
else:
f=os.remove
rmgeneric(pjoin(dir, file), f)
f=os.rmdir
rmgeneric(dir, f)
def ensure_workdir(workdir):
# Cleanup temp dir, we recommend you download/unzip your subs in temp
# folder and pass that to XBMC to copy and activate
if xbmcvfs.exists(workdir):
removeAll(workdir)
xbmcvfs.mkdirs(workdir)
return xbmcvfs.exists(workdir)
def _save_subtitles(workdir, content):
header = content[:4]
if header == 'Rar!':
type = '.rar'
is_compressed = True
elif header == 'PK\x03\x04':
type = '.zip'
is_compressed = True
else:
# Never found/downloaded an unpacked subtitles file, but just to be
# sure ...
# Assume unpacked sub file is a '.srt'
type = '.srt'
is_compressed = False
tmp_fname = pjoin(workdir, "subdivx" + type)
log(u"Saving subtitles to '%s'" % tmp_fname)
try:
with open(tmp_fname, "wb") as fh:
fh.write(content)
except Exception:
log(u"Failed to save subtitles to '%s'" % tmp_fname, level=LOGSEVERE)
return None
else:
if is_compressed:
rval, fname = _handle_compressed_subs(workdir, tmp_fname)
if rval:
return fname
else:
return tmp_fname
return None
def Download(subdivx_id, workdir):
"""Called when subtitle download is requested from XBMC."""
# Get the page with the subtitle link,
# i.e. http://www.subdivx.com/X6XMjE2NDM1X-iron-man-2-2010
subtitle_detail_url = MAIN_SUBDIVX_URL + quote(subdivx_id)
# Fetch and scrape [new] intermediate page
html_content = get_url(subtitle_detail_url)
if html_content is None:
log(u"No content found in selected subtitle intermediate detail/final download page",
level=LOGFATAL)
return []
match = DETAIL_PAGE_LINK_RE.search(html_content)
if match is None:
log(u"Intermediate detail page for selected subtitle or expected content not found. Handling it as final download page",
level=LOGWARNING)
else:
id_ = match.group('id')
# Fetch and scrape final page
html_content = get_url(MAIN_SUBDIVX_URL + id_)
if html_content is None:
log(u"No content found in final download page", level=LOGFATAL)
return []
match = DOWNLOAD_LINK_RE.search(html_content)
if match is None:
log(u"Expected content not found in final download page",
level=LOGFATAL)
return []
id_, u = match.group('id', 'u')
actual_subtitle_file_url = MAIN_SUBDIVX_URL + "bajar.php?id=" + id_ + "&u=" + u
content = get_url(actual_subtitle_file_url)
if content is None:
log(u"Got no content when downloading actual subtitle file",
level=LOGFATAL)
return []
saved_fname = _save_subtitles(workdir, content)
if saved_fname is None:
return []
return [saved_fname]
def _double_dot_fix_hack(video_filename):
log(u"video_filename = %s" % video_filename)
work_path = video_filename
if _subtitles_setting('storagemode'):
custom_subs_path = _subtitles_setting('custompath')
if custom_subs_path:
_, fname = os.path.split(video_filename)
work_path = pjoin(custom_subs_path, fname)
log(u"work_path = %s" % work_path)
parts = work_path.rsplit('.', 1)
if len(parts) > 1:
rest = parts[0]
bad = rest + '..' + 'srt'
old = rest + '.es.' + 'srt'
if xbmcvfs.exists(bad):
log(u"%s exists" % bad)
if xbmcvfs.exists(old):
log(u"%s exists, renaming" % old)
xbmcvfs.delete(old)
log(u"renaming %s to %s" % (bad, old))
xbmcvfs.rename(bad, old)
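# Example of the rename this hack performs (hypothetical paths): for a video
# '/videos/Movie.mkv', Kodi may briefly leave '/videos/Movie..srt' on disk;
# the code above replaces any existing '/videos/Movie.es.srt' with it so the
# Spanish subtitle keeps its proper language suffix.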
def _subtitles_setting(name):
"""
Uses XBMC/Kodi JSON-RPC API to retrieve subtitles location settings values.
"""
command = '''{
"jsonrpc": "2.0",
"id": 1,
"method": "Settings.GetSettingValue",
"params": {
"setting": "subtitles.%s"
}
}'''
result = xbmc.executeJSONRPC(command % name)
py = loads(result)
if 'result' in py and 'value' in py['result']:
return py['result']['value']
else:
raise ValueError
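# Illustrative calls (returned values depend on the user's Kodi setup):
# _subtitles_setting('storagemode') returns the configured storage mode and
# _subtitles_setting('custompath') the custom subtitle folder, mirroring how
# both are used in _double_dot_fix_hack() above.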
def normalize_string(str):
return normalize('NFKD', unicode(unicode(str, 'utf-8'))).encode('ascii',
'ignore')
def get_params(argv):
params = {}
qs = argv[2].lstrip('?')
if qs:
if qs.endswith('/'):
qs = qs[:-1]
parsed = parse_qs(qs)
for k, v in parsed.iteritems():
params[k] = v[0]
return params
def debug_dump_path(victim, name):
t = type(victim)
xbmc.log("%s (%s): %s" % (name, t, victim), level=LOGDEBUG)
def main():
"""Main entry point of the script when it is invoked by XBMC."""
# Get parameters from XBMC and launch actions
params = get_params(sys.argv)
action = params.get('action', 'Unknown')
xbmc.log(u"SUBDIVX - Version: %s -- Action: %s" % (__version__, action), level=LOGINFO)
if action in ('search', 'manualsearch'):
item = {
'temp': False,
'rar': False,
'year': xbmc.getInfoLabel("VideoPlayer.Year"),
'season': str(xbmc.getInfoLabel("VideoPlayer.Season")),
'episode': str(xbmc.getInfoLabel("VideoPlayer.Episode")),
'tvshow': normalize_string(xbmc.getInfoLabel("VideoPlayer.TVshowtitle")),
# Try to get original title
'title': normalize_string(xbmc.getInfoLabel("VideoPlayer.OriginalTitle")),
# Full path of a playing file
'file_original_path': unquote(xbmc.Player().getPlayingFile().decode('utf-8')),
'3let_language': [],
'2let_language': [],
'manual_search': 'searchstring' in params,
}
if 'searchstring' in params:
item['manual_search_string'] = params['searchstring']
for lang in unquote(params['languages']).decode('utf-8').split(","):
item['3let_language'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_2))
item['2let_language'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_1))
if not item['title']:
# No original title, get just Title
item['title'] = normalize_string(xbmc.getInfoLabel("VideoPlayer.Title"))
if "s" in item['episode'].lower():
# Check if season is "Special"
item['season'] = "0"
item['episode'] = item['episode'][-1:]
if "http" in item['file_original_path']:
item['temp'] = True
elif "rar://" in item['file_original_path']:
item['rar'] = True
item['file_original_path'] = os.path.dirname(item['file_original_path'][6:])
elif "stack://" in item['file_original_path']:
stackPath = item['file_original_path'].split(" , ")
item['file_original_path'] = stackPath[0][8:]
Search(item)
elif action == 'download':
debug_dump_path(xbmc.translatePath(__addon__.getAddonInfo('profile')),
"xbmc.translatePath(__addon__.getAddonInfo('profile'))")
debug_dump_path(__profile__, '__profile__')
xbmcvfs.mkdirs(__profile__)
workdir = pjoin(__profile__, 'temp')
# Make sure it ends with a path separator (Kodi 14)
workdir = workdir + os.path.sep
workdir = xbmc.translatePath(workdir)
ensure_workdir(workdir)
# We pickup our arguments sent from the Search() function
subs = Download(params["id"], workdir)
# We can return more than one subtitle for multi CD versions, for now
# we are still working out how to handle that in XBMC core
for sub in subs:
listitem = xbmcgui.ListItem(label=sub)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=sub,
listitem=listitem, isFolder=False)
# Send end of directory to XBMC
xbmcplugin.endOfDirectory(int(sys.argv[1]))
if (action == 'download' and
__addon__.getSetting('show_nick_in_place_of_lang') == 'true'):
time.sleep(3)
_double_dot_fix_hack(params['filename'])
if __name__ == '__main__':
main()
|
gpl-2.0
| 526,871,402,262,129,540 | 35.551839 | 562 | 0.57965 | false |
anandpdoshi/erpnext
|
erpnext/manufacturing/doctype/production_planning_tool/production_planning_tool.py
|
1
|
17777
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and
from frappe import msgprint, _
from frappe.model.document import Document
from erpnext.manufacturing.doctype.bom.bom import validate_bom_no
from erpnext.manufacturing.doctype.production_order.production_order import get_item_details
class ProductionPlanningTool(Document):
def __init__(self, arg1, arg2=None):
super(ProductionPlanningTool, self).__init__(arg1, arg2)
self.item_dict = {}
def clear_table(self, table_name):
self.set(table_name, [])
def validate_company(self):
if not self.company:
frappe.throw(_("Please enter Company"))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += " and so.transaction_date >= %(from_date)s"
if self.to_date:
so_filter += " and so.transaction_date <= %(to_date)s"
if self.customer:
so_filter += " and so.customer = %(customer)s"
if self.project:
so_filter += " and so.project = %(project)s"
if self.fg_item:
item_filter += " and so_item.item_code = %(item)s"
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.base_grand_total
from `tabSales Order` so, `tabSales Order Item` so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status != "Stopped"
and so.company = %(company)s
and so_item.qty > so_item.delivered_qty {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1)
or exists (select name from `tabPacked Item` pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1)))
""".format(so_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"customer": self.customer,
"project": self.project,
"item": self.fg_item,
"company": self.company
}, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.clear_table("sales_orders")
so_list = []
for r in open_so:
if cstr(r['name']) not in so_list:
pp_so = self.append('sales_orders', {})
pp_so.sales_order = r['name']
pp_so.sales_order_date = cstr(r['transaction_date'])
pp_so.customer = cstr(r['customer'])
pp_so.grand_total = flt(r['base_grand_total'])
def get_pending_material_requests(self):
""" Pull Material Requests that are pending based on criteria selected"""
mr_filter = item_filter = ""
if self.from_date:
mr_filter += " and mr.transaction_date >= %(from_date)s"
if self.to_date:
mr_filter += " and mr.transaction_date <= %(to_date)s"
if self.warehouse:
mr_filter += " and mr_item.warehouse = %(warehouse)s"
if self.fg_item:
item_filter += " and mr_item.item_code = %(item)s"
pending_mr = frappe.db.sql("""
select distinct mr.name, mr.transaction_date
from `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
where mr_item.parent = mr.name
and mr.material_request_type = "Manufacture"
and mr.docstatus = 1
and mr_item.qty > ifnull(mr_item.ordered_qty,0) {0} {1}
and (exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1))
""".format(mr_filter, item_filter), {
"from_date": self.from_date,
"to_date": self.to_date,
"warehouse": self.warehouse,
"item": self.fg_item
}, as_dict=1)
self.add_mr_in_table(pending_mr)
def add_mr_in_table(self, pending_mr):
""" Add Material Requests in the table"""
self.clear_table("material_requests")
mr_list = []
for r in pending_mr:
if cstr(r['name']) not in mr_list:
mr = self.append('material_requests', {})
mr.material_request = r['name']
mr.material_request_date = cstr(r['transaction_date'])
def get_items(self):
if self.get_items_from == "Sales Order":
self.get_so_items()
elif self.get_items_from == "Material Request":
self.get_mr_items()
def get_so_items(self):
so_list = [d.sales_order for d in self.get('sales_orders') if d.sales_order]
if not so_list:
msgprint(_("Please enter Sales Orders in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and so_item.item_code = "{0}"'.format(frappe.db.escape(self.fg_item))
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - delivered_qty) as pending_qty
from `tabSales Order Item` so_item
where parent in (%s) and docstatus = 1 and qty > delivered_qty
and exists (select name from `tabBOM` bom where bom.item=so_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.fg_item:
item_condition = ' and pi.item_code = "{0}"'.format(frappe.db.escape(self.fg_item))
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - so_item.delivered_qty) * pi.qty) / so_item.qty)
as pending_qty
from `tabSales Order Item` so_item, `tabPacked Item` pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and so_item.qty > so_item.delivered_qty
and exists (select name from `tabBOM` bom where bom.item=pi.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
self.add_items(items + packed_items)
def get_mr_items(self):
mr_list = [d.material_request for d in self.get('material_requests') if d.material_request]
if not mr_list:
msgprint(_("Please enter Material Requests in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and mr_item.item_code = "' + frappe.db.escape(self.fg_item, percent=False) + '"'
items = frappe.db.sql("""select distinct parent, name, item_code, warehouse,
(qty - ordered_qty) as pending_qty
from `tabMaterial Request Item` mr_item
where parent in (%s) and docstatus = 1 and qty > ordered_qty
and exists (select name from `tabBOM` bom where bom.item=mr_item.item_code
and bom.is_active = 1) %s""" % \
(", ".join(["%s"] * len(mr_list)), item_condition), tuple(mr_list), as_dict=1)
self.add_items(items)
def add_items(self, items):
self.clear_table("items")
for p in items:
item_details = get_item_details(p['item_code'])
pi = self.append('items', {})
pi.warehouse = p['warehouse']
pi.item_code = p['item_code']
pi.description = item_details and item_details.description or ''
pi.stock_uom = item_details and item_details.stock_uom or ''
pi.bom_no = item_details and item_details.bom_no or ''
pi.planned_qty = flt(p['pending_qty'])
pi.pending_qty = flt(p['pending_qty'])
if self.get_items_from == "Sales Order":
pi.sales_order = p['parent']
elif self.get_items_from == "Material Request":
pi.material_request = p['parent']
pi.material_request_item = p['name']
def validate_data(self):
self.validate_company()
for d in self.get('items'):
if not d.bom_no:
				frappe.throw(_("Please select BOM for Item in Row {0}").format(d.idx))
else:
validate_bom_no(d.item_code, d.bom_no)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def raise_production_orders(self):
"""It will raise production order (Draft) for all distinct FG items"""
self.validate_data()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "planned_qty")
items = self.get_production_items()
pro_list = []
frappe.flags.mute_messages = True
for key in items:
production_order = self.create_production_order(items[key])
if production_order:
pro_list.append(production_order)
frappe.flags.mute_messages = False
if pro_list:
pro_list = ["""<a href="#Form/Production Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in pro_list]
msgprint(_("{0} created").format(comma_and(pro_list)))
		else:
msgprint(_("No Production Orders created"))
def get_production_items(self):
item_dict = {}
for d in self.get("items"):
			item_details = {
"production_item" : d.item_code,
"sales_order" : d.sales_order,
"material_request" : d.material_request,
"material_request_item" : d.material_request_item,
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"wip_warehouse" : "",
"fg_warehouse" : d.warehouse,
"status" : "Draft",
}
""" Club similar BOM and item for processing in case of Sales Orders """
if self.get_items_from == "Material Request":
item_details.update({
"qty": d.planned_qty
})
item_dict[(d.item_code, d.material_request_item, d.warehouse)] = item_details
else:
item_details.update({
"qty":flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),{})
.get("qty")) + flt(d.planned_qty)
})
item_dict[(d.item_code, d.sales_order, d.warehouse)] = item_details
return item_dict
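	# Illustrative note (not part of the original code, values invented): when
	# planning from Sales Orders, two rows for the same FG item, sales order and
	# warehouse are clubbed under one key, e.g.
	#   item_dict[("FG-001", "SO-0001", "Stores - C")]["qty"] == 10 + 5
	# whereas for "Material Request" the key uses material_request_item, so each
	# row keeps its own planned_qty and nothing is clubbed.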
def create_production_order(self, item_dict):
"""Create production order. Called from Production Planning Tool"""
from erpnext.manufacturing.doctype.production_order.production_order import OverProductionError, get_default_warehouse
warehouse = get_default_warehouse()
pro = frappe.new_doc("Production Order")
pro.update(item_dict)
pro.set_production_order_operations()
if warehouse:
pro.wip_warehouse = warehouse.get('wip_warehouse')
if not pro.fg_warehouse:
pro.fg_warehouse = warehouse.get('fg_warehouse')
try:
pro.insert()
return pro.name
except OverProductionError:
pass
def get_so_wise_planned_qty(self):
"""
bom_dict {
bom_no: ['sales_order', 'qty']
}
"""
bom_dict = {}
for d in self.get("items"):
if self.get_items_from == "Material Request":
bom_dict.setdefault(d.bom_no, []).append([d.material_request_item, flt(d.planned_qty)])
else:
bom_dict.setdefault(d.bom_no, []).append([d.sales_order, flt(d.planned_qty)])
return bom_dict
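	# Hedged illustration (values invented): for two planning rows sharing one
	# BOM, the method above returns
	#   {"BOM-FG-001": [["SO-0001", 10.0], ["SO-0002", 4.0]]}
	# i.e. one [sales_order, qty] pair per row (or [material_request_item, qty]
	# when the items were pulled from Material Requests).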
def download_raw_materials(self):
""" Create csv data for required raw material to produce finished goods"""
self.validate_data()
bom_dict = self.get_so_wise_planned_qty()
self.get_raw_materials(bom_dict)
return self.get_csv()
def get_raw_materials(self, bom_dict):
""" Get raw materials considering sub-assembly items
{
"item_code": [qty_required, description, stock_uom, min_order_qty]
}
"""
item_list = []
for bom, so_wise_qty in bom_dict.items():
bom_wise_item_details = {}
if self.use_multi_level_bom:
				# get all raw materials, including the children of sub-assembly items
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
for d in frappe.db.sql("""select fb.item_code,
ifnull(sum(fb.qty/ifnull(bom.quantity, 1)), 0) as qty,
fb.description, fb.stock_uom, item.min_order_qty
from `tabBOM Explosion Item` fb, `tabBOM` bom, `tabItem` item
where bom.name = fb.parent and item.name = fb.item_code
and (item.is_sub_contracted_item = 0 or ifnull(item.default_bom, "")="")
and item.is_stock_item = 1
and fb.docstatus<2 and bom.name=%s
group by fb.item_code, fb.stock_uom""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
				# Get all raw materials treating SA items themselves as raw materials,
				# i.e. without expanding the children of the SA items
for d in frappe.db.sql("""select bom_item.item_code,
ifnull(sum(bom_item.qty/ifnull(bom.quantity, 1)), 0) as qty,
bom_item.description, bom_item.stock_uom, item.min_order_qty
from `tabBOM Item` bom_item, `tabBOM` bom, tabItem item
where bom.name = bom_item.parent and bom.name = %s and bom_item.docstatus < 2
and bom_item.item_code = item.name
and item.is_stock_item = 1
group by bom_item.item_code""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
for item, item_details in bom_wise_item_details.items():
for so_qty in so_wise_qty:
item_list.append([item, flt(item_details.qty) * so_qty[1], item_details.description,
item_details.stock_uom, item_details.min_order_qty, so_qty[0]])
self.make_items_dict(item_list)
def make_items_dict(self, item_list):
for i in item_list:
self.item_dict.setdefault(i[0], []).append([flt(i[1]), i[2], i[3], i[4], i[5]])
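	# Hedged illustration of the structure built above (values invented):
	#   self.item_dict == {"RM-001": [[10.0, "Raw material", "Nos", 5.0, "SO-0001"],
	#                                 [ 4.0, "Raw material", "Nos", 5.0, "SO-0002"]]}
	# Index 0 is qty, 3 is min_order_qty and 4 is the sales order; these are the
	# positions that get_csv() and get_requested_items() rely on below.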
def get_csv(self):
item_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']]
for item in self.item_dict:
total_qty = sum([flt(d[0]) for d in self.item_dict[item]])
item_list.append([item, self.item_dict[item][0][1], self.item_dict[item][0][2], total_qty])
item_qty = frappe.db.sql("""select warehouse, indented_qty, ordered_qty, actual_qty
from `tabBin` where item_code = %s""", item, as_dict=1)
i_qty, o_qty, a_qty = 0, 0, 0
for w in item_qty:
i_qty, o_qty, a_qty = i_qty + flt(w.indented_qty), o_qty + flt(w.ordered_qty), a_qty + flt(w.actual_qty)
item_list.append(['', '', '', '', w.warehouse, flt(w.indented_qty),
flt(w.ordered_qty), flt(w.actual_qty)])
if item_qty:
item_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])
return item_list
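	# Sketch of the rows get_csv() builds (identifiers are made up):
	#   ['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
	#    'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']
	#   ['RM-001', 'Raw material', 'Nos', 25.0]                  # item summary row
	#   ['', '', '', '', 'Stores - C', 5.0, 0.0, 12.0]           # one row per Bin
	#   ['', '', '', '', 'Total', 5.0, 0.0, 12.0]                # totals per item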
def raise_material_requests(self):
"""
Raise Material Request if projected qty is less than qty required
Requested qty should be shortage qty considering minimum order qty
"""
self.validate_data()
if not self.purchase_request_for_warehouse:
frappe.throw(_("Please enter Warehouse for which Material Request will be raised"))
bom_dict = self.get_so_wise_planned_qty()
self.get_raw_materials(bom_dict)
if self.item_dict:
self.create_material_request()
def get_requested_items(self):
items_to_be_requested = frappe._dict()
if not self.create_material_requests_for_all_required_qty:
item_projected_qty = self.get_projected_qty()
for item, so_item_qty in self.item_dict.items():
total_qty = sum([flt(d[0]) for d in so_item_qty])
requested_qty = 0
if self.create_material_requests_for_all_required_qty:
requested_qty = total_qty
elif total_qty > item_projected_qty.get(item, 0):
# shortage
requested_qty = total_qty - flt(item_projected_qty.get(item))
# consider minimum order qty
if requested_qty and requested_qty < flt(so_item_qty[0][3]):
requested_qty = flt(so_item_qty[0][3])
# distribute requested qty SO wise
for item_details in so_item_qty:
if requested_qty:
sales_order = item_details[4] or "No Sales Order"
if self.get_items_from == "Material Request":
sales_order = "No Sales Order"
if requested_qty <= item_details[0]:
adjusted_qty = requested_qty
else:
adjusted_qty = item_details[0]
items_to_be_requested.setdefault(item, {}).setdefault(sales_order, 0)
items_to_be_requested[item][sales_order] += adjusted_qty
requested_qty -= adjusted_qty
else:
break
# requested qty >= total so qty, due to minimum order qty
if requested_qty:
items_to_be_requested.setdefault(item, {}).setdefault("No Sales Order", 0)
items_to_be_requested[item]["No Sales Order"] += requested_qty
return items_to_be_requested
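	# Worked example with invented numbers: total required 12 across SO-0001 (8)
	# and SO-0002 (4), projected qty 5, min_order_qty 10. The shortage is
	# 12 - 5 = 7, bumped to 10 by the minimum order qty, then distributed SO-wise
	# as 8 to SO-0001 and 2 to SO-0002. Had the bump exceeded the total SO qty,
	# the remainder would have been booked under "No Sales Order".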
def get_projected_qty(self):
items = self.item_dict.keys()
item_projected_qty = frappe.db.sql("""select item_code, sum(projected_qty)
from `tabBin` where item_code in (%s) and warehouse=%s group by item_code""" %
(", ".join(["%s"]*len(items)), '%s'), tuple(items + [self.purchase_request_for_warehouse]))
return dict(item_projected_qty)
def create_material_request(self):
items_to_be_requested = self.get_requested_items()
material_request_list = []
if items_to_be_requested:
for item in items_to_be_requested:
item_wrapper = frappe.get_doc("Item", item)
material_request = frappe.new_doc("Material Request")
material_request.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"requested_by": frappe.session.user
})
if item_wrapper.default_bom:
material_request.update({"material_request_type": "Manufacture"})
else:
material_request.update({"material_request_type": "Purchase"})
for sales_order, requested_qty in items_to_be_requested[item].items():
material_request.append("items", {
"doctype": "Material Request Item",
"__islocal": 1,
"item_code": item,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": requested_qty,
"schedule_date": add_days(nowdate(), cint(item_wrapper.lead_time_days)),
"warehouse": self.purchase_request_for_warehouse,
"sales_order": sales_order if sales_order!="No Sales Order" else None
})
material_request.flags.ignore_permissions = 1
material_request.submit()
material_request_list.append(material_request.name)
if material_request_list:
message = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in material_request_list]
msgprint(_("Material Requests {0} created").format(comma_and(message)))
else:
msgprint(_("Nothing to request"))
|
agpl-3.0
| -849,179,727,550,764,800 | 35.805383 | 120 | 0.660179 | false |
uwosh/uwosh.emergency.client
|
uwosh/emergency/client/tests.py
|
1
|
1484
|
import unittest
from zope.testing import doctestunit
from zope.component import testing
from Testing import ZopeTestCase as ztc
from Products.Five import zcml
from Products.Five import fiveconfigure
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import PloneSite
ptc.setupPloneSite()
import uwosh.emergency.client
class TestCase(ptc.PloneTestCase):
class layer(PloneSite):
@classmethod
def setUp(cls):
fiveconfigure.debug_mode = True
zcml.load_config('configure.zcml',
uwosh.emergency.client)
fiveconfigure.debug_mode = False
@classmethod
def tearDown(cls):
pass
def test_suite():
return unittest.TestSuite([
# Unit tests
#doctestunit.DocFileSuite(
# 'README.txt', package='uwosh.emergency',
# setUp=testing.setUp, tearDown=testing.tearDown),
#doctestunit.DocTestSuite(
# module='uwosh.emergency.mymodule',
# setUp=testing.setUp, tearDown=testing.tearDown),
# Integration tests that use PloneTestCase
#ztc.ZopeDocFileSuite(
# 'README.txt', package='uwosh.emergency',
# test_class=TestCase),
ztc.FunctionalDocFileSuite(
'browser.txt', package='uwosh.emergency.client',
test_class=TestCase),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
gpl-2.0
| 6,942,139,128,027,517,000 | 26.481481 | 61 | 0.645553 | false |
joshmoore/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webgateway/webgateway_cache.py
|
1
|
30403
|
#
# webgateway/webgateway_cache - web cache handler for webgateway
#
# Copyright (c) 2008, 2009 Glencoe Software, Inc. All rights reserved.
#
# This software is distributed under the terms described by the LICENCE file
# you can find at the root of the distribution bundle, which states you are
# free to use it only for non commercial purposes.
# If the file is missing please request a copy by contacting
# jason@glencoesoftware.com.
#
# Author: Carlos Neves <carlos(at)glencoesoftware.com>
from django.conf import settings
import omero
import logging
from random import random
import datetime
logger = logging.getLogger('cache')
import struct, time, os, re, shutil, stat
size_of_double = len(struct.pack('d',0))
string_type = type('')
CACHE=getattr(settings, 'WEBGATEWAY_CACHE', None)
TMPROOT=getattr(settings, 'WEBGATEWAY_TMPROOT', None)
THUMB_CACHE_TIME = 3600 # 1 hour
THUMB_CACHE_SIZE = 20*1024 # KB == 20MB
IMG_CACHE_TIME = 3600 # 1 hour
IMG_CACHE_SIZE = 512*1024 # KB == 512MB
JSON_CACHE_TIME = 3600 # 1 hour
JSON_CACHE_SIZE = 1*1024 # KB == 1MB
TMPDIR_TIME = 3600 * 12 # 12 hours
class CacheBase (object): #pragma: nocover
"""
Caching base class - extended by L{FileCache} for file-based caching.
Methods of this base class return None or False providing a no-caching implementation if needed
"""
def __init__ (self):
""" not implemented """
pass
def get (self, k):
return None
def set (self, k, v, t=0, invalidateGroup=None):
return False
def delete (self, k):
return False
def wipe (self):
return False
class FileCache(CacheBase):
"""
Implements file-based caching within the directory specified in constructor.
"""
_purge_holdoff = 4
def __init__(self, dir, timeout=60, max_entries=0, max_size=0):
"""
Initialises the class.
@param dir: Path to directory to place cached files.
@param timeout: Cache timeout in secs
@param max_entries: If specified, limits number of items to cache
        @param max_size: Maximum size of cache in KB
"""
super(FileCache, self).__init__()
self._dir = dir
self._max_entries = max_entries
self._max_size = max_size
self._last_purge = 0
self._default_timeout=timeout
if not os.path.exists(self._dir):
self._createdir()
#
def add(self, key, value, timeout=None, invalidateGroup=None):
"""
        Adds data to cache, returning False if already cached; otherwise delegates to L{set}.
@param key: Unique key for cache
@param value: Value to cache - must be String
@param timeout: Optional timeout - otherwise use default
@param invalidateGroup: Not used?
"""
if self.has_key(key):
return False
self.set(key, value, timeout, invalidateGroup=invalidateGroup)
return True
def get(self, key, default=None):
"""
Gets data from cache
@param key: cache key
@param default: default value to return
        @return: cache data, or default if the timeout has passed
"""
fname = self._key_to_file(key)
try:
f = open(fname, 'rb')
exp = struct.unpack('d',f.read(size_of_double))[0]
now = time.time()
if exp < now:
f.close()
self._delete(fname)
else:
return f.read()
except (IOError, OSError, EOFError, struct.error):
pass
return default
def set(self, key, value, timeout=None, invalidateGroup=None):
"""
Adds data to cache, overwriting if already cached.
@param key: Unique key for cache
@param value: Value to cache - must be String
@param timeout: Optional timeout - otherwise use default
@param invalidateGroup: Not used?
"""
if type(value) != string_type:
raise ValueError("%s not a string, can't cache" % type(value))
fname = self._key_to_file(key)
dirname = os.path.dirname(fname)
if timeout is None:
timeout = self._default_timeout
if self._full():
# Maybe we already have this one cached, and we need the space
try:
self._delete(fname)
except OSError:
pass
if self._full():
return
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(fname, 'wb')
exp = time.time() + timeout + (timeout / 5 * random())
f.write(struct.pack('d', exp))
f.write(value)
except (IOError, OSError): #pragma: nocover
pass
def delete(self, key):
"""
Attempt to delete the cache data referenced by key
@param key: Cache key
"""
try:
self._delete(self._key_to_file(key))
except (IOError, OSError): #pragma: nocover
pass
def _delete(self, fname):
"""
Tries to delete the data at the specified absolute file path
@param fname: File name of data to delete
"""
logger.debug('requested delete for "%s"' % fname)
if os.path.isdir(fname):
shutil.rmtree(fname, ignore_errors=True)
else:
os.remove(fname)
try:
# Remove the parent subdirs if they're empty
dirname = os.path.dirname(fname)
while dirname != self._dir:
os.rmdir(dirname)
dirname = os.path.dirname(fname)
except (IOError, OSError):
pass
def wipe (self):
""" Deletes everything in the cache """
shutil.rmtree(self._dir)
self._createdir()
return True
def _check_entry (self, fname):
"""
Verifies if a specific cache entry (provided as absolute file path) is expired.
If expired, it gets deleted and method returns false.
If not expired, returns True.
@param fname: File path
"""
try:
f = open(fname, 'rb')
exp = struct.unpack('d',f.read(size_of_double))[0]
now = time.time()
if exp < now:
f.close()
self._delete(fname)
return False
else:
return True
except (IOError, OSError, EOFError, struct.error): #pragma: nocover
return False
def has_key(self, key):
"""
Returns true if the cache has the specified key
@param key: Key to look for.
@rtype: Boolean
"""
fname = self._key_to_file(key)
return self._check_entry(fname)
def _du (self):
"""
Disk Usage count on the filesystem the cache is based at
@rtype: int
@return: the current usage, in KB
"""
return int(os.popen('du -sk %s' % os.path.join(os.getcwd(),self._dir)).read().split('\t')[0].strip())
def _full(self, _on_retry=False):
"""
Checks whether the cache is full, either because we have exceeded max number of entries or
the cache space is full.
@param _on_retry: Flag allows calling this method again after purge() without recursion
@return: True if cache is full
@rtype: Boolean
"""
# Check nr of entries
if self._max_entries:
try:
x = int(os.popen('find %s -type f | wc -l' % self._dir).read().strip())
if x >= self._max_entries:
if not _on_retry:
self._purge()
return self._full(True)
logger.warn('caching limits reached on %s: max entries %d' % (self._dir, self._max_entries))
return True
except ValueError: #pragma: nocover
logger.error('Counting cache entries failed')
# Check for space usage
if self._max_size:
try:
x = self._du()
if x >= self._max_size:
if not _on_retry:
self._purge()
return self._full(True)
logger.warn('caching limits reached on %s: max size %d' % (self._dir, self._max_size))
return True
except ValueError: #pragma: nocover
logger.error('Counting cache size failed')
return False
def _purge (self):
"""
        Iterates the whole cache structure, searching for and cleaning expired entries.
        This method may be expensive, so only call it when really necessary.
"""
now = time.time()
if now-self._last_purge < self._purge_holdoff:
return
self._last_purge = now
logger.debug('entering purge')
count = 0
for p,_,files in os.walk(self._dir):
for f in files:
if not self._check_entry(os.path.join(p, f)):
count += 1
logger.debug('purge finished, removed %d files' % count)
def _createdir(self):
"""
Creates a directory for the root dir of the cache.
"""
try:
os.makedirs(self._dir)
except OSError: #pragma: nocover
            raise EnvironmentError, "Cache directory '%s' does not exist and could not be created" % self._dir
def _key_to_file(self, key):
"""
Uses the key to construct an absolute path to the cache data.
@param key: Cache key
@return: Path
@rtype: String
"""
        if key.find('..') >= 0 or key.startswith('/'):  # reject path traversal anywhere in the key
raise ValueError('Invalid value for cache key: "%s"' % key)
return os.path.join(self._dir, key)
def _get_num_entries(self):
"""
Returns the number of files in the cache
@rtype: int
"""
count = 0
for _,_,files in os.walk(self._dir):
count += len(files)
return count
_num_entries = property(_get_num_entries)
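# Minimal usage sketch for FileCache (illustrative only; the path and keys are
# made up and this is not executed as part of the module):
#
#   cache = FileCache('/tmp/wg_cache', timeout=60, max_entries=100, max_size=1024)
#   cache.set('img_1/42/0x0', 'rendered image bytes as a str')
#   data = cache.get('img_1/42/0x0')      # the cached string, or None if expired
#   cache.delete('img_1/42/0x0')
#
# Note that set() only accepts str values and silently drops the entry when the
# cache is still full after a purge.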
FN_REGEX = re.compile('[#$,|]')
class WebGatewayCache (object):
"""
Caching class for webgateway.
"""
def __init__ (self, backend=None, basedir=CACHE):
"""
Initialises cache
@param backend: The cache class to use for caching. E.g. L{FileCache}
@param basedir: The base location for all caches. Sub-dirs created for json/ img/ thumb/
"""
self._basedir = basedir
self._lastlock = None
if backend is None or basedir is None:
self._json_cache = CacheBase()
self._img_cache = CacheBase()
self._thumb_cache = CacheBase()
else:
self._json_cache = backend(dir=os.path.join(basedir,'json'),
timeout=JSON_CACHE_TIME, max_entries=0, max_size=JSON_CACHE_SIZE)
self._img_cache = backend(dir=os.path.join(basedir,'img'),
timeout=IMG_CACHE_TIME, max_entries=0, max_size=IMG_CACHE_SIZE)
self._thumb_cache = backend(dir=os.path.join(basedir,'thumb'),
timeout=THUMB_CACHE_TIME, max_entries=0, max_size=THUMB_CACHE_SIZE)
def _updateCacheSettings (self, cache, timeout=None, max_entries=None, max_size=None):
"""
Updates the timeout, max_entries and max_size (if specified) for the given cache
@param cache: Cache or caches to update.
@type cache: L{CacheBase} or list of caches
"""
if isinstance(cache, CacheBase):
cache = (cache,)
for c in cache:
if timeout is not None:
c._default_timeout = timeout
if max_entries is not None:
c._max_entries = max_entries
if max_size is not None:
c._max_size = max_size
def __del__ (self):
"""
Tries to remove the lock on this cache.
"""
if self._lastlock:
try:
logger.debug('removing cache lock file on __del__')
os.remove(self._lastlock)
except:
pass
self._lastlock = None
def tryLock (self):
"""
        Simple lock mechanism to avoid multiple processes on the same cache
        stepping on each other's toes.
@rtype: boolean
@return: True if we created a lockfile or already had it. False otherwise.
"""
lockfile = os.path.join(self._basedir, '%s_lock' % datetime.datetime.now().strftime('%Y%m%d_%H%M'))
if self._lastlock:
if lockfile == self._lastlock:
return True
try:
os.remove(self._lastlock)
except:
pass
self._lastlock = None
try:
fd = os.open(lockfile, os.O_CREAT | os.O_EXCL)
os.close(fd)
self._lastlock = lockfile
return True
except OSError:
return False
def handleEvent (self, client_base, e):
"""
Handle one event from blitz.onEventLogs.
Meant to be overridden, this implementation just logs.
@param client_base: TODO: docs!
@param e:
"""
logger.debug('## %s#%i %s user #%i group #%i(%i)' % (e.entityType.val,
e.entityId.val,
e.action.val,
e.details.owner.id.val,
e.details.group.id.val,
e.event.id.val))
def eventListener (self, client_base, events):
"""
handle events coming our way from blitz.onEventLogs.
Because all processes will be listening to the same events, we use a simple file
lock mechanism to make sure the first process to get the event will be the one
handling things from then on.
@param client_base: TODO: docs!
@param events:
"""
for e in events:
if self.tryLock():
self.handleEvent(client_base, e)
else:
logger.debug("## ! ignoring event %s" % str(e.event.id.val))
def clear (self):
"""
Clears all the caches.
"""
self._json_cache.wipe()
self._img_cache.wipe()
self._thumb_cache.wipe()
def _cache_set (self, cache, key, obj):
""" Calls cache.set(key, obj) """
logger.debug(' set: %s' % key)
cache.set(key, obj)
def _cache_clear (self, cache, key):
""" Calls cache.delete(key) """
logger.debug(' clear: %s' % key)
cache.delete(key)
def invalidateObject (self, client_base, user_id, obj):
"""
Invalidates all caches for this particular object
@param client_base: The server_id
@param user_id: OMERO user ID to partition caching upon
@param obj: The object wrapper. E.g. L{omero.gateway.ImageWrapper}
"""
if obj.OMERO_CLASS == 'Image':
self.clearImage(None, client_base, user_id, obj)
else:
logger.debug('unhandled object type: %s' % obj.OMERO_CLASS)
self.clearJson(client_base, obj)
##
# Thumb
def _thumbKey (self, r, client_base, user_id, iid, size):
"""
Generates a string key for caching the thumbnail, based on the above parameters
@param r: not used
@param client_base: server-id, forms stem of the key
@param user_id: OMERO user ID to partition caching upon
@param iid: image ID
@param size: size of the thumbnail - tuple. E.g. (100,)
"""
if size is not None and len(size):
return 'thumb_user_%s/%s/%s/%s' % (client_base, str(iid), user_id, 'x'.join([str(x) for x in size]))
else:
return 'thumb_user_%s/%s/%s' % (client_base, str(iid), user_id)
def setThumb (self, r, client_base, user_id, iid, obj, size=()):
"""
Puts thumbnail into cache.
@param r: for cache key - Not used?
@param client_base: server_id for cache key
@param user_id: OMERO user ID to partition caching upon
@param iid: image ID for cache key
@param obj: Data to cache
@param size: Size used for cache key. Tuple
"""
k = self._thumbKey(r, client_base, user_id, iid, size)
self._cache_set(self._thumb_cache, k, obj)
return True
def getThumb (self, r, client_base, user_id, iid, size=()):
"""
Gets thumbnail from cache.
@param r: for cache key - Not used?
@param client_base: server_id for cache key
@param user_id: OMERO user ID to partition caching upon
@param iid: image ID for cache key
@param size: Size used for cache key. Tuple
@return: Cached data or None
@rtype: String
"""
k = self._thumbKey(r, client_base, user_id, iid, size)
r = self._thumb_cache.get(k)
if r is None:
logger.debug(' fail: %s' % k)
else:
logger.debug('cached: %s' % k)
return r
def clearThumb (self, r, client_base, user_id, iid, size=None):
"""
Clears thumbnail from cache.
@param r: for cache key - Not used?
@param client_base: server_id for cache key
@param user_id: OMERO user ID to partition caching upon
@param iid: image ID for cache key
@param size: Size used for cache key. Tuple
@return: True
"""
k = self._thumbKey(r, client_base, user_id, iid, size)
self._cache_clear(self._thumb_cache, k)
return True
##
# Image
def _imageKey (self, r, client_base, img, z=0, t=0):
"""
Returns a key for caching the Image, based on parameters above, including rendering settings
specified in the http request.
@param r: http request - get rendering params 'c', 'm', 'p'
@param client_base: server_id for cache key
@param img: L{omero.gateway.ImageWrapper} for ID
        @param z: Z index for cache key
        @param t: T index for cache key
"""
iid = img.getId()
if r:
r = r.REQUEST
c = FN_REGEX.sub('-',r.get('c', ''))
m = r.get('m', '')
p = r.get('p', '')
if p and not isinstance(omero.gateway.ImageWrapper.PROJECTIONS.get(p, -1),
omero.constants.projection.ProjectionType): #pragma: nocover
p = ''
q = r.get('q', '')
region = r.get('region', '')
tile = r.get('tile', '')
rv = 'img_%s/%s/%%s-c%s-m%s-q%s-r%s-t%s' % (client_base, str(iid), c, m, q, region, tile)
if p:
return rv % ('%s-%s' % (p, str(t)))
else:
return rv % ('%sx%s' % (str(z), str(t)))
else:
return 'img_%s/%s/' % (client_base, str(iid))
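    # Illustrative key shapes produced by _imageKey (all values are hypothetical):
    #   with a request, no projection:  'img_omero1/1234/5x0-c1-2-mc-q0.9-r-t'
    #   with a projection 'p':          'img_omero1/1234/intmax-0-c1-2-mc-q0.9-r-t'
    #   without a request:              'img_omero1/1234/'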
def setImage (self, r, client_base, img, z, t, obj, ctx=''):
"""
Puts image data into cache.
@param r: http request for cache key
@param client_base: server_id for cache key
@param img: ImageWrapper for cache key
@param z: Z index for cache key
@param t: T index for cache key
@param obj: Data to cache
@param ctx: Additional string for cache key
"""
k = self._imageKey(r, client_base, img, z, t) + ctx
self._cache_set(self._img_cache, k, obj)
return True
def getImage (self, r, client_base, img, z, t, ctx=''):
"""
Gets image data from cache.
@param r: http request for cache key
@param client_base: server_id for cache key
@param img: ImageWrapper for cache key
@param z: Z index for cache key
@param t: T index for cache key
@param ctx: Additional string for cache key
@return: Image data
@rtype: String
"""
k = self._imageKey(r, client_base, img, z, t) + ctx
r = self._img_cache.get(k)
if r is None:
logger.debug(' fail: %s' % k)
else:
logger.debug('cached: %s' % k)
return r
def clearImage (self, r, client_base, user_id, img):
"""
        Clears image data from cache using default rendering settings (r=None) and T and Z indexes of 0.
TODO: Doesn't clear any data stored WITH r, t, or z specified in cache key?
Also clears thumbnail (but not thumbs with size specified) and json data for this image.
@param r: http request for cache key
@param client_base: server_id for cache key
@param user_id: OMERO user ID to partition caching upon
@param img: ImageWrapper for cache key
        @return: True
"""
k = self._imageKey(None, client_base, img)
self._cache_clear(self._img_cache, k)
# do the thumb too
self.clearThumb(r, client_base, user_id, img.getId())
# and json data
self.clearJson(client_base, img)
return True
def setSplitChannelImage (self, r, client_base, img, z, t, obj):
""" Calls L{setImage} with '-sc' context """
return self.setImage(r, client_base, img, z, t, obj, '-sc')
def getSplitChannelImage (self, r, client_base, img, z, t):
"""
Calls L{getImage} with '-sc' context
@rtype: String
"""
return self.getImage(r, client_base, img, z, t, '-sc')
def setOmeTiffImage (self, r, client_base, img, obj):
""" Calls L{setImage} with '-ometiff' context """
return self.setImage(r, client_base, img, 0, 0, obj, '-ometiff')
def getOmeTiffImage (self, r, client_base, img):
"""
Calls L{getImage} with '-ometiff' context
@rtype: String
"""
return self.getImage(r, client_base, img, 0, 0, '-ometiff')
##
# hierarchies (json)
def _jsonKey (self, r, client_base, obj, ctx=''):
"""
Creates a cache key for storing json data based on params above.
@param r: http request - not used
@param client_base: server_id
@param obj: ObjectWrapper
@param ctx: Additional string for cache key
@return: Cache key
@rtype: String
"""
if obj:
return 'json_%s/%s_%s/%s' % (client_base, obj.OMERO_CLASS, obj.id, ctx)
else:
return 'json_%s/single/%s' % (client_base, ctx)
def clearJson (self, client_base, obj):
"""
Only handles Dataset obj, calling L{clearDatasetContents}
"""
logger.debug('clearjson')
if obj.OMERO_CLASS == 'Dataset':
self.clearDatasetContents(None, client_base, obj)
def setDatasetContents (self, r, client_base, ds, data):
"""
Adds data to the json cache using 'contents' as context
@param r: http request - not used
@param client_base: server_id for cache key
@param ds: ObjectWrapper for cache key
@param data: Data to cache
@rtype: True
"""
k = self._jsonKey(r, client_base, ds, 'contents')
self._cache_set(self._json_cache, k, data)
return True
def getDatasetContents (self, r, client_base, ds):
"""
Gets data from the json cache using 'contents' as context
@param r: http request - not used
@param client_base: server_id for cache key
@param ds: ObjectWrapper for cache key
@rtype: String or None
"""
k = self._jsonKey(r, client_base, ds, 'contents')
r = self._json_cache.get(k)
if r is None:
logger.debug(' fail: %s' % k)
else:
logger.debug('cached: %s' % k)
return r
def clearDatasetContents (self, r, client_base, ds):
"""
Clears data from the json cache using 'contents' as context
@param r: http request - not used
@param client_base: server_id for cache key
@param ds: ObjectWrapper for cache key
@rtype: True
"""
k = self._jsonKey(r, client_base, ds, 'contents')
self._cache_clear(self._json_cache, k)
return True
webgateway_cache = WebGatewayCache(FileCache)
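# Illustrative sketch of how a caller might use the module-level cache above
# (the request object, ids and rendering helper are hypothetical):
#
#   jpeg = webgateway_cache.getThumb(request, server_id, user_id, image_id, (96,))
#   if jpeg is None:
#       jpeg = render_thumbnail_somehow(image_id)   # hypothetical helper
#       webgateway_cache.setThumb(request, server_id, user_id, image_id, jpeg, (96,))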
class AutoLockFile (file):
""" Class extends file to facilitate creation and deletion of lock file. """
    def __init__ (self, fn, mode):
        """ opens the file with the specified name and mode and creates a '.lock' file next to it """
super(AutoLockFile, self).__init__(fn, mode)
self._lock = os.path.join(os.path.dirname(fn), '.lock')
file(self._lock, 'a').close()
def __del__ (self):
""" tries to delete the lock file """
try:
os.remove(self._lock)
except:
pass
def close (self):
""" tries to delete the lock file and close the file """
try:
os.remove(self._lock)
except:
pass
super(AutoLockFile, self).close()
class WebGatewayTempFile (object):
"""
Class for handling creation of temporary files
"""
def __init__ (self, tdir=TMPROOT):
""" Initialises class, setting the directory to be used for temp files. """
self._dir = tdir
if tdir and not os.path.exists(self._dir):
self._createdir()
def _createdir(self):
""" Tries to create the directories required for the temp file base dir """
try:
os.makedirs(self._dir)
except OSError: #pragma: nocover
            raise EnvironmentError, "Cache directory '%s' does not exist and could not be created" % self._dir
def _cleanup (self):
""" Tries to delete all the temp files that have expired their cache timeout. """
now = time.time()
for f in os.listdir(self._dir):
try:
ts = os.path.join(self._dir, f, '.timestamp')
if os.path.exists(ts):
ft = float(file(ts).read()) + TMPDIR_TIME
else:
ft = float(f) + TMPDIR_TIME
if ft < now:
shutil.rmtree(os.path.join(self._dir, f), ignore_errors=True)
except ValueError:
continue
def newdir (self, key=None):
"""
        Creates a new directory using key as the dir name, and adds a timestamp file with its
        creation time. If key is not specified, a unique key based on the timestamp is used.
@param key: The new dir name
@return: Tuple of (path to new directory, key used)
"""
if not self._dir:
return None, None
self._cleanup()
stamp = str(time.time())
if key is None:
dn = os.path.join(self._dir, stamp)
while os.path.exists(dn):
stamp = str(time.time())
dn = os.path.join(self._dir, stamp)
key = stamp
key = key.replace('/','_').decode('utf8').encode('ascii','ignore')
dn = os.path.join(self._dir, key)
if not os.path.isdir(dn):
os.makedirs(dn)
file(os.path.join(dn, '.timestamp'), 'w').write(stamp)
return dn, key
def new (self, name, key=None):
"""
        Creates a new directory if needed (see L{newdir}) and checks whether it already contains
        a file 'name'. If not, a file lock is created for this location and returned.
@param name: Name of file we want to create.
@param key: The new dir name
        @return: Tuple of (abs path to the new file, relative path key/name, L{AutoLockFile} or True if the file already exists)
"""
if not self._dir:
return None, None, None
dn, stamp = self.newdir(key)
name = name.replace('/','_').decode('utf8').encode('ascii', 'ignore')
fn = os.path.join(dn, name)
rn = os.path.join(stamp, name)
lf = os.path.join(dn, '.lock')
cnt = 30
fsize = 0
while os.path.exists(lf) and cnt > 0:
time.sleep(1)
t = os.stat(fn)[stat.ST_SIZE]
if (t == fsize):
cnt -= 1
logger.debug('countdown %d' % cnt)
else:
fsize = t
cnt = 30
if cnt == 0:
return None, None, None
if os.path.exists(fn):
return fn, rn, True
return fn, rn, AutoLockFile(fn, 'wb')
webgateway_tempfile = WebGatewayTempFile()
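# Illustrative sketch of the temp-file helper above (names and data are made up):
#
#   fpath, rpath, fobj = webgateway_tempfile.new('export.ome.tiff', key='12345')
#   if fobj is True:
#       pass                       # file already exists, just serve fpath/rpath
#   elif fobj is not None:
#       fobj.write(data)           # AutoLockFile removes the '.lock' on close()
#       fobj.close()
#   # (None, None, None) means no TMPROOT is configured or the lock timed out.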
|
gpl-2.0
| 1,907,028,645,191,416,800 | 34.476079 | 119 | 0.516791 | false |
johnlb/strange_wp
|
strange_bak/tests/test_text.py
|
1
|
25260
|
# coding: utf-8
"""
weasyprint.tests.test_text
--------------------------
Test the text layout.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
from ..css import StyleDict
from ..css.properties import INITIAL_VALUES
from ..text import split_first_line
from .test_layout import parse, body_children
from .testing_utils import FONTS, assert_no_logs
FONTS = FONTS.split(', ')
def make_text(text, width=None, **style):
"""Wrapper for split_first_line() creating a StyleDict."""
style = StyleDict({
'font_family': ['Nimbus Mono L', 'Liberation Mono', 'FreeMono',
'monospace'],
}, INITIAL_VALUES).updated_copy(style)
return split_first_line(
text, style, hinting=False, max_width=width, line_width=None)
@assert_no_logs
def test_line_content():
"""Test the line break for various fixed-width lines."""
for width, remaining in [(100, 'text for test'),
(45, 'is a text for test')]:
text = 'This is a text for test'
_, length, resume_at, _, _, _ = make_text(
text, width, font_family=FONTS, font_size=19)
assert text[resume_at:] == remaining
assert length + 1 == resume_at # +1 is for the removed trailing space
@assert_no_logs
def test_line_with_any_width():
"""Test the auto-fit width of lines."""
_, _, _, width_1, _, _ = make_text('some text')
_, _, _, width_2, _, _ = make_text('some text some text')
assert width_1 < width_2
@assert_no_logs
def test_line_breaking():
"""Test the line breaking."""
string = 'This is a text for test'
# These two tests do not really rely on installed fonts
_, _, resume_at, _, _, _ = make_text(string, 90, font_size=1)
assert resume_at is None
_, _, resume_at, _, _, _ = make_text(string, 90, font_size=100)
assert string[resume_at:] == 'is a text for test'
_, _, resume_at, _, _, _ = make_text(string, 100, font_family=FONTS,
font_size=19)
assert string[resume_at:] == 'text for test'
@assert_no_logs
def test_text_dimension():
"""Test the font size impact on the text dimension."""
string = 'This is a text for test. This is a test for text.py'
_, _, _, width_1, height_1, _ = make_text(string, 200, font_size=12)
_, _, _, width_2, height_2, _ = make_text(string, 200, font_size=20)
assert width_1 * height_1 < width_2 * height_2
@assert_no_logs
def test_text_font_size_zero():
"""Test a text with a font size set to 0."""
page, = parse('''
<style>
p { font-size: 0; }
</style>
<p>test font size zero</p>
''')
paragraph, = body_children(page)
line, = paragraph.children
# zero-sized text boxes are removed
assert not line.children
assert line.height == 0
assert paragraph.height == 0
@assert_no_logs
def test_text_spaced_inlines():
"""Test a text with inlines separated by a space."""
page, = parse('''
<p>start <i><b>bi1</b> <b>bi2</b></i> <b>b1</b> end</p>
''')
paragraph, = body_children(page)
line, = paragraph.children
start, i, space, b, end = line.children
assert start.text == 'start '
assert space.text == ' '
assert space.width > 0
assert end.text == ' end'
bi1, space, bi2 = i.children
bi1, = bi1.children
bi2, = bi2.children
assert bi1.text == 'bi1'
assert space.text == ' '
assert space.width > 0
assert bi2.text == 'bi2'
b1, = b.children
assert b1.text == 'b1'
@assert_no_logs
def test_text_align_left():
"""Test the left text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x=0 x=40 x=100 x=200
"""
page, = parse('''
<style>
@page { size: 200px }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
# initial value for text-align: left (in ltr text)
assert img_1.position_x == 0
assert img_2.position_x == 40
@assert_no_logs
def test_text_align_right():
"""Test the right text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x=0 x=100 x=200
x=140
"""
page, = parse('''
<style>
@page { size: 200px }
body { text-align: right }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 100 # 200 - 60 - 40
assert img_2.position_x == 140 # 200 - 60
@assert_no_logs
def test_text_align_center():
"""Test the center text alignment."""
"""
<--------------------> page, body
+-----+
+---+ |
| | |
+---+-----+
^ ^ ^ ^
x= x=50 x=150
x=90
"""
page, = parse('''
<style>
@page { size: 200px }
body { text-align: center }
</style>
<body>
<img src="pattern.png" style="width: 40px"
><img src="pattern.png" style="width: 60px">''')
html, = page.children
body, = html.children
line, = body.children
img_1, img_2 = line.children
assert img_1.position_x == 50
assert img_2.position_x == 90
@assert_no_logs
def test_text_align_justify():
"""Test justified text."""
page, = parse('''
<style>
@page { size: 300px 1000px }
body { text-align: justify }
</style>
<p><img src="pattern.png" style="width: 40px">
<strong>
<img src="pattern.png" style="width: 60px">
<img src="pattern.png" style="width: 10px">
<img src="pattern.png" style="width: 100px"
></strong><img src="pattern.png" style="width: 290px"
><!-- Last image will be on its own line. -->''')
html, = page.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
image_1, space_1, strong = line_1.children
image_2, space_2, image_3, space_3, image_4 = strong.children
image_5, = line_2.children
assert space_1.text == ' '
assert space_2.text == ' '
assert space_3.text == ' '
assert image_1.position_x == 0
assert space_1.position_x == 40
assert strong.position_x == 70
assert image_2.position_x == 70
assert space_2.position_x == 130
assert image_3.position_x == 160
assert space_3.position_x == 170
assert image_4.position_x == 200
assert strong.width == 230
assert image_5.position_x == 0
# single-word line (zero spaces)
page, = parse('''
<style>
body { text-align: justify; width: 50px }
</style>
<p>Supercalifragilisticexpialidocious bar</p>
''')
html, = page.children
body, = html.children
paragraph, = body.children
line_1, line_2 = paragraph.children
text, = line_1.children
assert text.position_x == 0
@assert_no_logs
def test_word_spacing():
"""Test word-spacing."""
# keep the empty <style> as a regression test: element.text is None
# (Not a string.)
page, = parse('''
<style></style>
<body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_1, = line.children
# TODO: Pango gives only half of word-spacing to a space at the end
# of a TextBox. Is this what we want?
page, = parse('''
<style>strong { word-spacing: 11px }</style>
<body><strong>Lorem ipsum dolor<em>sit amet</em></strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_2, = line.children
assert strong_2.width - strong_1.width == 33
@assert_no_logs
def test_letter_spacing():
"""Test letter-spacing."""
page, = parse('''
<body><strong>Supercalifragilisticexpialidocious</strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_1, = line.children
page, = parse('''
<style>strong { letter-spacing: 11px }</style>
<body><strong>Supercalifragilisticexpialidocious</strong>''')
html, = page.children
body, = html.children
line, = body.children
strong_2, = line.children
assert strong_2.width - strong_1.width == 34 * 11
# an embedded tag should not affect the single-line letter spacing
page, = parse('''
<style>strong { letter-spacing: 11px }</style>
<body><strong>Supercali<span>fragilistic</span>expialidocious''' +
'</strong>')
html, = page.children
body, = html.children
line, = body.children
strong_3, = line.children
assert strong_3.width == strong_2.width
# duplicate wrapped lines should also have same overall width
# Note work-around for word-wrap bug (issue #163) by marking word
# as an inline-block
page, = parse('''
<style>strong { letter-spacing: 11px; max-width: %dpx }
span { display: inline-block }</style>
<body><strong>%s %s</strong>''' %
((strong_3.width * 1.5),
'<span>Supercali<i>fragilistic</i>expialidocious</span>',
'<span>Supercali<i>fragilistic</i>expialidocious</span>'))
html, = page.children
body, = html.children
line1, line2 = body.children
assert line1.children[0].width == line2.children[0].width
assert line1.children[0].width == strong_2.width
@assert_no_logs
def test_text_indent():
"""Test the text-indent property."""
for indent in ['12px', '6%']: # 6% of 200px is 12px
page, = parse('''
<style>
@page { size: 220px }
body { margin: 10px; text-indent: %(indent)s }
</style>
<p>Some text that is long enough that it take at least three line,
but maybe more.
''' % {'indent': indent})
html, = page.children
body, = html.children
paragraph, = body.children
lines = paragraph.children
text_1, = lines[0].children
text_2, = lines[1].children
text_3, = lines[2].children
assert text_1.position_x == 22 # 10px margin-left + 12px indent
assert text_2.position_x == 10 # No indent
assert text_3.position_x == 10 # No indent
@assert_no_logs
def test_hyphenate_character():
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'!\'" lang=fr>'
'hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('!')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('!', '') == 'hyphénation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'à\'" lang=fr>'
'hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('à')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('à', '') == 'hyphénation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'ù ù\'" lang=fr>'
'hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('ù ù')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace(' ', '').replace('ù', '') == 'hyphénation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'\'" lang=fr>'
'hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'hyphénation'
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-character: \'———\'" lang=fr>'
'hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('———')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('—', '') == 'hyphénation'
@assert_no_logs
def test_manual_hyphenation():
for i in range(1, len('hyphénation')):
for hyphenate_character in ('!', 'ù ù'):
word = 'hyphénation'[:i] + '\u00ad' + 'hyphénation'[i:]
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: manual;'
'-weasy-hyphenate-character: \'%s\'"'
'lang=fr>%s' % (hyphenate_character, word))
html, = page.children
body, = html.children
lines = body.children
assert len(lines) == 2
assert lines[0].children[0].text.endswith(hyphenate_character)
full_text = ''.join(
child.text for line in lines for child in line.children)
assert full_text.replace(hyphenate_character, '') == word
for i in range(1, len('hy phénation')):
for hyphenate_character in ('!', 'ù ù'):
word = 'hy phénation'[:i] + '\u00ad' + 'hy phénation'[i:]
page, = parse(
'<html style="width: 5em; font-family: ahem">'
'<body style="-weasy-hyphens: manual;'
'-weasy-hyphenate-character: \'%s\'"'
'lang=fr>%s' % (hyphenate_character, word))
html, = page.children
body, = html.children
lines = body.children
assert len(lines) in (2, 3)
full_text = ''.join(
child.text for line in lines for child in line.children)
full_text = full_text.replace(hyphenate_character, '')
if lines[0].children[0].text.endswith(hyphenate_character):
assert full_text == word
else:
assert lines[0].children[0].text.endswith('y')
if len(lines) == 3:
assert lines[1].children[0].text.endswith(
hyphenate_character)
@assert_no_logs
def test_hyphenate_limit_zone():
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 0" lang=fr>'
'mmmmm hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) == 2
assert lines[0].children[0].text.endswith('‐')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('‐', '') == 'mmmmm hyphénation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 9em" lang=fr>'
'mmmmm hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('mm')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'mmmmmhyphénation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 5%" lang=fr>'
'mmmmm hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) == 2
assert lines[0].children[0].text.endswith('‐')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text.replace('‐', '') == 'mmmmm hyphénation'
page, = parse(
'<html style="width: 12em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-zone: 95%" lang=fr>'
'mmmmm hyphénation')
html, = page.children
body, = html.children
lines = body.children
assert len(lines) > 1
assert lines[0].children[0].text.endswith('mm')
full_text = ''.join(line.children[0].text for line in lines)
assert full_text == 'mmmmmhyphénation'
@assert_no_logs
def test_hyphenate_limit_chars():
def line_count(limit_chars):
page, = parse((
'<html style="width: 1em; font-family: ahem">'
'<body style="-weasy-hyphens: auto;'
'-weasy-hyphenate-limit-chars: %s" lang=en>'
'hyphen') % limit_chars)
html, = page.children
body, = html.children
lines = body.children
return len(lines)
assert line_count('auto') == 2
assert line_count('auto auto 0') == 2
assert line_count('0 0 0') == 2
assert line_count('4 4 auto') == 1
assert line_count('6 2 4') == 2
assert line_count('auto 1 auto') == 2
assert line_count('7 auto auto') == 1
assert line_count('6 auto auto') == 2
assert line_count('5 2') == 2
assert line_count('3') == 2
assert line_count('2 4 6') == 1
assert line_count('auto 4') == 1
assert line_count('auto 2') == 2
@assert_no_logs
def test_overflow_wrap():
def get_lines(wrap, text):
page, = parse('''
<style>
body {width: 80px; overflow: hidden; font-family: ahem; }
span {overflow-wrap: %s; white-space: normal; }
</style>
<body style="-weasy-hyphens: auto;" lang="en">
<span>%s
''' % (wrap, text))
html, = page.children
body, = html.children
body_lines = []
for line in body.children:
box, = line.children
textBox, = box.children
body_lines.append(textBox.text)
return body_lines
# break-word
lines = get_lines('break-word', 'aaaaaaaa')
assert len(lines) > 1
full_text = ''.join(line for line in lines)
assert full_text == 'aaaaaaaa'
# normal
lines = get_lines('normal', 'aaaaaaaa')
assert len(lines) == 1
full_text = ''.join(line for line in lines)
assert full_text == 'aaaaaaaa'
# break-word after hyphenation
lines = get_lines('break-word', 'hyphenations')
assert len(lines) > 3
full_text = ''.join(line for line in lines)
assert full_text == "hy\u2010phen\u2010ations"
# break word after normal white-space wrap and hyphenation
lines = get_lines(
'break-word', "A splitted word. An hyphenated word.")
assert len(lines) > 8
full_text = ''.join(line for line in lines)
assert full_text == "Asplittedword.Anhy\u2010phen\u2010atedword."
@assert_no_logs
def test_white_space():
"""Test the white-space property."""
def lines(width, space):
page, = parse('''
<style>
body { font-size: 100px; width: %ipx }
span { white-space: %s }
</style>
<body><span>This + \n is text''' % (width, space))
html, = page.children
body, = html.children
return body.children
line1, line2, line3, line4 = lines(1, 'normal')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This'
box2, = line2.children
text2, = box2.children
assert text2.text == '+'
box3, = line3.children
text3, = box3.children
assert text3.text == 'is'
box4, = line4.children
text4, = box4.children
assert text4.text == 'text'
line1, line2 = lines(1, 'pre')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + '
box2, = line2.children
text2, = box2.children
assert text2.text == ' is text'
line1, = lines(1, 'nowrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + is text'
line1, line2, line3, line4, line5 = lines(1, 'pre-wrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This '
box2, = line2.children
text2, = box2.children
assert text2.text == '+ '
box3, = line3.children
text3, = box3.children
assert text3.text == ' '
box4, = line4.children
text4, = box4.children
assert text4.text == 'is '
box5, = line5.children
text5, = box5.children
assert text5.text == 'text'
line1, line2, line3, line4 = lines(1, 'pre-line')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This'
box2, = line2.children
text2, = box2.children
assert text2.text == '+'
box3, = line3.children
text3, = box3.children
assert text3.text == 'is'
box4, = line4.children
text4, = box4.children
assert text4.text == 'text'
line1, = lines(1000000, 'normal')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + is text'
line1, line2 = lines(1000000, 'pre')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + '
box2, = line2.children
text2, = box2.children
assert text2.text == ' is text'
line1, = lines(1000000, 'nowrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + is text'
line1, line2 = lines(1000000, 'pre-wrap')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This + '
box2, = line2.children
text2, = box2.children
assert text2.text == ' is text'
line1, line2 = lines(1000000, 'pre-line')
box1, = line1.children
text1, = box1.children
assert text1.text == 'This +'
box2, = line2.children
text2, = box2.children
assert text2.text == 'is text'
@assert_no_logs
def test_tab_size():
"""Test the ``tab-size`` property."""
for value, width in (
(8, 144), # (2 + (8 - 1)) * 16
(4, 80), # (2 + (4 - 1)) * 16
('3em', 64), # (2 + (3 - 1)) * 16
('25px', 41), # 2 * 16 + 25 - 1 * 16
# (0, 32), # See Layout.set_tabs
):
page, = parse('''
<style>
pre { tab-size: %s; font-family: ahem }
</style>
<pre>a	a</pre>
''' % value)
paragraph, = body_children(page)
line, = paragraph.children
assert line.width == width
@assert_no_logs
def test_text_transform():
"""Test the text-transform property."""
page, = parse('''
<style>
p { text-transform: capitalize }
p+p { text-transform: uppercase }
p+p+p { text-transform: lowercase }
p+p+p+p { text-transform: full-width }
p+p+p+p+p { text-transform: none }
</style>
<p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p><p>hé lO1</p>
''')
p1, p2, p3, p4, p5 = body_children(page)
line1, = p1.children
text1, = line1.children
assert text1.text == 'Hé Lo1'
line2, = p2.children
text2, = line2.children
assert text2.text == 'HÉ LO1'
line3, = p3.children
text3, = line3.children
assert text3.text == 'hé lo1'
line4, = p4.children
text4, = line4.children
assert text4.text == '\uff48é\u3000\uff4c\uff2f\uff11'
line5, = p5.children
text5, = line5.children
assert text5.text == 'hé lO1'
|
gpl-3.0
| -650,783,121,996,463,400 | 31.103811 | 78 | 0.540211 | false |
bolkedebruin/airflow
|
tests/providers/amazon/aws/operators/test_s3_copy_object.py
|
1
|
4133
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
import boto3
from moto import mock_s3
from airflow.providers.amazon.aws.operators.s3_copy_object import S3CopyObjectOperator
class TestS3CopyObjectOperator(unittest.TestCase):
def setUp(self):
self.source_bucket = "bucket1"
self.source_key = "path1/data.txt"
self.dest_bucket = "bucket2"
self.dest_key = "path2/data_copy.txt"
@mock_s3
def test_s3_copy_object_arg_combination_1(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.source_bucket)
conn.create_bucket(Bucket=self.dest_bucket)
conn.upload_fileobj(Bucket=self.source_bucket,
Key=self.source_key,
Fileobj=io.BytesIO(b"input"))
# there should be nothing found before S3CopyObjectOperator is executed
self.assertFalse('Contents' in conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key))
op = S3CopyObjectOperator(task_id="test_task_s3_copy_object",
source_bucket_key=self.source_key,
source_bucket_name=self.source_bucket,
dest_bucket_key=self.dest_key,
dest_bucket_name=self.dest_bucket)
op.execute(None)
objects_in_dest_bucket = conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key)
        # exactly one object should be found
self.assertEqual(len(objects_in_dest_bucket['Contents']), 1)
# the object found should be consistent with dest_key specified earlier
self.assertEqual(objects_in_dest_bucket['Contents'][0]['Key'], self.dest_key)
@mock_s3
def test_s3_copy_object_arg_combination_2(self):
conn = boto3.client('s3')
conn.create_bucket(Bucket=self.source_bucket)
conn.create_bucket(Bucket=self.dest_bucket)
conn.upload_fileobj(Bucket=self.source_bucket,
Key=self.source_key,
Fileobj=io.BytesIO(b"input"))
# there should be nothing found before S3CopyObjectOperator is executed
self.assertFalse('Contents' in conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key))
source_key_s3_url = "s3://{}/{}".format(self.source_bucket, self.source_key)
dest_key_s3_url = "s3://{}/{}".format(self.dest_bucket, self.dest_key)
op = S3CopyObjectOperator(task_id="test_task_s3_copy_object",
source_bucket_key=source_key_s3_url,
dest_bucket_key=dest_key_s3_url)
op.execute(None)
objects_in_dest_bucket = conn.list_objects(Bucket=self.dest_bucket,
Prefix=self.dest_key)
        # exactly one object should be found
self.assertEqual(len(objects_in_dest_bucket['Contents']), 1)
# the object found should be consistent with dest_key specified earlier
self.assertEqual(objects_in_dest_bucket['Contents'][0]['Key'], self.dest_key)
|
apache-2.0
| 2,913,947,881,592,704,000 | 45.438202 | 86 | 0.620615 | false |
consultit/Ely
|
ely/direct/data_structures_and_algorithms/ch12/decorated_merge_sort.py
|
1
|
1681
|
# Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .merge_array import merge_sort
class _Item:
"""Lightweight composite to store decorated value for sorting."""
__slots__ = '_key', '_value'
def __init__(self, k, v):
self._key = k
self._value = v
def __lt__(self, other):
return self._key < other._key # compare items based on their keys
def decorated_merge_sort(data, key=None):
"""Demonstration of the decorate-sort-undecorate pattern."""
if key is not None:
for j in range(len(data)):
data[j] = _Item(key(data[j]), data[j]) # decorate each element
merge_sort(data) # sort with existing algorithm
if key is not None:
for j in range(len(data)):
data[j] = data[j]._value # undecorate each element
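# --- Illustrative usage (added sketch, not part of the original module) ---
# The decorate-sort-undecorate pattern above can be exercised from a caller
# in the same package (this module uses a relative import of merge_sort):
#
#     data = ['pear', 'fig', 'banana', 'apple']
#     decorated_merge_sort(data, key=len)    # sort by word length
#     # data is now ['fig', 'pear', 'apple', 'banana']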
|
lgpl-3.0
| -2,266,771,482,216,979,700 | 38.093023 | 86 | 0.654967 | false |
rplevka/robottelo
|
tests/foreman/sys/test_rename.py
|
1
|
10708
|
"""Test class for ``katello-change-hostname``
:Requirement: katello-change-hostname
:CaseAutomation: Automated
:CaseLevel: System
:CaseComponent: satellite-change-hostname
:Assignee: pondrejk
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from fauxfactory import gen_string
from nailgun import entities
from robottelo.config import settings
from robottelo.ssh import get_connection
BCK_MSG = "**** Hostname change complete! ****"
BAD_HN_MSG = (
"{0} is not a valid fully qualified domain name. Please use a valid FQDN and try again."
)
NO_CREDS_MSG = "Username and/or Password options are missing!"
BAD_CREDS_MSG = "Unable to authenticate user admin"
@pytest.mark.run_in_one_thread
@pytest.mark.destructive
class TestRenameHost:
"""Implements ``katello-change-hostname`` tests"""
@pytest.mark.skip_if_open("BZ:1925616")
def test_positive_rename_satellite(self, module_org, module_product):
"""run katello-change-hostname on Satellite server
:id: 9944bfb1-1440-4820-ada8-2e219f09c0be
:setup: Satellite server with synchronized rh and custom
repos and with a registered host
:steps:
1. Rename Satellite using katello-change-hostname
            2. Do basic checks for hostname change (hostnamectl)
3. Run some existence tests, as in backup testing
4. Verify certificates were properly recreated, check
for instances of old hostname
in etc/foreman-installer/scenarios.d/
5. Check for updated repo urls, installation media paths,
updated internal capsule
6. Check usability of entities created before rename:
resync repos, republish CVs and re-register hosts
7. Create new entities (run end-to-end test from robottelo)
:BZ: 1469466, 1897360, 1901983, 1925616
:expectedresults: Satellite hostname is successfully updated
and the server functions correctly
:CaseImportance: Critical
:CaseAutomation: Automated
"""
username = settings.server.admin_username
password = settings.server.admin_password
with get_connection() as connection:
old_hostname = connection.run('hostname').stdout[0]
new_hostname = f'new-{old_hostname}'
# create installation medium with hostname in path
medium_path = 'http://{}/testpath-{}/os/'.format(old_hostname, gen_string('alpha'))
medium = entities.Media(organization=[module_org], path_=medium_path).create()
repo = entities.Repository(product=module_product, name='testrepo').create()
result = connection.run(
'satellite-change-hostname {} -y -u {} -p {}'.format(
new_hostname, username, password
),
timeout=1200,
)
assert result.return_code == 0, 'unsuccessful rename'
assert BCK_MSG in result.stdout
# services running after rename?
result = connection.run('hammer ping')
assert result.return_code == 0, 'services did not start properly'
# basic hostname check
result = connection.run('hostname')
assert result.return_code == 0
assert new_hostname in result.stdout, 'hostname left unchanged'
# check default capsule
result = connection.run(
'hammer -u {1} -p {2} --output json capsule \
info --name {0}'.format(
new_hostname, username, password
),
output_format='json',
)
assert result.return_code == 0, 'internal capsule not renamed correctly'
assert result.stdout['url'] == f"https://{new_hostname}:9090"
# check old consumer certs were deleted
result = connection.run(f'rpm -qa | grep ^{old_hostname}')
assert result.return_code == 1, 'old consumer certificates not removed'
# check new consumer certs were created
result = connection.run(f'rpm -qa | grep ^{new_hostname}')
assert result.return_code == 0, 'new consumer certificates not created'
# check if installation media paths were updated
result = connection.run(
'hammer -u {1} -p {2} --output json \
medium info --id {0}'.format(
medium.id, username, password
),
output_format='json',
)
assert result.return_code == 0
assert new_hostname in result.stdout['path'], 'medium path not updated correctly'
# check answer file for instances of old hostname
ans_f = '/etc/foreman-installer/scenarios.d/satellite-answers.yaml'
result = connection.run(f'grep " {old_hostname}" {ans_f}')
assert (
result.return_code == 1
), 'old hostname was not correctly replaced in answers.yml'
# check repository published at path
result = connection.run(
'hammer -u {1} -p {2} --output json \
repository info --id {0}'.format(
repo.id, username, password
),
output_format='json',
)
assert result.return_code == 0
assert (
new_hostname in result.stdout['published-at']
), 'repository published path not updated correctly'
repo.sync()
cv = entities.ContentView(organization=module_org).create()
cv.repository = [repo]
cv.update(['repository'])
cv.publish()
def test_negative_rename_sat_to_invalid_hostname(self):
"""change to invalid hostname on Satellite server
:id: 385fad60-3990-42e0-9436-4ebb71918125
:BZ: 1485884
:expectedresults: script terminates with a message, hostname
is not changed
:CaseAutomation: Automated
"""
username = settings.server.admin_username
password = settings.server.admin_password
with get_connection() as connection:
original_name = connection.run('hostname').stdout[0]
hostname = gen_string('alpha')
result = connection.run(
'satellite-change-hostname -y \
{} -u {} -p {}'.format(
hostname, username, password
),
output_format='plain',
)
assert result.return_code == 1
assert BAD_HN_MSG.format(hostname) in result.stdout
# assert no changes were made
result = connection.run('hostname')
            assert original_name == result.stdout[0], "Invalid hostname assigned"
def test_negative_rename_sat_no_credentials(self):
"""change hostname without credentials on Satellite server
:id: ed4f7611-33c9-455f-8557-507cc59ede92
:BZ: 1485884
:expectedresults: script terminates with a message, hostname
is not changed
:CaseAutomation: Automated
"""
with get_connection() as connection:
original_name = connection.run('hostname').stdout[0]
hostname = gen_string('alpha')
result = connection.run(
f'satellite-change-hostname -y {hostname}', output_format='plain'
)
assert result.return_code == 1
assert NO_CREDS_MSG in result.stdout
# assert no changes were made
result = connection.run('hostname')
            assert original_name == result.stdout[0], "Invalid hostname assigned"
@pytest.mark.skip_if_open("BZ:1925616")
def test_negative_rename_sat_wrong_passwd(self):
"""change hostname with wrong password on Satellite server
:id: e6d84c5b-4bb1-4400-8022-d01cc9216936
:BZ: 1485884, 1897360, 1925616
:expectedresults: script terminates with a message, hostname
is not changed
:CaseAutomation: Automated
"""
username = settings.server.admin_username
with get_connection() as connection:
original_name = connection.run('hostname').stdout[0]
new_hostname = f'new-{original_name}'
password = gen_string('alpha')
result = connection.run(
'satellite-change-hostname -y \
{} -u {} -p {}'.format(
new_hostname, username, password
),
output_format='plain',
)
assert result.return_code == 1
assert BAD_CREDS_MSG in result.stderr
@pytest.mark.stubbed
def test_positive_rename_capsule(self):
"""run katello-change-hostname on Capsule
:id: 4aa9fd86-bba9-49e4-a67a-8685e1ab5a74
:setup: Capsule server registered to Satellite, with common features
enabled, with synchronized content and a host registered to it
:steps:
1. Rename Satellite using katello-change-hostname
            2. Do basic checks for hostname change (hostnamectl)
3. Verify certificates were properly recreated, check
for instances of old hostname
in etc/foreman-installer/scenarios.d/
4. Re-register Capsule to Satellite, resync content
5. Re-register old host, register new one to Satellite,
6. Check hosts can consume content, run basic REX command,
import Puppet environments from hosts
:BZ: 1469466, 1473614
:expectedresults: Capsule hostname is successfully updated
            and the capsule functions correctly
:CaseAutomation: Automated
"""
# Save original hostname, get credentials, eventually will
# end up in setUpClass
# original_name = settings.server.hostname
username = settings.server.admin_username
password = settings.server.admin_password
# the rename part of the test, not necessary to run from robottelo
with get_connection() as connection:
hostname = gen_string('alpha')
result = connection.run(
'satellite-change-hostname -y -u {} -p {}\
--disable-system-checks\
--scenario capsule {}'.format(
username, password, hostname
),
output_format='plain',
)
assert result.return_code == 0
assert BCK_MSG in result.stdout
|
gpl-3.0
| 1,660,779,668,490,670,600 | 38.080292 | 95 | 0.590493 | false |
kalhartt/dnestpy
|
examples/example_uistring.py
|
1
|
1067
|
#!/usr/bin/python2.7
import sqlalchemy as sqla
import codecs, re
uistring = '/mnt/500G/Games/dragonnest/extract/resource/uistring/uistring.xml'
message_re = re.compile(r'<message mid="(\d+)"><!\[CDATA\[(.+)\]\]></message>', re.UNICODE|re.DOTALL)
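# The pattern above matches one <message> element per line, for example
# (illustrative only): <message mid="1000"><![CDATA[Some text]]></message>
# Group 1 captures the numeric mid, group 2 captures the CDATA payload.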
def readlines(f, bufsize):
    """Yield lines split on CRLF, reading the file in chunks of bufsize characters."""
buf = u''
data = True
while data:
data = f.read(bufsize)
buf += data
lines = buf.split('\r\n')
buf = lines.pop()
for line in lines:
yield line
yield buf
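# Note on the generator above: the uistring dump is read in fixed-size chunks and
# split on the '\r\n' line endings it uses; the trailing partial line is carried
# over in `buf` until the next chunk completes it, and yielded last at EOF.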
messages = []
with codecs.open(uistring, encoding='utf-8', mode='r') as f:
for line in readlines(f, 524288):
match = message_re.match(line)
if match:
messages.append({ 'id' : int(match.group(1)), '_Message' : match.group(2) })
engine = sqla.create_engine('sqlite:///dnt.db', echo=False)
metadata = sqla.MetaData()
table = sqla.Table('UISTRING', metadata,
sqla.Column('id', sqla.Integer, primary_key=True),
sqla.Column('_Message', sqla.Text))
metadata.create_all(engine)
engine.connect().execute(table.insert(), messages)
|
gpl-2.0
| 7,095,719,596,313,183,000 | 31.333333 | 101 | 0.621368 | false |