text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
---|---|---|---|---|---|---|
#!/usr/bin/env python3
"""
URL: http://docs.graphene-python.org/en/latest/types/enums/
"""
import graphene
class Episode(graphene.Enum):
NEWHOPE = 4
EMPIRE = 5
JEDI = 6
@property
def description(self):
if self == Episode.NEWHOPE:
return 'New Hope Episode'
return 'Other episode'
| garciparedes/python-examples | web/django/graphene/graphene-quickstart/lesson-02-enums.py | Python | mpl-2.0 | 332 | 0 |
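As a side note on the entry above: graphene's Enum is built on Python's standard enum, so the description-property pattern can be illustrated with the standard library alone. The sketch below is a plain-enum analogue for clarity, not the graphene API itself.

```python
# Plain-enum analogue of the pattern in the entry above: graphene.Enum wraps
# Python's standard enum, so the description property behaves the same way.
# Shown with the standard library only, so it runs without graphene installed.
from enum import Enum

class Episode(Enum):
    NEWHOPE = 4
    EMPIRE = 5
    JEDI = 6

    @property
    def description(self):
        if self == Episode.NEWHOPE:
            return 'New Hope Episode'
        return 'Other episode'

print(Episode.NEWHOPE.description)  # -> New Hope Episode
print(Episode.JEDI.description)     # -> Other episode
```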
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate a Lennard-Jones liquid with charges. The P3M method is used to
calculate electrostatic interactions.
"""
import numpy as np
import espressomd
required_features = ["P3M", "WCA"]
espressomd.assert_features(required_features)
from espressomd import electrostatics
import argparse
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--cpu", action="store_const", dest="mode",
const="cpu", help="P3M on CPU", default="cpu")
group.add_argument("--gpu", action="store_const", dest="mode",
const="gpu", help="P3M on GPU")
args = parser.parse_args()
print("""
=======================================================
= p3m.py =
=======================================================
""")
# System parameters
#############################################################
box_l = 10
density = 0.3
# Interaction parameters (repulsive Lennard-Jones)
#############################################################
wca_eps = 10.0
wca_sig = 1.0
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
np.random.seed(seed=42)
system.time_step = 0.01
system.cell_system.skin = 0.4
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 30
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * wca_sig
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps, sigma=wca_sig)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].wca.get_params())
# Particle setup
#############################################################
volume = box_l**3
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("Simulate {} particles in a cubic box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print("Start with minimal distance {}".format(act_min_dist))
# Assign charges to particles
for i in range(n_part // 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# P3M setup after charge assignment
#############################################################
print("\nSCRIPT--->Create p3m\n")
if args.mode == "gpu":
p3m = electrostatics.P3MGPU(prefactor=2.0, accuracy=1e-2)
else:
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
print("\nSCRIPT--->Add actor\n")
system.actors.add(p3m)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print("\nSCRIPT--->Explicit tune call\n")
p3m.tune(accuracy=1e3)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print(system.actors)
#############################################################
# Warmup Integration #
#############################################################
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=wca_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(warm_steps)
i += 1
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# Just to see what else we may get from the C++ core
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())
#############################################################
# Integration #
#############################################################
print("\nStart integration: run {} times {} steps"
.format(int_n_times, int_steps))
for i in range(int_n_times):
print("run {} at time={:.2f}".format(i, system.time))
system.integrator.run(int_steps)
energies = system.analysis.energy()
print(energies['total'])
# terminate program
print("\nFinished.")
| KaiSzuttor/espresso | samples/p3m.py | Python | gpl-3.0 | 5,654 | 0.000531 |
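For background on the `wca_eps`/`wca_sig` parameters in the sample above, here is a standalone NumPy sketch of the Weeks-Chandler-Andersen (purely repulsive Lennard-Jones) potential that the WCA interaction implements. This is plain NumPy for illustration only and does not use the ESPResSo API.

```python
import numpy as np

def wca_potential(r, epsilon=10.0, sigma=1.0):
    """Weeks-Chandler-Andersen potential: the Lennard-Jones potential shifted
    up by epsilon and cut off at its minimum r_c = 2**(1/6) * sigma, so only
    the repulsive core remains."""
    r = np.asarray(r, dtype=float)
    r_cut = 2.0 ** (1.0 / 6.0) * sigma
    lj = 4.0 * epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6) + epsilon
    return np.where(r < r_cut, lj, 0.0)

# Values matching the sample's wca_eps=10.0, wca_sig=1.0:
print(wca_potential([0.9, 1.0, 1.2]))  # repulsive below ~1.122*sigma, zero beyond
```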
"""autogenerated by genpy from hrl_lib/Pose3DOF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Pose3DOF(genpy.Message):
_md5sum = "646ead44a0e6fecf4e14ca116f12b08b"
_type = "hrl_lib/Pose3DOF"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
float64 x
float64 y
float64 theta
float64 dt
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','x','y','theta','dt']
_slot_types = ['std_msgs/Header','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,x,y,theta,dt
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Pose3DOF, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
if self.dt is None:
self.dt = 0.
else:
self.header = std_msgs.msg.Header()
self.x = 0.
self.y = 0.
self.theta = 0.
self.dt = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_4d = struct.Struct("<4d")
| HailStorm32/Q.bo_stacks | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/msg/_Pose3DOF.py | Python | lgpl-2.1 | 5,992 | 0.019526 |
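A sketch of round-tripping the generated message through its serialize/deserialize methods. It assumes a ROS Python environment where the generated hrl_lib.msg package and std_msgs are importable; the import path mirrors the file's location and is illustrative.

```python
# Illustrative only: assumes a ROS environment where the generated package is
# on the Python path; the import below mirrors the file's package layout.
from io import BytesIO

from hrl_lib.msg import Pose3DOF  # assumed import path for the class above

msg = Pose3DOF(x=1.0, y=2.0, theta=0.5, dt=0.05)
msg.header.frame_id = 'odom'

buff = BytesIO()
msg.serialize(buff)  # header (3 uint32 + frame_id), then x, y, theta, dt as 4 doubles

copy = Pose3DOF()
copy.deserialize(buff.getvalue())
print(copy.header.frame_id, copy.x, copy.y, copy.theta, copy.dt)
```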
import pytest
from collections import OrderedDict
from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table,
split_kv_pairs, unsplit_lines, ParseException, SkipException)
SPLIT_TEST_1 = """
# Comment line
keyword1 = value1 # Inline comments
# Comment indented
keyword3 # Key with no separator
keyword2 = value2a=True, value2b=100M
""".strip()
SPLIT_TEST_1_OD = OrderedDict([
('keyword1', 'value1'),
('keyword3', ''),
('keyword2', 'value2a=True, value2b=100M')
])
SPLIT_TEST_2 = """
@ Comment line
keyword1: value1 @ Inline comments
keyword2 : value2a=True, value2b=100M
@ Comment indented
keyword3 @ Key with no separator
""".strip()
OFFSET_CONTENT_1 = """
data 1 line
data 2 line
""".strip()
OFFSET_CONTENT_2 = """
#
Warning line
Error line
data 1 line
data 2 line
Trailing line
Blank line above
Another trailing line
Yet another trailing line
Yet yet another trailing line
""".strip()
def test_split_kv_pairs():
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines())
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True, ordered=True)
assert len(kv_pairs) == 3
assert kv_pairs == SPLIT_TEST_1_OD
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':')
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
SPLIT_LINES = """
Line one
Line two part 1 \\
line two part 2\\
line two part 3
Line three
""".strip()
SPLIT_LINES_2 = """
Line one
Line two part 1 ^
line two part 2^
line two part 3
Line three^
""".strip()
SPLIT_LINES_3 = """
web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue,
RHN::Task::ErrataEngine,
RHN::Task::DailySummary, RHN::Task::SummaryPopulation,
RHN::Task::RHNProc,
RHN::Task::PackageCleanup
db_host ="""
def test_unsplit_lines():
lines = list(unsplit_lines(SPLIT_LINES.splitlines()))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three'
lines = list(unsplit_lines(SPLIT_LINES_2.splitlines(), cont_char='^'))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three' # test continuation on last line
# Test keeping continuation character on line
lines = list(unsplit_lines(
SPLIT_LINES_3.splitlines(), cont_char=',', keep_cont_char=True
))
assert len(lines) == 4
assert lines[0] == ''
assert lines[1] == 'web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup'
assert lines[2] == ''
assert lines[3] == 'db_host ='
def test_calc_offset():
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[None]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data ']) == 0
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(), target=['xdata '])
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['data '],
invert_search=True)
assert calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['Trailing', 'Blank', 'Another '],
invert_search=True) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data ']) == 3
assert calc_offset(reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2']) == 3
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2'],
require_all=True) == 4
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True,
require_all=True) == 6
FIXED_CONTENT_1 = """
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1A = """
WARNING
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1B = """
Column1 Column2 Column3
data1 data 2
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_2 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_3 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_4 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column 2 Column 3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
data10
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_5 = """
Column1 Column 2 Column 3
data1 data 2 data 3
data 7 data 9
data10
""".strip()
FIXED_CONTENT_DUP_HEADER_PREFIXES = """
NAMESPACE NAME LABELS
default foo app=superawesome
""".strip()
def test_parse_fixed_table():
data = parse_fixed_table(FIXED_CONTENT_1.splitlines())
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1A.splitlines(), heading_ignore=['Column1 '])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1B.splitlines())
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': ''}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_2.splitlines(), heading_ignore=['Column1 '])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_3.splitlines(),
heading_ignore=['Column1 '],
trailing_ignore=['Trailing', 'Another'])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_4.splitlines(),
heading_ignore=['Column1 '],
header_substitute=[('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
trailing_ignore=['Trailing', 'Another'])
assert len(data) == 4
assert data[0] == {'Column1': 'data1', 'Column_2': 'data 2', 'Column_3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column_2': 'data5', 'Column_3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column_2': '', 'Column_3': 'data 9'}
assert data[3] == {'Column1': 'data10', 'Column_2': '', 'Column_3': ''}
# Test that if we search for trailing data that is always found, then we
# should get the whole thing parsed as a table from the header line
data = parse_fixed_table(
['foo' + line for line in FIXED_CONTENT_4.splitlines()],
heading_ignore=['fooColumn1 '],
header_substitute=[('fooColumn1', 'Column1'), ('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
trailing_ignore=['foo']
)
assert len(data) == 6
assert data[4] == {'Column1': 'fooTrailing', 'Column_2': 'non-data li', 'Column_3': 'ne'}
assert data[5] == {'Column1': 'foo Another', 'Column_2': 'trailing no', 'Column_3': 'n-data line'}
data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines())
assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'}
data = parse_fixed_table(FIXED_CONTENT_5.splitlines())
assert len(data) == 3
def test_parse_fixed_table_empty_exception():
with pytest.raises(ParseException) as pe:
parse_fixed_table(FIXED_CONTENT_1B.splitlines(), empty_exception=True)
assert "Incorrect line:" in str(pe.value)
def test_optlist_standard():
d = optlist_to_dict('key1,key2=val2,key1=val1,key3')
assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3'])
assert d['key1'] == 'val1'
assert d['key2'] == 'val2'
assert d['key3'] is True
def test_optlist_no_vals():
d = optlist_to_dict('key1,key2=val2,key1=val1,key3', kv_sep=None)
assert sorted(d.keys()) == sorted(['key1', 'key1=val1', 'key2=val2', 'key3'])
assert d['key1'] is True
assert d['key1=val1'] is True
assert d['key2=val2'] is True
assert d['key3'] is True
def test_optlist_strip_quotes():
d = optlist_to_dict(
'''key1="foo",key2='bar',key3="mismatched quotes',key4="inner'quotes"''',
strip_quotes=True
)
assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3', 'key4'])
assert d['key1'] == 'foo'
assert d['key2'] == 'bar'
assert d['key3'] == '"mismatched quotes\''
assert d['key4'] == "inner'quotes"
def test_optlist_with_spaces():
d = optlist_to_dict(
'''key1=foo, key2=bar'''
)
assert 'key1' in d
assert 'key2' in d
PS_AUX_TEST = """
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 /sbin/init
root 1821 0.0 0.0 0 0 ? S May31 0:25 [kondemand/0]
root 1864 0.0 0.0 18244 668 ? Ss May31 0:05 irqbalance --pid=/var/run/irqbalance.pid
user1 20160 0.0 0.0 108472 1896 pts/3 Ss 10:09 0:00 bash
root 20357 0.0 0.0 9120 760 ? Ss 10:09 0:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient-extbr0.leases -pf /var/run/dhclient-extbr0.pid extbr0
qemu 22673 0.8 10.2 1618556 805636 ? Sl 11:38 1:07 /usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad
"""
MISSING_DATA_TEST = """
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_PV_FMT|LVM2_PV_UUID|LVM2_DEV_SIZE|LVM2_PV_NAME|LVM2_PV_MAJOR|LVM2_PV_MINOR|LVM2_PV_MDA_FREE|LVM2_PV_MDA_SIZE|LVM2_PV_EXT_VSN|LVM2_PE_START|LVM2_PV_SIZE|LVM2_PV_FREE|LVM2_PV_USED|LVM2_PV_ATTR|LVM2_PV_ALLOCATABLE|LVM2_PV_EXPORTED|LVM2_PV_MISSING|LVM2_PV_PE_COUNT|LVM2_PV_PE_ALLOC_COUNT|LVM2_PV_TAGS|LVM2_PV_MDA_COUNT|LVM2_PV_MDA_USED_COUNT|LVM2_PV_BA_START|LVM2_PV_BA_SIZE|LVM2_PV_IN_USE|LVM2_PV_DUPLICATE|LVM2_VG_NAME
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
"""
SUBSTITUTE_HEADERS_TEST = """
address,port,state,read-only
0.0.0.0,3000,LISTEN,N
10.76.19.184,37500,ESTAB,Y
""".strip()
POSTGRESQL_LOG = """
schema | table | rows
public | rhnsnapshotpackage | 47428950
public | rhnpackagefile | 32174333
public | rhnpackagecapability | 12934215
public | rhnpackagechangelogrec | 11269933
public | rhnchecksum | 10129746
public | rhnactionconfigrevision | 2894957
public | rhnpackageprovides | 2712442
public | rhnpackagerequires | 2532861
public | rhn_command_target | 1009152
public | rhnconfigfilename | 0
public | rhnxccdfidentsystem | 0
public | rhndistchannelmap | 0
public | rhnactionvirtshutdown | 0
public | rhnpublicchannelfamily | 0
(402 rows)
""".strip() # Normally has a --- separator line, which is ignored using get_active_lines
TABLE1 = """
THIS IS A HEADER
this is some content_with_blank_prefix
This is more content
""".strip()
TABLE2 = [
"SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE",
"HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe",
"HA2| 22| D22| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe"
]
TABLE3 = """
THIS | IS | A | HEADER
this ^ is ^ some ^ content
This ^ is ^ more ^ content
""".strip()
def test_parse_delimited_table():
# No content? No table.
assert parse_delimited_table([]) == []
# Test maximum splits and header 'ignore', which should actually be
# called 'header_startswith'
tbl = parse_delimited_table(
PS_AUX_TEST.splitlines(), max_splits=10, heading_ignore=['USER']
)
assert tbl
assert isinstance(tbl, list)
assert len(tbl) == 6
assert isinstance(tbl[0], dict)
assert tbl[0] == {
'%MEM': '0.0', 'TTY': '?', 'VSZ': '19356', 'PID': '1', '%CPU': '0.0',
'START': 'May31', 'COMMAND': '/sbin/init', 'USER': 'root',
'STAT': 'Ss', 'TIME': '0:01', 'RSS': '1544'
}
assert tbl[5]['COMMAND'] == \
'/usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad'
# Test trailing ignore not found
tbl = parse_delimited_table(
MISSING_DATA_TEST.splitlines(), delim='|',
heading_ignore=['LVM2_PV_FMT'],
trailing_ignore=['WARNING', 'ERROR', 'Cannot get lock']
)
assert isinstance(tbl, list)
assert len(tbl) == 0
# Header substitution
tbl = parse_delimited_table(
SUBSTITUTE_HEADERS_TEST.splitlines(), delim=',', strip=False,
header_substitute=[('read-only', 'read_only')]
)
assert tbl
assert isinstance(tbl, list)
assert len(tbl) == 2
assert isinstance(tbl[1], dict)
assert tbl[1] == {
'address': '10.76.19.184', 'port': '37500', 'state': 'ESTAB', 'read_only': 'Y'
}
# Test change of delimiter and trailing_ignore
tbl = parse_delimited_table(POSTGRESQL_LOG.splitlines(), delim='|', trailing_ignore=['('])
assert isinstance(tbl, list)
assert len(tbl) == 14
assert isinstance(tbl[0], dict)
assert tbl[0] == {
'schema': 'public', 'table': 'rhnsnapshotpackage', 'rows': '47428950'
}
# Test using different header delimiter
result = parse_delimited_table(TABLE3.splitlines(), delim="^", header_delim="|")
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
expected = [{"THIS": "this", "IS": "is", "A": "some", "HEADER": "content"},
{"THIS": "This", "IS": "is", "A": "more", "HEADER": "content"}]
assert expected == result
# Test explicit None as header delimiter, different from content delimiter
result = parse_delimited_table(TABLE2, delim='|', header_delim=None)
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
expected = [{"SID": "HA2", "Nr": "16", "Instance": "D16", "SAPLOCALHOST": "lu0417",
"Version": "749, patch 10, changelist 1698137",
"DIR_EXECUTABLE": "/usr/sap/HA2/D16/exe"},
{"SID": "HA2", "Nr": "22", "Instance": "D22", "SAPLOCALHOST": "lu0417",
"Version": "749, patch 10, changelist 1698137",
"DIR_EXECUTABLE": "/usr/sap/HA2/D22/exe"}]
assert expected == result
# Test raw_line_key
TABLE1_SP = TABLE1.splitlines()
result = parse_delimited_table(TABLE1_SP, raw_line_key='raw_line')
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
# Get the RAW line
assert result[0]['raw_line'] == TABLE1_SP[1]
DATA_LIST = [
{'name': 'test 1', 'role': 'server', 'memory_gb': 16, 'ssd': True},
{'name': 'test 2', 'role': 'server', 'memory_gb': 256, 'ssd': False},
{'name': 'test 3', 'role': 'server', 'memory_gb': 16, 'ssd': False},
{'name': 'test 4', 'role': 'embedded', 'memory_gb': 1, 'ssd': False},
{'name': 'test 5', 'role': 'workstation', 'memory_gb': 16, 'ssd': True},
]
CERT_LIST = [
{
'status': 'MONITORING',
'stuck': 'no',
'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM/pwdfile.txt'",
'certificate': {
'type': 'NSSDB',
'location': '/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',
'nickname': 'Server-Cert',
'token': 'NSS Certificate DB',
},
'CA': 'IPA',
'issuer': 'CN=Certificate Authority,O=LDAP.EXAMPLE.COM',
'subject': 'CN=master.LDAP.EXAMPLE.COM,O=LDAP.EXAMPLE.COM',
'expires': '2017-06-28 12:52:12 UTC',
'eku': 'id-kp-serverAuth,id-kp-clientAuth',
'pre-save command': '',
'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv LDAP-EXAMPLE-COM',
'track': 'yes',
'auto-renew': 'yes',
}, {
'status': 'MONITORING',
'stuck': 'no',
'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-PKI-IPA/pwdfile.txt'",
'certificate': {
'type': 'NSSDB',
'location': '/etc/dirsrv/slapd-PKI-IPA',
'nickname': 'Server-Cert',
'token': 'NSS Certificate DB',
},
'CA': 'IPA',
'issuer': 'CN=Certificate Authority,O=EXAMPLE.COM',
'subject': 'CN=ldap.EXAMPLE.COM,O=EXAMPLE.COM',
'expires': '2017-06-28 12:52:13 UTC',
'eku': 'id-kp-serverAuth,id-kp-clientAuth',
'pre-save command': '',
'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv PKI-IPA',
'track': 'yes',
'auto-renew': 'yes',
'dash- space': 'tested',
}
]
def test_keyword_search():
# No keywords, no result
assert len(keyword_search(DATA_LIST)) == 0
# Search on absent keywords produces empty list
assert keyword_search(DATA_LIST, cpu_count=4) == []
# Search on present but non-matching keyword produces empty list
assert keyword_search(DATA_LIST, memory_gb=8) == []
# Single result - search on string
results = keyword_search(DATA_LIST, role='embedded')
assert len(results) == 1
assert results[0] == DATA_LIST[3]
# Multiple results, name has underscore - search on integer
results = keyword_search(DATA_LIST, memory_gb=16)
assert len(results) == 3
assert results == [DATA_LIST[i] for i in (0, 2, 4)]
# Search on boolean
results = keyword_search(DATA_LIST, ssd=False)
assert len(results) == 3
assert results == [DATA_LIST[i] for i in (1, 2, 3)]
# No data, no results.
assert len(keyword_search([], role='server')) == 0
# Search with contains
results = keyword_search(DATA_LIST, role__contains='e')
assert len(results) == 4
assert results == [DATA_LIST[i] for i in (0, 1, 2, 3)]
# Search with startswith
results = keyword_search(DATA_LIST, role__startswith='e')
assert len(results) == 1
assert results[0] == DATA_LIST[3]
# Search for multiple keys, with spaces and dashes, and search operators
results = keyword_search(
CERT_LIST,
pre_save_command='',
key_pair_storage__startswith="type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA'"
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Make sure contains can also apply to keys with dashes and spaces
results = keyword_search(
CERT_LIST,
post_save_command__contains='PKI-IPA',
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Lower case value matching
results = keyword_search(
CERT_LIST,
status__lower_value='Monitoring',
)
assert len(results) == 2
assert results == CERT_LIST
# Check that searches for keys with two underscores that aren't matcher
# suffixes still work
results = keyword_search(
CERT_LIST,
dash__space='tested',
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Check that we can use contains to check the contents of a dictionary
# in a value
results = keyword_search(
CERT_LIST,
certificate__contains='type'
)
assert len(results) == 2
assert results == CERT_LIST
assert keyword_search(
CERT_LIST,
certificate__contains='encryption'
) == []
PS_LIST = [
{'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'},
{'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'},
{'PID': '725', 'PPID': '2', 'COMMAND': 'xfsalloc', '_line': ' 725 2 xfsalloc'},
{'PID': '726', 'PPID': '2', 'COMMAND': None, '_line': ' 726 2 grep -F xx'},
]
def test_keyword_search_None():
# Normal search
assert keyword_search(PS_LIST, COMMAND__default=None)[0]['PID'] == '726'
assert keyword_search(PS_LIST, _line__contains='alloc')[0]['PID'] == '725'
assert keyword_search(PS_LIST, COMMAND__startswith='xfs')[0]['PID'] == '725'
assert len(keyword_search(PS_LIST, COMMAND__lower_value='KDMFLUSH')) == 2
# Check that searches for non-existing keys
assert keyword_search(PS_LIST, NONE__default=None) == []
assert keyword_search(PS_LIST, NONE__startswith='xfs') == []
def test_parse_exception():
with pytest.raises(ParseException) as e_info:
raise ParseException('This is a parse exception')
assert 'This is a parse exception' == str(e_info.value)
def test_skip_exception():
with pytest.raises(SkipException) as e_info:
raise SkipException('This is a skip exception')
assert 'This is a skip exception' == str(e_info.value)
| RedHatInsights/insights-core | insights/parsers/tests/test_parsers_module.py | Python | apache-2.0 | 24,552 | 0.002403 |
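A compact usage sketch of two helpers exercised by the tests above, optlist_to_dict and split_kv_pairs. The inputs mirror the test data, so the expected results follow directly from those tests; it assumes insights-core is installed.

```python
# Minimal sketch, assuming insights-core is installed; inputs mirror the test
# data above, so the expected results follow directly from those tests.
from insights.parsers import optlist_to_dict, split_kv_pairs

# Comma-separated option list: bare keys become True, key=value pairs are split.
opts = optlist_to_dict('key1,key2=val2,key1=val1,key3')
print(opts)  # {'key1': 'val1', 'key2': 'val2', 'key3': True}

# Config-style lines: '#' comments are stripped; keys without a separator are
# dropped unless use_partition=True is passed.
lines = """
# Comment line
keyword1 = value1 # Inline comments
keyword2 = value2a=True, value2b=100M
""".strip().splitlines()
print(split_kv_pairs(lines))
# {'keyword1': 'value1', 'keyword2': 'value2a=True, value2b=100M'}
```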
"""Screen database."""
import redis_client
import control
import re
from twisted.internet import defer
class ScreenDB(object):
"""A screen database."""
def __init__(self):
"""Default constructor."""
pass
def set_mode(self, screen, mode):
redis_client.connection.set('screen:{0}:mode'.format(screen),
mode)
redis_client.connection.publish('screen:update', 'update')
def set_override(self, screen, override):
if override is not None:
redis_client.connection.set('screen:{0}:override'.format(screen),
override)
else:
redis_client.connection.delete('screen:{0}:override'.format(screen))
redis_client.connection.publish('screen:update', 'update')
@defer.inlineCallbacks
def list(self):
screens = yield redis_client.connection.keys('screen:*:mode')
entries = {}
for screen in screens:
screenID = screen.split(':')[1]
mode = yield redis_client.connection.get('screen:{0}:mode'.format(screenID))
host = yield redis_client.connection.get('screen:{0}:host'.format(screenID))
entries[screenID] = {'mode': mode,
'host': host}
defer.returnValue(entries)
screens = ScreenDB()
@control.handler('screen-list')
@defer.inlineCallbacks
def perform_screen_list(responder, options):
screen_list = yield screens.list()
for screen, settings in screen_list.iteritems():
if settings['host'] is None:
online_string = 'offline'
else:
online_string = 'online from {0} port {1}'.format(*settings['host'].split(' '))
responder('{0} - {1} ({2})'.format(screen,
settings['mode'],
online_string))
@control.handler('screen-set-mode')
def perform_screen_set_mode(responder, options):
screens.set_mode(options['<id>'], options['<mode>'])
responder('Mode set.')
@control.handler('screen-override')
def perform_screen_override(responder, options):
screens.set_override(options['<id>'], options['<message>'])
responder('Override set.')
@control.handler('screen-clear-override')
def perform_screen_clear_override(responder, options):
screens.set_override(options['<id>'], None)
responder('Override cleared.')
def got_screen(name):
control.broadcast('Screen connected: {0}'.format(name))
redis_client.add_subscribe('screen:connect', got_screen)
| prophile/compd | src/screen_db.py | Python | mit | 2,580 | 0.005039 |
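For reference, a sketch of the Redis key layout the ScreenDB class above reads and writes, shown with the synchronous redis-py client rather than the project's twisted-based redis_client wrapper. The key names come straight from the code; the screen name is a placeholder.

```python
# Sketch only: uses plain synchronous redis-py instead of the project's
# twisted-based redis_client module, purely to show the key layout.
import redis

r = redis.Redis()

screen_id = 'lobby'                                          # hypothetical screen name
r.set('screen:{0}:mode'.format(screen_id), 'scores')         # what set_mode() writes
r.set('screen:{0}:override'.format(screen_id), 'Welcome')    # what set_override() writes
r.publish('screen:update', 'update')                         # change notification the class publishes

# list() scans screen:*:mode and also reads screen:<id>:host, which is set by
# the screen process itself when it connects.
for key in r.keys('screen:*:mode'):
    sid = key.decode().split(':')[1]
    print(sid,
          r.get('screen:{0}:mode'.format(sid)),
          r.get('screen:{0}:host'.format(sid)))
```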
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render, redirect
from django.views import generic
from common.models import Discipline, Performance
# renders index / home page
def index(request):
return redirect(reverse('tournaments.main'))
# renders todo page
def process(request):
return render(request, 'gymnastics/process.html', None)
def performances_index(request):
context = { 'performances': Performance.objects.all() \
.select_related('athlete').select_related('discipline') }
return render(request, 'gymnastics/performances/index.html', context)
class PerformanceCreateView(generic.CreateView):
model = Performance
fields = ['athlete', 'discipline', 'value', 'value_final']
template_name = 'gymnastics/performances/new.html'
success_url = reverse_lazy('performances.index')
class PerformanceDetailView(generic.DetailView):
model = Performance
template_name = 'gymnastics/performances/detail.html'
class PerformanceUpdateView(generic.UpdateView):
model = Performance
fields = ['athlete', 'discipline', 'value', 'value_final']
template_name = 'gymnastics/performances/edit.html'
def get_success_url(self):
some_kwargs = self.kwargs
return reverse('performances.detail', kwargs = { 'pk' : self.kwargs['pk'] })
class PerformanceDeleteView(generic.DeleteView):
model = Performance
template_name = 'gymnastics/performances/delete.html'
success_url = reverse_lazy('performances.index')
def disciplines_index(request):
context = { 'disciplines': Discipline.objects.all() }
return render(request, 'gymnastics/disciplines/index.html', context)
def discipline_detail(request, id, slug):
discipline = Discipline.objects.get(id=id)
streams = discipline.stream_set.all()
performances = discipline.performance_set.all().select_related('athlete')
context = {
'discipline': discipline,
'streams': streams,
'performances': performances
}
return render(request, 'gymnastics/disciplines/detail.html', context)
class DisciplineCreateView(generic.CreateView):
model = Discipline
fields = ['name']
template_name = 'gymnastics/disciplines/new.html'
class DisciplineUpdateView(generic.UpdateView):
model = Discipline
fields = ['name']
template_name = 'gymnastics/disciplines/edit.html'
class DisciplineDeleteView(generic.DeleteView):
model = Discipline
template_name = 'gymnastics/disciplines/delete.html'
success_url = reverse_lazy('disciplines.index')
| saechtner/turn-events | Turnauswertung-py3/common/views.py | Python | mit | 2,597 | 0.004621 |
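The views above resolve names such as 'performances.index', 'performances.detail' and 'disciplines.index'. Below is a hypothetical URLconf sketch showing how those names might be wired up in the same Django 1.x style the file uses (it imports from django.core.urlresolvers). The route patterns and module path are assumptions, not taken from the repository; only the view names and URL names come from the file.

```python
# Hypothetical urls.py sketch (Django 1.x style, matching the era of the
# imports above); route patterns are assumptions, only the views and the URL
# names used by reverse()/reverse_lazy() come from the file.
from django.conf.urls import url

from common import views

urlpatterns = [
    url(r'^performances/$', views.performances_index, name='performances.index'),
    url(r'^performances/(?P<pk>\d+)/$', views.PerformanceDetailView.as_view(), name='performances.detail'),
    url(r'^performances/(?P<pk>\d+)/edit/$', views.PerformanceUpdateView.as_view(), name='performances.edit'),
    url(r'^performances/(?P<pk>\d+)/delete/$', views.PerformanceDeleteView.as_view(), name='performances.delete'),
    url(r'^disciplines/$', views.disciplines_index, name='disciplines.index'),
    url(r'^disciplines/(?P<id>\d+)-(?P<slug>[\w-]+)/$', views.discipline_detail, name='disciplines.detail'),
]
```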
"""Import an Irish NaPTAN XML file, obtainable from
https://data.dublinked.ie/dataset/national-public-transport-nodes/resource/6d997756-4dba-40d8-8526-7385735dc345
"""
import warnings
import zipfile
import xml.etree.cElementTree as ET
from django.contrib.gis.geos import Point
from django.core.management.base import BaseCommand
from ...models import Locality, AdminArea, StopPoint
class Command(BaseCommand):
ns = {'naptan': 'http://www.naptan.org.uk/'}
@staticmethod
def add_arguments(parser):
parser.add_argument('filenames', nargs='+', type=str)
def handle_stop(self, element):
stop = StopPoint(
atco_code=element.find('naptan:AtcoCode', self.ns).text,
locality_centre=element.find('naptan:Place/naptan:LocalityCentre', self.ns).text == 'true',
active=element.get('Status') == 'active',
)
for subelement in element.find('naptan:Descriptor', self.ns):
tag = subelement.tag[27:]
if tag == 'CommonName':
stop.common_name = subelement.text
elif tag == 'Street':
stop.street = subelement.text
elif tag == 'Indicator':
stop.indicator = subelement.text.lower()
else:
warnings.warn('Stop {} has an unexpected property: {}'.format(stop.atco_code, tag))
stop_classification_element = element.find('naptan:StopClassification', self.ns)
stop_type = stop_classification_element.find('naptan:StopType', self.ns).text
if stop_type != 'class_undefined':
stop.stop_type = stop_type
bus_element = stop_classification_element.find('naptan:OnStreet/naptan:Bus', self.ns)
if bus_element is not None:
stop.bus_stop_type = bus_element.find('naptan:BusStopType', self.ns).text
stop.timing_status = bus_element.find('naptan:TimingStatus', self.ns).text
compass_point_element = bus_element.find(
'naptan:MarkedPoint/naptan:Bearing/naptan:CompassPoint', self.ns
)
if compass_point_element is not None:
stop.bearing = compass_point_element.text
if stop.bus_stop_type == 'type_undefined':
stop.bus_stop_type = ''
place_element = element.find('naptan:Place', self.ns)
location_element = place_element.find('naptan:Location', self.ns)
longitude_element = location_element.find('naptan:Longitude', self.ns)
latitude_element = location_element.find('naptan:Latitude', self.ns)
if longitude_element is None:
warnings.warn('Stop {} has no location'.format(stop.atco_code))
else:
stop.latlong = Point(float(longitude_element.text), float(latitude_element.text))
admin_area_id = element.find('naptan:AdministrativeAreaRef', self.ns).text
if not AdminArea.objects.filter(atco_code=admin_area_id).exists():
AdminArea.objects.create(id=admin_area_id, atco_code=admin_area_id, region_id='NI')
stop.admin_area_id = admin_area_id
locality_element = place_element.find('naptan:NptgLocalityRef', self.ns)
if locality_element is not None:
if not Locality.objects.filter(id=locality_element.text).exists():
Locality.objects.create(id=locality_element.text, admin_area_id=admin_area_id)
stop.locality_id = locality_element.text
stop.save()
def handle_file(self, archive, filename):
with archive.open(filename) as open_file:
iterator = ET.iterparse(open_file)
for _, element in iterator:
tag = element.tag[27:]
if tag == 'StopPoint':
self.handle_stop(element)
element.clear()
def handle(self, *args, **options):
for filename in options['filenames']:
with zipfile.ZipFile(filename) as archive:
for filename in archive.namelist():
self.handle_file(archive, filename)
| stev-0/bustimes.org.uk | busstops/management/commands/import_ie_naptan_xml.py | Python | mpl-2.0 | 4,087 | 0.003181 |
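A sketch of invoking the management command above programmatically. call_command is standard Django API and the command takes one or more zip filenames (see its add_arguments); the zip filename and settings module are placeholders.

```python
# Sketch only: the command accepts one or more zip filenames (nargs='+');
# 'ie_naptan.zip' is a placeholder, not a file from the repository.
import django
from django.core.management import call_command

django.setup()  # assumes DJANGO_SETTINGS_MODULE is configured for the project
call_command('import_ie_naptan_xml', 'ie_naptan.zip')
```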
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.db import db
from indico.modules.events.contributions.models.fields import ContributionFieldValueBase
from indico.util.string import format_repr, text_to_repr
class AbstractFieldValue(ContributionFieldValueBase):
"""Store a field values related to abstracts."""
__tablename__ = 'abstract_field_values'
__table_args__ = {'schema': 'event_abstracts'}
contribution_field_backref_name = 'abstract_values'
abstract_id = db.Column(
db.Integer,
db.ForeignKey('event_abstracts.abstracts.id'),
index=True,
nullable=False,
primary_key=True
)
# relationship backrefs:
# - abstract (Abstract.field_values)
def __repr__(self):
text = text_to_repr(self.data) if isinstance(self.data, str) else self.data
return format_repr(self, 'abstract_id', 'contribution_field_id', _text=text)
| pferreir/indico | indico/modules/events/abstracts/models/fields.py | Python | mit | 1,096 | 0.002737 |
"""
Infobip Client API Libraries OpenAPI Specification
OpenAPI specification containing public endpoints supported in client API libraries. # noqa: E501
The version of the OpenAPI document: 1.0.172
Contact: support@infobip.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from infobip_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class SmsDestination(ModelNormal):
"""
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"to": (str,), # noqa: E501
"message_id": (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"to": "to", # noqa: E501
"message_id": "messageId", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, to, *args, **kwargs): # noqa: E501
"""SmsDestination - a model defined in OpenAPI
Args:
to (str): Message destination address. Addresses must be in international format (Example: `41793026727`).
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
message_id (str): The ID that uniquely identifies the message sent.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.to = to
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
| infobip/infobip-api-python-client | infobip_api_client/model/sms_destination.py | Python | apache-2.0 | 6,770 | 0.000295 |
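A minimal construction sketch for the generated model above: per its constructor docstring, `to` is the only required argument and `message_id` is optional. The phone number is the placeholder used in the docstring itself and the message ID is a made-up example.

```python
# Minimal sketch, assuming infobip-api-client is installed; the phone number is
# the placeholder from the class docstring, the message ID is a made-up value.
from infobip_api_client.model.sms_destination import SmsDestination

dest = SmsDestination(to="41793026727")                  # 'to' is required
tracked = SmsDestination(to="41793026727",
                         message_id="example-message-id")  # optional messageId

print(dest.to, tracked.message_id)
```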
import pytest
sa = pytest.importorskip("sqlalchemy")
import os
import responses
import flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import event
from flask_caching import Cache
from flask_login import LoginManager, UserMixin, current_user, login_user, logout_user
from flask_dance.consumer import OAuth2ConsumerBlueprint, oauth_authorized, oauth_error
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin, SQLAlchemyStorage
try:
import blinker
except ImportError:
blinker = None
requires_blinker = pytest.mark.skipif(not blinker, reason="requires blinker")
pytestmark = [pytest.mark.usefixtures("responses")]
@pytest.fixture
def blueprint():
"Make a OAuth2 blueprint for a fictional OAuth provider"
bp = OAuth2ConsumerBlueprint(
"test-service",
__name__,
client_id="client_id",
client_secret="client_secret",
state="random-string",
base_url="https://example.com",
authorization_url="https://example.com/oauth/authorize",
token_url="https://example.com/oauth/access_token",
redirect_url="/oauth_done",
)
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"foobar","token_type":"bearer","scope":""}',
)
return bp
@pytest.fixture
def db():
"Make a Flask-SQLAlchemy instance"
return SQLAlchemy()
@pytest.fixture
def app(blueprint, db, request):
"Make a Flask app, attach Flask-SQLAlchemy, and establish an app context"
app = flask.Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URI", "sqlite://")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["CACHE_TYPE"] = "simple"
app.secret_key = "secret"
app.register_blueprint(blueprint, url_prefix="/login")
db.init_app(app)
# establish app context
ctx = app.app_context()
ctx.push()
request.addfinalizer(ctx.pop)
return app
class record_queries:
"""
A context manager for recording the SQLAlchemy queries that were executed
in a given context block.
"""
def __init__(self, target, identifier="before_cursor_execute"):
self.target = target
self.identifier = identifier
def record_query(self, conn, cursor, statement, parameters, context, executemany):
self.queries.append(statement)
def __enter__(self):
self.queries = []
event.listen(self.target, self.identifier, self.record_query)
return self.queries
def __exit__(self, exc_type, exc_value, traceback):
event.remove(self.target, self.identifier, self.record_query)
def test_sqla_storage_without_user(app, db, blueprint, request):
class OAuth(OAuthConsumerMixin, db.Model):
pass
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 2
# check the database
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
def test_sqla_model_repr(app, db, request):
class MyAwesomeOAuth(OAuthConsumerMixin, db.Model):
pass
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
o = MyAwesomeOAuth()
assert "MyAwesomeOAuth" in repr(o)
o.provider = "supercool"
assert 'provider="supercool"' in repr(o)
o.token = {"access_token": "secret"}
assert "secret" not in repr(o)
db.session.add(o)
db.session.commit()
assert "id=" in repr(o)
assert "secret" not in repr(o)
def test_sqla_storage(app, db, blueprint, request):
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# for now, we'll assume that Alice is the only user
alice = User(name="Alice")
db.session.add(alice)
db.session.commit()
# load alice's ID -- this issues a database query
alice.id
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=alice)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 3
# check the database
alice = User.query.first()
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.user_id == alice.id
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
def test_sqla_load_token_for_user(app, db, blueprint, request):
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# set token storage
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
# make users and OAuth tokens for several people
alice = User(name="Alice")
alice_token = {"access_token": "alice123", "token_type": "bearer"}
alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service")
bob = User(name="Bob")
bob_token = {"access_token": "bob456", "token_type": "bearer"}
bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service")
sue = User(name="Sue")
sue_token = {"access_token": "sue789", "token_type": "bearer"}
sue_oauth = OAuth(user=sue, token=sue_token, provider="test-service")
db.session.add_all([alice, bob, sue, alice_oauth, bob_oauth, sue_oauth])
db.session.commit()
# by default, we should not have a token for anyone
sess = blueprint.session
assert not sess.token
assert not blueprint.token
# load token for various users
blueprint.config["user"] = alice
assert sess.token == alice_token
assert blueprint.token == alice_token
blueprint.config["user"] = bob
assert sess.token == bob_token
assert blueprint.token == bob_token
blueprint.config["user"] = alice
assert sess.token == alice_token
assert blueprint.token == alice_token
blueprint.config["user"] = sue
assert sess.token == sue_token
assert blueprint.token == sue_token
# load for user ID as well
del blueprint.config["user"]
blueprint.config["user_id"] = bob.id
assert sess.token == bob_token
assert blueprint.token == bob_token
# try deleting user tokens
del blueprint.token
assert sess.token == None
assert blueprint.token == None
# shouldn't affect alice's token
blueprint.config["user_id"] = alice.id
assert sess.token == alice_token
assert blueprint.token == alice_token
def test_sqla_flask_login(app, db, blueprint, request):
login_manager = LoginManager(app)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# create some users
u1 = User(name="Alice")
u2 = User(name="Bob")
u3 = User(name="Chuck")
db.session.add_all([u1, u2, u3])
db.session.commit()
# configure login manager
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# set alice as the logged in user
sess["_user_id"] = u1.id
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 4
# lets do it again, with Bob as the logged in user -- he gets a different token
responses.reset()
responses.add(
responses.POST,
"https://example.com/oauth/access_token",
body='{"access_token":"abcdef","token_type":"bearer","scope":"bob"}',
)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# set bob as the logged in user
sess["_user_id"] = u2.id
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 4
# check the database
authorizations = OAuth.query.all()
assert len(authorizations) == 2
u1_oauth = OAuth.query.filter_by(user=u1).one()
assert u1_oauth.provider == "test-service"
assert u1_oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
u2_oauth = OAuth.query.filter_by(user=u2).one()
assert u2_oauth.provider == "test-service"
assert u2_oauth.token == {
"access_token": "abcdef",
"token_type": "bearer",
"scope": ["bob"],
}
u3_oauth = OAuth.query.filter_by(user=u3).all()
assert len(u3_oauth) == 0
@requires_blinker
def test_sqla_flask_login_misconfigured(app, db, blueprint, request):
login_manager = LoginManager(app)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# configure login manager
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
calls = []
def callback(*args, **kwargs):
calls.append((args, kwargs))
oauth_error.connect(callback)
request.addfinalizer(lambda: oauth_error.disconnect(callback))
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(calls) == 1
assert calls[0][0] == (blueprint,)
error = calls[0][1]["error"]
assert isinstance(error, ValueError)
assert str(error) == "Cannot set OAuth token without an associated user"
@requires_blinker
def test_sqla_flask_login_anon_to_authed(app, db, blueprint, request):
login_manager = LoginManager(app)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# configure login manager
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
# create a user object when OAuth succeeds
def logged_in(sender, token):
assert token
assert blueprint == sender
resp = sender.session.get("/user")
user = User(name=resp.json()["name"])
login_user(user)
db.session.add(user)
db.session.commit()
flask.flash("Signed in successfully")
oauth_authorized.connect(logged_in, blueprint)
request.addfinalizer(lambda: oauth_authorized.disconnect(logged_in, blueprint))
# mock out the `/user` API call
responses.add(
responses.GET, "https://example.com/user", body='{"name":"josephine"}'
)
with record_queries(db.engine) as queries:
with app.test_client() as client:
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 5
# check the database
users = User.query.all()
assert len(users) == 1
user = users[0]
assert user.name == "josephine"
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.provider == "test-service"
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
assert oauth.user_id == user.id
def test_sqla_flask_login_preload_logged_in_user(app, db, blueprint, request):
# need a URL to hit, so that tokens will be loaded, but result is irrelevant
responses.add(responses.GET, "https://example.com/noop")
login_manager = LoginManager(app)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# create some users, and tokens for some of them
alice = User(name="Alice")
alice_token = {"access_token": "alice123", "token_type": "bearer"}
alice_oauth = OAuth(user=alice, token=alice_token, provider="test-service")
bob = User(name="Bob")
bob_token = {"access_token": "bob456", "token_type": "bearer"}
bob_oauth = OAuth(user=bob, token=bob_token, provider="test-service")
chuck = User(name="Chuck")
# chuck doesn't get a token
db.session.add_all([alice, alice_oauth, bob, bob_oauth, chuck])
db.session.commit()
# configure login manager
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
# create a simple view
@app.route("/")
def index():
return "success"
with app.test_request_context("/"):
login_user(alice)
# hit /noop to load tokens
blueprint.session.get("/noop")
# now the flask-dance session should have Alice's token loaded
assert blueprint.session.token == alice_token
with app.test_request_context("/"):
# set bob as the logged in user
login_user(bob)
# hit /noop to load tokens
blueprint.session.get("/noop")
# now the flask-dance session should have Bob's token loaded
assert blueprint.session.token == bob_token
with app.test_request_context("/"):
# now let's try chuck
login_user(chuck)
blueprint.session.get("/noop")
assert blueprint.session.token == None
with app.test_request_context("/"):
# no one is logged in -- this is an anonymous user
logout_user()
with pytest.raises(ValueError):
blueprint.session.get("/noop")
def test_sqla_flask_login_no_user_required(app, db, blueprint, request):
# need a URL to hit, so that tokens will be loaded, but result is irrelevant
responses.add(responses.GET, "https://example.com/noop")
login_manager = LoginManager(app)
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
class OAuth(OAuthConsumerMixin, db.Model):
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User)
blueprint.storage = SQLAlchemyStorage(
OAuth, db.session, user=current_user, user_required=False
)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# configure login manager
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
# create a simple view
@app.route("/")
def index():
return "success"
with app.test_request_context("/"):
# no one is logged in -- this is an anonymous user
logout_user()
# this should *not* raise an error
blueprint.session.get("/noop")
assert blueprint.session.token == None
def test_sqla_delete_token(app, db, blueprint, request):
class OAuth(OAuthConsumerMixin, db.Model):
pass
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# Create an existing OAuth token for the service
existing = OAuth(
provider="test-service",
token={"access_token": "something", "token_type": "bearer", "scope": ["blah"]},
)
db.session.add(existing)
db.session.commit()
assert len(OAuth.query.all()) == 1
assert blueprint.token == {
"access_token": "something",
"token_type": "bearer",
"scope": ["blah"],
}
del blueprint.token
assert blueprint.token == None
assert len(OAuth.query.all()) == 0
def test_sqla_overwrite_token(app, db, blueprint, request):
class OAuth(OAuthConsumerMixin, db.Model):
pass
blueprint.storage = SQLAlchemyStorage(OAuth, db.session)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
# Create an existing OAuth token for the service
existing = OAuth(
provider="test-service",
token={"access_token": "something", "token_type": "bearer", "scope": ["blah"]},
)
db.session.add(existing)
db.session.commit()
assert len(OAuth.query.all()) == 1
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 2
# check that the database record was overwritten
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == {
"access_token": "foobar",
"token_type": "bearer",
"scope": [""],
}
def test_sqla_cache(app, db, blueprint, request):
cache = Cache(app)
class OAuth(OAuthConsumerMixin, db.Model):
pass
blueprint.storage = SQLAlchemyStorage(OAuth, db.session, cache=cache)
db.create_all()
def done():
db.session.remove()
db.drop_all()
request.addfinalizer(done)
with record_queries(db.engine) as queries:
with app.test_client() as client:
# reset the session before the request
with client.session_transaction() as sess:
sess["test-service_oauth_state"] = "random-string"
# make the request
resp = client.get(
"/login/test-service/authorized?code=secret-code&state=random-string",
base_url="https://a.b.c",
)
# check that we redirected the client
assert resp.status_code == 302
assert resp.headers["Location"] == "https://a.b.c/oauth_done"
assert len(queries) == 2
expected_token = {"access_token": "foobar", "token_type": "bearer", "scope": [""]}
# check the database
authorizations = OAuth.query.all()
assert len(authorizations) == 1
oauth = authorizations[0]
assert oauth.provider == "test-service"
assert isinstance(oauth.token, dict)
assert oauth.token == expected_token
# cache should be invalidated
assert cache.get("flask_dance_token|test-service|None") is None
# first reference to the token should generate SQL queries
with record_queries(db.engine) as queries:
assert blueprint.token == expected_token
assert len(queries) == 1
# should now be in the cache
assert cache.get("flask_dance_token|test-service|None") == expected_token
# subsequent references should not generate SQL queries
with record_queries(db.engine) as queries:
assert blueprint.token == expected_token
assert len(queries) == 0
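# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): a minimal way the
# SQLAlchemyStorage exercised by these tests is typically wired into an app.
# It assumes Flask-SQLAlchemy, Flask-Login and the GitHub contrib blueprint;
# the database URI, secret key and OAuth credentials are placeholders.
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, current_user
from flask_dance.contrib.github import make_github_blueprint
from flask_dance.consumer.storage.sqla import OAuthConsumerMixin, SQLAlchemyStorage

app = Flask(__name__)
app.secret_key = "change-me"                                   # placeholder
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///oauth.db"   # placeholder
db = SQLAlchemy(app)
login_manager = LoginManager(app)

class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)

class OAuth(OAuthConsumerMixin, db.Model):
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))
    user = db.relationship(User)

@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))

github_bp = make_github_blueprint(client_id="...", client_secret="...")
# tokens are stored per logged-in user, exactly as the tests above exercise
github_bp.storage = SQLAlchemyStorage(OAuth, db.session, user=current_user)
app.register_blueprint(github_bp, url_prefix="/login")
# ---------------------------------------------------------------------------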
| singingwolfboy/flask-dance | tests/consumer/storage/test_sqla.py | Python | mit | 24,167 | 0.001407 |
#!/usr/bin/python2
#
# Copyright 2012 Abid Hasan Mujtaba
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: Abid H. Mujtaba
# Email: abid.naqvi83@gmail.com
#
# Start Date: Aug. 9, 2012
# Last Revised: Sep. 24, 2012
#
#
# This script is intended as a program that reads a configuration file and uses the information stored therein to connect to a variety of IMAP servers and display header information about the emails in various folders (INBOX by default). It also has the capability of deleting selected emails. The advantage is that only minimal information (certain header fields) needs to be downloaded rather than entire emails, and one can choose to delete unnecessary emails judging by the sender and/or subject alone.
# Enable Python 3.x style print function:
from __future__ import print_function
import re
# Create global variables that implement global settings which are used by the following functions.
maxThreads = 5 # This value will be over-written by the global default and possibly a command-line argument
colorTitle = None
colorFlag = None
colorFrom = None
colorDate = None
colorSubjectSeen = None
colorSubjectUnseen = None
showFlags = None
def setOptions( configFile, configSpecFile ) :
'''
This function reads in the options from the configuration file and validates them using the configuration specification file passed to it. It creates a dictionary of options for each account, which is used by the pollAccount() function to carry out its tasks. Additionally, this function reads the 'global' section of the configuration file and creates the globalSettings dictionary that contains the global settings for the program.
'''
from configobj import ConfigObj, ConfigObjError, flatten_errors
from validate import Validator
# Note the following code segment concerned with using ConfigObj and validating the entries has been inspired and in part copied from http://www.voidspace.org.uk/python/articles/configobj.shtml (an excellent tutorial on using ConfigObj by its author(s))
try:
config = ConfigObj( configFile, configspec = configSpecFile, file_error = True )
except (ConfigObjError, IOError), e:
print( 'Could not read "%s": %s' % (configFile, e) )
validator = Validator()
results = config.validate( validator )
if results != True : # Validation failed. Inform user of offending entries.
for (section_list, key, _) in flatten_errors( config, results ) :
if key is not None :
print( 'The "%s" key in the section "%s" failed validation' % (key, ','.join( section_list ) ) )
else :
print( 'The following section was missing: %s' % ','.join( section_list ) )
import sys
sys.exit(1)
# Validation successful, so we move on to creating the 'servers' dictionary. We are implementing a default-account paradigm which is not natively supported by ConfigObj. We want the ConfigParser behaviour where any option not provided in a subsection but contained in the 'DEFAULT' subsection is copied into it. To achieve this we will need to know which entries are missing in each subsection without having them filled in using the default values from the config.spec file. To that end we read in the config file without reading the spec file (hence no spec defaults are read in).
configNoSpec = ConfigObj( configFile ) # Note since config passed validation we automatically know that configNoSpec is also valid.
# The first step is to copy out the default account section dictionary and use it as the basic dictionary for all accounts. We will over-write the options that are provided in each account sub-section as we read them.
listDefaultOptions = configNoSpec[ 'accounts' ][ 'DEFAULT' ].keys() # List of Default options as EXPLICITLY provided in the configuration file (hence the use of configNoSpec as compared to just config)
listAccounts = [ x for x in config[ 'accounts' ].keys() if x != 'DEFAULT' ] # List of Accounts that does NOT contain 'DEFAULT'. We are basically carrying out list subtraction here: completely removing certain elements from the list by using list comprehension along with a predicate
# Note: Everywhere a value needs to be read in we must use 'config' and NOT 'configNoSpec' since 'config' by virtue of knowing the required type of each option reads in the values as the correct type rather than as a string which is what we want.
servers = {} # Empty dictionary which we will populate with account configuration information
for account in listAccounts :
servers[ account ] = {} # Create sub-dictionary for account
servers[ account ][ 'name' ] = account # Saving account name for identification and laster use when the sub-dictionary is passed to pollAccount
for key, value in config[ 'accounts' ][ account ].items() :
servers[ account ][ key ] = value # Copy configuration information
# So far we have stored in the dictionary (for this account) the values specified explicitly and the global defaults from config.spec that are automatically loaded for missing options. Now we must over-write with the options that are not explicitly given but ARE explicitly defined in the 'DEFAULT' section since they carry precedence over the global defaults defined in the config.spec file (which should not ideally be edited by the user but rather represents the creator's fall-back default values in case an option is completely deleted by the user in the config file)
# Now we create a list of the options that are explicitly in DEFAULT but NOT in the specific account (Note the use of configNoSpec rather than config) :
listMissingDefaults = [ x for x in listDefaultOptions if x not in configNoSpec[ 'accounts' ][ account ].keys() ]
for key in listMissingDefaults :
servers[ account ][ key ] = config[ 'accounts' ][ 'DEFAULT' ][ key ]
# Now we read in the global settings:
globalSettings = {} # Create empty dictionary to populate
for key in config[ 'global' ].keys() :
globalSettings[ key ] = config[ 'global' ][ key ]
return servers, globalSettings
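# Illustrative sketch of the configuration layout setOptions() expects (values
# are made up; the authoritative option names and defaults live in
# fetchheaders.conf.spec, and per-account connection details are omitted here):
#
#   [accounts]
#       [[DEFAULT]]
#       showUnseen = True
#       latestEmailFirst = True
#       [[Gmail]]
#       # connection settings (server, credentials, ...) per the spec file
#   [global]
#       maxThreads = 5
#       color = True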
def argParse() :
'''
This function reads in the arguments passed to the program, validates them and if validated returns a parser.parse_args() returned object which contains the various arguments passed and which can then be used by the program as it sees fit.
'''
import argparse # This module gives powerful argument parsing abilities along with auto-generation of --help output.
# Specify the various arguments that the program expects and validate them. Additional arguments can be added as required.
parser = argparse.ArgumentParser( description = "A python script which simultaneously polls multiple IMAP accounts to display the subjects of all or only unseen messages in the specified folder (INBOX by default) without downloading complete messages.\n For further details please read the man page." )
parser.add_argument( "-c", "--config", help = "Specify the name and path to the configuration file. If not specified the program will use the default configuration file in $HOME/.fetchheaders/fetchheaders.conf. Note: The configuration specification file (fetchheaders.conf.spec) should not be altered casually and the program will only look for it in $HOME/.fetchheaders/" )
# For --accounts and --exclude which we wish to be mutually exclusive optional arguments we create a mutually exclusive group within the parser to hold them.
group = parser.add_mutually_exclusive_group()
group.add_argument( "-a", "--accounts", help = "Specify the names of IMAP accounts to be polled as a comma-separated list. e.g. -a Gmail,Hotmail. Only accounts specified in the configuration file are allowed." )
group.add_argument( "-x", "--exclude", help = "Specify the names of the IMAP accounts which are NOT to be polled, as a comma-separated list. e.g. -x Gmail,Hotmail. Only accounts specified in the configuration file are allowed to be excluded." )
parser.add_argument( "-n", "--numsonly", help = "Flag: Only show the number of unseen and total number of messages for the specified folder for each account.", action = "store_true" )
parser.add_argument( "--noColor", help = "Flag: Do NOT allow colored output. Useful for shells that don't allow colored text or when the output needs to piped to another application since colored text is implemented by encapsulating the text in xterm color escape codes.", action = "store_true" )
parser.add_argument( "--oldestFirst", help = "Flag: Show oldest email first i.e. chronological order.", action = "store_true" )
parser.add_argument( "-A", "--showAll", help = "Flag: Show all emails in specified folder, not just unseen ones.", action = "store_true" )
parser.add_argument( "--showFlags", help = "Flag: Show mutt-style flags (in square brackets) to indicate new/unseen and deleted emails when ALL emails are displayed (i.e. -A is issued).", action = "store_true" )
parser.add_argument( "-t", "--threads", help = "Specify the maximum number of parallel threads the program will use to simultaneously access IMAP servers. Set to 1 for serial (non-parallel) behaviour.", type = int)
parser.add_argument( "-T", "--terminal", help = "Flag: Show results in the terminal. Do NOT use urwid.", action = "store_true" )
# Begin reading in arguments and validate them:
args = parser.parse_args() # args contains the values of arguments passed. If incorrect arguments are passed the program will be stopped here and argparse will display the appropriate error and help message.
return args
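# A few hypothetical invocations using only the flags defined above (the
# account names follow the examples given in the option help strings):
#
#   fetchheaders.py -a Gmail,Hotmail          # poll only these two accounts
#   fetchheaders.py -x Gmail -A --showFlags   # poll all but Gmail, show all mail with flags
#   fetchheaders.py -n -t 1                   # counts only, no parallel threads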
def applyArgs( args, servers, globalSettings ) :
'''
This function accepts both the arguments read by the script and the 'servers' object (dictionary) created by setOptions(). It will apply the arguments sent via command-line to the 'servers' and 'globalSettings' object to create and return a modified version reflecting these changes.
'''
# This function is where we carry out all operations necessary to implement the settings specified by command-line arguments.
# -a, --acounts. Limit accounts to the specified ones:
if args.accounts : # True if -a or --accounts has been specified
# We must perform some error checking on the arguments passed to the --accounts optional argument
newServers = {} # Create a new dictionary we will populate ONLY with the specified accounts
for item in args.accounts.split( ',' ) : # We are expecting a comma-separated list
# We create a list of servers the START of whose names (lowercase) matches the item in the argument list currently under consideration
matching_servers = [x for x in servers.keys() if re.match('^' + item.lower(), x.lower())]
if matching_servers: # A match has occurred
for server in matching_servers: # All matching servers are added to the list displayed
newServers[ server ] = servers[ server ]
else: # No match has occurred. This is an error.
print( '\nError: ' + item + ' is not the beginning of a valid IMAP account name specified in the configuration file.' )
import sys
sys.exit(1)
servers = newServers
# -x, --exclude. Does NOT poll the accounts specified with this argument:
if args.exclude : # True if -x or --exclude has been specified
# We must perform some error checking on the arguments passed to the --exclude optional argument
excludedAccounts = [] # Empty list which we will populate with the excluded accounts
newServers = {} # Empty dictionary with which we will construct the new 'servers' dictionary without the excluded accounts
for item in args.exclude.split( ',' ) : # We are expecting a comma-separated list
if not item in servers.keys() : # If this item in the comma-separated list is NOT an account specified in the configuration file
print( '\nError: ' + item + ' is not a valid IMAP account name specified in the configuration file.' )
import sys
sys.exit(1)
else :
excludedAccounts.append( item )
# Now we remove the excluded accounts when we create the new 'servers' dictionary:
for account in servers.keys() :
if not account in excludedAccounts : # The current account is not in the excluded list and so can be added to the servers dictionary:
newServers[ account ] = servers[ account ]
# Place the newly constructed dictionary (with accounts excluded) into the original 'servers' dictionary:
servers = newServers
# -n, --numsonly. If specified only the total and unseen number of messages is to be displayed. Similar to 'fetchmail -c'.
if args.numsonly :
for account in servers.keys() :
servers[ account ][ 'showOnlyNums' ] = True
# -T, --terminal. If specified the output is displayed on the terminal (stdout) and 'urwid' is NOT used.
if args.terminal:
globalSettings[ 'terminal' ] = True
else : globalSettings[ 'terminal' ] = False
# --no-color. If specified the output of the program should NOT be colored.
if args.noColor :
globalSettings[ 'color' ] = False
# -A, --showAll. Show all emails not just unseen ones.
if args.showAll :
for account in servers.keys() :
servers[ account ][ 'showUnseen' ] = False
globalSettings[ 'showFlags' ] = True # Flags are shown by default whenever ALL emails are viewed whether --showFlags is passed or not.
# --oldestFirst. Show oldest email first i.e. in chronological order.
if args.oldestFirst :
for account in servers.keys() :
servers[ account ][ 'latestEmailFirst' ] = False
# --showFlags. Show mutt-style flags (in square brackets) when all emails are being displayed.
if args.showFlags :
globalSettings[ 'showFlags' ] = True
# -t, --threads. Set max. number of parallel threads.
if args.threads :
globalSettings[ 'maxThreads' ] = args.threads
return servers, globalSettings
def applyGlobalSettings( globalSettings ) :
'''
This function applies the global settings defined in the dictionary 'globalSettings' (created using the configuration file and command-line arguments).
'''
# Apply maxThreads setting:
global maxThreads
maxThreads = globalSettings[ 'maxThreads' ]
# Apply showFlags settings:
global showFlags
showFlags = globalSettings[ 'showFlags' ]
# Apply color settings:
if globalSettings[ 'color' ] : # output is to be colored
global colorTitle, colorFlag, colorDate, colorFrom, colorSubjectSeen, colorSubjectUnseen # Accessing global text color variables
colorTitle = globalSettings[ 'colorTitle' ]
colorFlag = globalSettings[ 'colorFlag' ]
colorSubjectSeen = globalSettings[ 'colorSubjectSeen' ]
colorSubjectUnseen = globalSettings[ 'colorSubjectUnseen' ]
colorDate = globalSettings[ 'colorDate' ]
colorFrom = globalSettings[ 'colorFrom' ]
def display( out ) :
'''
Accepts an Output data structure and prints out the results to the screen.
Note: This function carries out all formatting for the output using the purely data-oriented Output object as input. The output is in a text format which can be piped to other programs.
'''
from miscClasses import colorWidth as cW # Custom function that sets width of text fields and colors it.
print( cW( out.settings[ 'name' ] + ':', 12, colorTitle ), end = '' ) # Print name of account and allow for further text
if out.settings[ 'showNums' ] :
print( "( total: %d | unseen: %d )" % (out.numAll, out.numUnseen) )
print( '\n' )
# Preamble printed. Now start printing individual email information
if out.settings[ 'showUnseen' ] : # Show only unseen messages
for ii in range( len( out.emails ) ) :
email = out.emails[ ii ]
print( cW( str(ii + 1), out.numDigits, align = '>' ) + '. ' + cW( email.Date, 17, colorDate ) + ' ' + cW( email.From, 30, colorFrom ) + ' ' + cW( email.Subject, 120, colorSubjectUnseen, fill = False ) )
else : # Show ALL messages. Different formatting scheme.
if showFlags : # Global setting which declares that the flags associated with each message must be displayed
flags = lambda x : ' [ ' + cW( x, 2, colorFlag ) + '] '
else :
flags = lambda x : '. '
for ii in range( len( out.emails ) ) :
email = out.emails[ ii ]
if email.Seen : # Email has a Seen flag.
flag = ' '
colorSubject = colorSubjectSeen
else :
flag = 'N'
colorSubject = colorSubjectUnseen
print( cW( str(ii + 1), out.numDigits, align = '>' ) + flags( flag ) + cW( email.Date, 17, colorDate ) + ' ' + cW( email.From, 30, colorFrom ) + ' ' + cW( email.Subject, 120, colorSubject ) )
def main() :
'''
Main function that starts the execution of all of the code.
'''
args = argParse()
# Specify default locations for configuration and specification files:
import os
homeFolder = os.getenv( "HOME" ) # Basically the value in $HOME
packageFolder = '/usr/local/share/fetchheaders' # Location of folder containing all package files
# packageFolder = '.'
fileConf = homeFolder + '/.fetchheaders.conf'
fileSpec = packageFolder + '/fetchheaders.conf.spec' # Path to config specification file
# Check if a configuration file has been specified using the -c or --config flag.
if args.config : # A configuration file has been provided
fileConf = args.config
# Read in settings and options from configuration files :
servers, globalSettings = setOptions( fileConf, fileSpec )
# Override settings and options from command-line arguments :
servers, globalSettings = applyArgs( args, servers, globalSettings )
# Apply Global Settings. These are applied outside of pollAccount, which acts on each account independently.
applyGlobalSettings( globalSettings ) # Apply the global settings contained in the 'globalSettings' dictionary we created from the configuration file and command-line arguments
# Now we determine whether the output is intended to go to the terminal (stdout) straight or passed on to urwid
if globalSettings[ 'terminal' ]: # Do NOT use urwid
from miscClasses import threadedExec
for out in threadedExec( servers, maxThreads ):
if out.error: # If an error occurs while constructing the Output object the exception is caught and the error flag is set
from miscClasses import colorWidth as cW
print( cW( out.settings[ 'name' ] + ':', 12, colorTitle ), end = '' ) # We indicate in the output that an Error has occurred.
print( "Error!\n\n" )
else:
display(out)
else:
# Use urwid to display the results, interact with the display and possibly flag messages for deletion:
from urwidDisplay import urwidDisplay
# Create instance of the imported class to create and start the urwid loop to display emails
settings = { 'maxThreads': maxThreads, 'showFlags': showFlags }
urwidDisplay( servers, settings )
# Main execution of the program begins here:
main()
| abid-mujtaba/fetchheaders | fetchheaders.py | Python | apache-2.0 | 20,367 | 0.022193 |
# coding=UTF-8
from django.shortcuts import redirect
from bellum.common.alliance import isAllied
from bellum.common.session.login import must_be_logged
from bellum.common.session import getAccount, getRace
from bellum.common.session.mother import getCurrentMother
from djangomako.shortcuts import render_to_response, render_to_string
from bellum.space.models import Planet
from bellum.common.gui import PrimaryGUIObject
from bellum.common.fixtures.province_build import getCosts
from bellum.common.session import getRace, getAccount, getResourceIndex
from bellum.meta import MPBI
from bellum.province.models import Province, Reinforcement
from django.core.exceptions import ObjectDoesNotExist
from bellum.orders.models import LandarmyProvintionalStrikeOrder
from bellum.orders.models import LandarmyPlanetaryStrikeOrder, LandarmyProvintionalStrikeOrder, LandarmyMotherPickupOrder
from bellum.space.ajax.pinfo import dx_html
from bellum.common.fixtures.relocation import getRelocationTime
@must_be_logged
def process_onlyprovince(request, province_id):
try: # check province
province_id = int(province_id)
province = Province.objects.get(id=province_id)
except:
return redirect('/')
return process(request, province.planet.id, province_id=province_id)
@must_be_logged
def process(request, planet_id, province_id=None):
try: # check planet
planet = Planet.objects.get(id=planet_id)
except:
return redirect('/')
provinces = Province.objects.filter(planet=planet)
provinces_postprocessed = {}
prov = None
try: # facilitates using a GET parameter to select the province to zoom onto
if province_id != None:
provgrabber = province_id
else:
provgrabber = int(request.GET['province'])
except:
provgrabber = None
for province in provinces:
# province colour coding: green - mine, blue - allied, red - enemy, gray - nobody's
try:
province.provintionalpresence
except:
pv = 'gray'
else:
if province.provintionalpresence.owner == getAccount(request):
pv = 'green'
elif isAllied(getAccount(request), province.provintionalpresence.owner):
pv = 'blue'
else:
pv = 'red'
provinces_postprocessed[province.id] = [pv, False]
if province.id == provgrabber:
prov = province
try:
if province.provintionalpresence.owner == getAccount(request):
if prov == None:
prov = province
except:
pass
if prov == None:
prov = provinces[0]
provinces_postprocessed[prov.id][1] = True
mum = getCurrentMother(request)
sfx = dx_html(request, prov, mum).decode('utf8')
# can relocate?
can_relocate = False
relocation_time = None
if (planet != mum.duePosition()): # Different position than current
can_relocate = mum.canRelocate()
if can_relocate:
relocation_time = getRelocationTime(mum, getRace(request), mum.orbiting, planet)
# can scan?
can_scan = False
if getRace(request) == 1:
if mum.isRelocating() == False:
if mum.orbiting == planet:
can_scan = True
return render_to_response('space/planetview/planetview.html', {'htmldata':sfx,
'race':getRace(request),
'planet':planet,
'postprocessed':provinces_postprocessed,
'can_scan':can_scan,
'firstprovince':prov,
'can_relocate':can_relocate,
'relocation_time':relocation_time,
'wctg':lambda x: int((x+100.0)*(345.0/200.0)),
'pgo':PrimaryGUIObject(request)})
| piotrmaslanka/bellum | space/views/planetview.py | Python | agpl-3.0 | 4,242 | 0.009194 |
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2012 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
This module provides the Plugin class for document generator plugins.
"""
from . import Plugin
from .docgen import TextDoc, DrawDoc
class DocGenPlugin(Plugin):
"""
This class represents a plugin for generating documents from Gramps
"""
def __init__(self, name, description, basedoc,
paper, style, extension, docoptclass, basedocname):
"""
:param name: A friendly name to call this plugin.
Example: "Plain Text"
:type name: string
:param description: A short description of the plugin.
Example: "This plugin will generate text documents in plain text."
:type description: string
:param basedoc: A class that implements the BaseDoc
interface.
:type basedoc: BaseDoc
:param paper: Indicates whether the plugin uses paper or not.
True = use paper; False = do not use paper
:type paper: bool
:param style: Indicates whether the plugin uses styles or not.
True = use styles; False = do not use styles
:type style: bool
:param extension: The extension for the output file.
Example: "txt"
:type extension: str
:param docoptclass: either None or a subclass of DocOptions
:type docoptclass: either None or a DocOptions subclass
:param basedocname: The BaseDoc name of this plugin.
Example: "AsciiDoc"
:type basedocname: string
:return: nothing
"""
Plugin.__init__(self, name, description, basedoc.__module__)
self.__basedoc = basedoc
self.__paper = paper
self.__style = style
self.__extension = extension
self.__docoptclass = docoptclass
self.__basedocname = basedocname
def get_basedoc(self):
"""
Get the :class:`.BaseDoc` class for this plugin.
:return: the :class:`.BaseDoc` class passed into :meth:`__init__`
"""
return self.__basedoc
def get_paper_used(self):
"""
Get the paper flag for this plugin.
:return: bool - True = use paper; False = do not use paper
"""
return self.__paper
def get_style_support(self):
"""
Get the style flag for this plugin.
:return: bool - True = use styles; False = do not use styles
"""
return self.__style
def get_extension(self):
"""
Get the file extension for the output file.
:return: str
"""
return self.__extension
def get_doc_option_class(self):
"""
Get the :class:`.DocOptions` subclass for this plugin, if any
:return: the :class:`.DocOptions` subclass passed into :meth:`__init__`
"""
return self.__docoptclass
def get_basedocname(self):
"""
Get the :class:`.BaseDoc` name for this plugin.
:return: the :class:`.BaseDoc` name passed into :meth:`__init__`
"""
return self.__basedocname
def get_text_support(self):
"""
Check if the plugin supports the :class:`.TextDoc` interface.
:return: bool: True if :class:`.TextDoc` is supported; False if
:class:`.TextDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, TextDoc))
def get_draw_support(self):
"""
Check if the plugin supports the :class:`.DrawDoc` interface.
:return: bool: True if :class:`.DrawDoc` is supported; False if
:class:`.DrawDoc` is not supported.
"""
return bool(issubclass(self.__basedoc, DrawDoc))
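# Illustrative sketch (hypothetical backend): how this plugin class might be
# instantiated for a plain-text generator. `AsciiDoc` stands in for a TextDoc
# implementation defined elsewhere and is not part of this module.
#
#   plugin = DocGenPlugin(name="Plain Text",
#                         description="Generates plain text documents.",
#                         basedoc=AsciiDoc, paper=True, style=True,
#                         extension="txt", docoptclass=None,
#                         basedocname="AsciiDoc")
#   plugin.get_text_support()   # True when AsciiDoc subclasses TextDoc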
| sam-m888/gprime | gprime/plug/_docgenplugin.py | Python | gpl-2.0 | 4,499 | 0.000222 |
import traceback
from PyQt5 import QtCore
import androguard.session as session
from androguard.core import androconf
import logging
log = logging.getLogger("androguard.gui")
class FileLoadingThread(QtCore.QThread):
file_loaded = QtCore.pyqtSignal(bool)
def __init__(self, parent=None):
QtCore.QThread.__init__(self, parent)
self.parent = parent
self.file_path = None
self.incoming_file = ()
def load(self, file_path):
self.file_path = file_path
if file_path.endswith(".ag"):
self.incoming_file = (file_path, 'SESSION')
else:
file_type = androconf.is_android(file_path)
self.incoming_file = (file_path, file_type)
self.start(QtCore.QThread.LowestPriority)
def run(self):
if self.incoming_file:
try:
file_path, file_type = self.incoming_file
if file_type in ["APK", "DEX", "DEY"]:
ret = self.parent.session.add(file_path,
open(file_path, 'rb').read())
self.file_loaded.emit(ret)
elif file_type == "SESSION":
self.parent.session = session.Load(file_path)
self.file_loaded.emit(True)
else:
self.file_loaded.emit(False)
except Exception as e:
log.debug(e)
log.debug(traceback.format_exc())
self.file_loaded.emit(False)
self.incoming_file = []
else:
self.file_loaded.emit(False)
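# Illustrative sketch (widget and slot names are hypothetical): the thread is
# meant to be driven from a Qt parent that owns an androguard `session`:
#
#   thread = FileLoadingThread(parent=main_window)
#   thread.file_loaded.connect(main_window.on_file_loaded)  # slot receives a bool
#   thread.load("/path/to/app.apk")   # APK/DEX/DEY file or a saved ".ag" session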
| shuxin/androguard | androguard/gui/fileloading.py | Python | apache-2.0 | 1,625 | 0 |
from django.contrib import admin
from puzzle_captcha.models import Puzzle, PuzzlePiece
class PuzzlePieceInline(admin.StackedInline):
model = PuzzlePiece
readonly_fields = ('key', 'image', 'order')
can_delete = False
extra = 0
class PuzzleAdmin(admin.ModelAdmin):
list_display = ('key', 'rows', 'cols')
readonly_fields = ('key', 'rows', 'cols')
class Meta:
model = Puzzle
inlines = [
PuzzlePieceInline,
]
admin.site.register(Puzzle, PuzzleAdmin)
| MegaMark16/django-puzzle-captcha | puzzle_captcha/admin.py | Python | bsd-3-clause | 505 | 0.011881 |
"""
02-read-from-disk-2.py - Catching the `end-of-file` signal from the SfPlayer object.
This example demonstrates how to use the `end-of-file` signal
of the SfPlayer object to trigger another playback (possibly
with another sound, another speed, etc.).
When a SfPlayer reaches the end of the file, it sends a trigger
(more on trigger later) that the user can retrieve with the
syntax :
variable_name["trig"]
"""
from pyo import *
import random
s = Server().boot()
# Sound bank
folder = "../snds/"
sounds = ["alum1.wav", "alum2.wav", "alum3.wav", "alum4.wav"]
# Creates the left and right players
sfL = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out()
sfR = SfPlayer(folder + sounds[0], speed=1, mul=0.5).out(1)
# Function to choose a new sound and a new speed for the left player
def newL():
sfL.path = folder + sounds[random.randint(0, 3)]
sfL.speed = random.uniform(0.75, 1.5)
sfL.out()
# The "end-of-file" signal triggers the function "newL"
tfL = TrigFunc(sfL["trig"], newL)
# Function to choose a new sound and a new speed for the right player
def newR():
sfR.path = folder + sounds[random.randint(0, 3)]
sfR.speed = random.uniform(0.75, 1.5)
sfR.out(1)
# The "end-of-file" signal triggers the function "newR"
tfR = TrigFunc(sfR["trig"], newR)
s.gui(locals())
| belangeo/pyo | pyo/examples/04-soundfiles/02-read-from-disk-2.py | Python | lgpl-3.0 | 1,308 | 0.002294 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from common_openstack import OpenStackTest
class ServerTest(OpenStackTest):
def test_server_query(self):
factory = self.replay_flight_data()
p = self.load_policy({
'name': 'all-servers',
'resource': 'openstack.server'},
session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_server_filter_name(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "value",
"key": "name",
"value": "c7n-test-1",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_flavor(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "flavor",
"flavor_name": "m1.tiny",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-1")
def test_server_filter_tags(self):
factory = self.replay_flight_data()
policy = {
'name': 'get-server-c7n-test-1',
'resource': 'openstack.server',
'filters': [
{
"type": "tags",
"tags": [
{
"key": "a",
"value": "a",
},
{
"key": "b",
"value": "b",
},
],
"op": "all",
},
],
}
p = self.load_policy(policy, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0].name, "c7n-test-2")
| thisisshi/cloud-custodian | tools/c7n_openstack/tests/test_server.py | Python | apache-2.0 | 2,461 | 0 |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'./flibs/final_libraries/include',
'-I',
'./src',
'-I',
'./http-parser',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
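# For reference, an entry in a compile_commands.json generated by CMake (with
# CMAKE_EXPORT_COMPILE_COMMANDS=ON) looks roughly like the following; the paths
# and flags below are placeholders, not taken from this project:
#
#   [ { "directory": "/path/to/build",
#       "command": "cc -std=c99 -I../src -c ../src/main.c",
#       "file": "../src/main.c" } ]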
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
#try:
# final_flags.remove( '-stdlib=libc++' )
#except ValueError:
# pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| finaldie/final_httpd_mock | .ycm_extra_conf.py | Python | mit | 6,141 | 0.021821 |
#!/usr/bin/python2.7 -u
import os.path
if os.path.isdir('/dev/null'):
print '/dev/null'
if os.path.isdir('/dev'):
print '/dev'
| Knln/COMP2041 | examples/4/filetest2.py | Python | mit | 135 | 0 |
import ast
import logging
import time
import unittest
from malcolm.profiler import Profiler
# https://github.com/bdarnell/plop/blob/master/plop/test/collector_test.py
class ProfilerTest(unittest.TestCase):
def filter_stacks(self, results):
# Kind of hacky, but this is the simplest way to keep the tests
# working after the internals of the collector changed to support
# multiple formatters.
stack_counts = ast.literal_eval(results)
counts = {}
for stack, count in stack_counts.items():
filtered_stack = [
frame[2] for frame in stack if frame[0].endswith("test_profiler.py")
]
if filtered_stack:
counts[tuple(filtered_stack)] = count
return counts
def check_counts(self, counts, expected):
failed = False
output = []
for stack, count in expected.items():
# every expected frame should appear in the data, but
# the inverse is not true if the signal catches us between
# calls.
self.assertTrue(stack in counts)
ratio = float(counts[stack]) / float(count)
output.append(
"%s: expected %s, got %s (%s)" % (stack, count, counts[stack], ratio)
)
if not (0.70 <= ratio <= 1.25):
failed = True
if failed:
for line in output:
logging.warning(line)
for key in set(counts.keys()) - set(expected.keys()):
logging.warning("unexpected key: %s: got %s" % (key, counts[key]))
self.fail("collected data did not meet expectations")
def test_collector(self):
start = time.time()
def a(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def b(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def c(end):
while time.time() < end:
pass
profiler = Profiler("/tmp")
profiler.start(interval=0.01)
a(time.time() + 0.1)
b(time.time() + 0.2)
c(time.time() + 0.3)
end = time.time()
profiler.stop("profiler_test.plop")
elapsed = end - start
self.assertTrue(0.8 < elapsed < 0.9, elapsed)
with open("/tmp/profiler_test.plop") as f:
results = f.read()
counts = self.filter_stacks(results)
expected = {
("a", "test_collector"): 10,
("c", "a", "test_collector"): 10,
("b", "test_collector"): 20,
("c", "b", "test_collector"): 10,
("c", "test_collector"): 30,
}
self.check_counts(counts, expected)
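# Illustrative sketch of the Profiler API exercised above (the workload and the
# output file name are placeholders):
#
#   profiler = Profiler("/tmp")
#   profiler.start(interval=0.01)    # sample call stacks every 10 ms
#   run_workload()                   # hypothetical code under measurement
#   profiler.stop("my_run.plop")     # writes /tmp/my_run.plop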
| dls-controls/pymalcolm | tests/test_profiler.py | Python | apache-2.0 | 2,778 | 0.00108 |
# coding=utf-8
#! /usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import mlpy
import sys
import math
from config import loc_store_path, times_day
class LocTrain(object):
"""
"""
def __init__(self, file_name , flag):
if flag == "ridge":
self.lrs = [ mlpy.Ridge() for i in range(7) ]
else :
self.lrs = [ mlpy.OLS() for i in range(7) ]
self.file_name = file_name
self.x_weekday = dict()
self.y_weekday = dict()
#self.x ,self.y =
def train(self):
self.get_input(self.file_name)
for weekday in range(7):
self.lrs[weekday].learn(self.x_weekday[weekday], self.y_weekday[weekday])
def predict(self,weekday,speeds):
pre_speed = self.lrs[weekday].pred(speeds)
return pre_speed
def test(self):
pass
def get_input(self, filename):
fin = open(filename)
x = []
y = []
for each in fin:
each = each[:each.find('\n')]
l = each.split(' ')
each_x = []
each_x.append(1)
each_x.append(float(l[0]))
each_x.append(float(l[1]))
each_x.append(float(l[2]))
each_x.append(float(l[3]))
weekday = int(l[5])
if weekday not in self.x_weekday:
self.x_weekday[weekday] = []
self.y_weekday[weekday] = []
self.x_weekday[weekday].append(each_x)
self.y_weekday[weekday].append(float(l[4]))
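# Each input line is therefore expected to carry six space-separated fields
# (the values below are made up): four preceding speed observations, the speed
# to predict, and a weekday index 0-6; a constant 1 is prepended as the bias term.
#
#   12.0 13.5 11.8 12.2 12.9 1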
def main():
tmp = LocTrain('../data/train/3860_data',"ridge")
tmp.train()
print tmp.predict(1,[1,10,10,20,10])
pass
if __name__ == '__main__':
main()
| fuxiang90/speed-prediction | script/loc_train.py | Python | bsd-3-clause | 1,847 | 0.02653 |
# -*- coding: utf-8 -*-
#
# Copyright 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
from katello.client.api.base import KatelloAPI
class PackageAPI(KatelloAPI):
"""
Connection class to access package calls
"""
def package(self, packageId, repoId):
path = "/api/repositories/%s/packages/%s" % (repoId, packageId)
pack = self.server.GET(path)[1]
return pack
def packages_by_repo(self, repoId):
path = "/api/repositories/%s/packages" % repoId
pack_list = self.server.GET(path)[1]
return pack_list
def search(self, query, repoId):
path = "/api/repositories/%s/packages/search" % repoId
pack_list = self.server.GET(path, {"search": query})[1]
return pack_list
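# Illustrative usage sketch (assumes the server connection is configured on the
# API object elsewhere; the IDs and the search query are placeholders):
#
#   api = PackageAPI()
#   api.packages_by_repo("repo-id")        # GET /api/repositories/repo-id/packages
#   api.search("kernel", "repo-id")        # GET /api/repositories/repo-id/packages/search?search=kernel
#   api.package("package-id", "repo-id")   # GET /api/repositories/repo-id/packages/package-id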
| Katello/katello-cli | src/katello/client/api/package.py | Python | gpl-2.0 | 1,296 | 0.000772 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import btp_expression9
except ImportError:
btp_expression9 = sys.modules["onshape_client.oas.models.btp_expression9"]
try:
from onshape_client.oas.models import btp_expression_operator244_all_of
except ImportError:
btp_expression_operator244_all_of = sys.modules[
"onshape_client.oas.models.btp_expression_operator244_all_of"
]
try:
from onshape_client.oas.models import btp_identifier8
except ImportError:
btp_identifier8 = sys.modules["onshape_client.oas.models.btp_identifier8"]
try:
from onshape_client.oas.models import btp_space10
except ImportError:
btp_space10 = sys.modules["onshape_client.oas.models.btp_space10"]
class BTPExpressionOperator244(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("operator",): {
"NONE": "NONE",
"PLUS": "PLUS",
"MINUS": "MINUS",
"TIMES": "TIMES",
"DIVIDE": "DIVIDE",
"MODULUS": "MODULUS",
"POWER": "POWER",
"NEGATE": "NEGATE",
"OR": "OR",
"AND": "AND",
"NOT": "NOT",
"EQUAL_TO": "EQUAL_TO",
"NOT_EQUAL_TO": "NOT_EQUAL_TO",
"GREATER": "GREATER",
"LESS": "LESS",
"GREATER_OR_EQUAL": "GREATER_OR_EQUAL",
"LESS_OR_EQUAL": "LESS_OR_EQUAL",
"CONCATENATE": "CONCATENATE",
"CONDITIONAL": "CONDITIONAL",
},
("documentation_type",): {
"FUNCTION": "FUNCTION",
"PREDICATE": "PREDICATE",
"CONSTANT": "CONSTANT",
"ENUM": "ENUM",
"USER_TYPE": "USER_TYPE",
"FEATURE_DEFINITION": "FEATURE_DEFINITION",
"FILE_HEADER": "FILE_HEADER",
"UNDOCUMENTABLE": "UNDOCUMENTABLE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"for_export": (bool,), # noqa: E501
"global_namespace": (bool,), # noqa: E501
"import_microversion": (str,), # noqa: E501
"namespace": ([btp_identifier8.BTPIdentifier8],), # noqa: E501
"operand1": (btp_expression9.BTPExpression9,), # noqa: E501
"operand2": (btp_expression9.BTPExpression9,), # noqa: E501
"operand3": (btp_expression9.BTPExpression9,), # noqa: E501
"operator": (str,), # noqa: E501
"space_after_namespace": (btp_space10.BTPSpace10,), # noqa: E501
"space_after_operator": (btp_space10.BTPSpace10,), # noqa: E501
"space_before_operator": (btp_space10.BTPSpace10,), # noqa: E501
"written_as_function_call": (bool,), # noqa: E501
"atomic": (bool,), # noqa: E501
"documentation_type": (str,), # noqa: E501
"end_source_location": (int,), # noqa: E501
"node_id": (str,), # noqa: E501
"short_descriptor": (str,), # noqa: E501
"space_after": (btp_space10.BTPSpace10,), # noqa: E501
"space_before": (btp_space10.BTPSpace10,), # noqa: E501
"space_default": (bool,), # noqa: E501
"start_source_location": (int,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"for_export": "forExport", # noqa: E501
"global_namespace": "globalNamespace", # noqa: E501
"import_microversion": "importMicroversion", # noqa: E501
"namespace": "namespace", # noqa: E501
"operand1": "operand1", # noqa: E501
"operand2": "operand2", # noqa: E501
"operand3": "operand3", # noqa: E501
"operator": "operator", # noqa: E501
"space_after_namespace": "spaceAfterNamespace", # noqa: E501
"space_after_operator": "spaceAfterOperator", # noqa: E501
"space_before_operator": "spaceBeforeOperator", # noqa: E501
"written_as_function_call": "writtenAsFunctionCall", # noqa: E501
"atomic": "atomic", # noqa: E501
"documentation_type": "documentationType", # noqa: E501
"end_source_location": "endSourceLocation", # noqa: E501
"node_id": "nodeId", # noqa: E501
"short_descriptor": "shortDescriptor", # noqa: E501
"space_after": "spaceAfter", # noqa: E501
"space_before": "spaceBefore", # noqa: E501
"space_default": "spaceDefault", # noqa: E501
"start_source_location": "startSourceLocation", # noqa: E501
}
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
"_composed_instances",
"_var_name_to_model_instances",
"_additional_properties_model_instances",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btp_expression_operator244.BTPExpressionOperator244 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
for_export (bool): [optional] # noqa: E501
global_namespace (bool): [optional] # noqa: E501
import_microversion (str): [optional] # noqa: E501
namespace ([btp_identifier8.BTPIdentifier8]): [optional] # noqa: E501
operand1 (btp_expression9.BTPExpression9): [optional] # noqa: E501
operand2 (btp_expression9.BTPExpression9): [optional] # noqa: E501
operand3 (btp_expression9.BTPExpression9): [optional] # noqa: E501
operator (str): [optional] # noqa: E501
space_after_namespace (btp_space10.BTPSpace10): [optional] # noqa: E501
space_after_operator (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before_operator (btp_space10.BTPSpace10): [optional] # noqa: E501
written_as_function_call (bool): [optional] # noqa: E501
atomic (bool): [optional] # noqa: E501
documentation_type (str): [optional] # noqa: E501
end_source_location (int): [optional] # noqa: E501
node_id (str): [optional] # noqa: E501
short_descriptor (str): [optional] # noqa: E501
space_after (btp_space10.BTPSpace10): [optional] # noqa: E501
space_before (btp_space10.BTPSpace10): [optional] # noqa: E501
space_default (bool): [optional] # noqa: E501
start_source_location (int): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
constant_args = {
"_check_type": _check_type,
"_path_to_item": _path_to_item,
"_from_server": _from_server,
"_configuration": _configuration,
}
required_args = {}
# remove args whose value is Null because they are unset
required_arg_names = list(required_args.keys())
for required_arg_name in required_arg_names:
if required_args[required_arg_name] is nulltype.Null:
del required_args[required_arg_name]
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in six.iteritems(kwargs):
if (
var_name in unused_args
and self._configuration is not None
and self._configuration.discard_unknown_keys
and not self._additional_properties_model_instances
):
# discard variable.
continue
setattr(self, var_name, var_value)
@staticmethod
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
return {
"anyOf": [],
"allOf": [
btp_expression9.BTPExpression9,
btp_expression_operator244_all_of.BTPExpressionOperator244AllOf,
],
"oneOf": [],
}
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/btp_expression_operator244.py
|
Python
|
mit
| 12,168 | 0.000164 |
from __future__ import absolute_import, unicode_literals
from .api.psd_image import PSDImage
from .composer import compose
__all__ = ['PSDImage', 'compose']
|
psd-tools/psd-tools
|
src/psd_tools/__init__.py
|
Python
|
mit
| 158 | 0 |
# Copyright 2009-2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
__all__ = [
'BranchRevision',
]
from storm.locals import (
Int,
Reference,
Storm,
)
from zope.interface import implements
from lp.code.interfaces.branchrevision import IBranchRevision
class BranchRevision(Storm):
"""See `IBranchRevision`."""
__storm_table__ = 'BranchRevision'
__storm_primary__ = ("branch_id", "revision_id")
implements(IBranchRevision)
branch_id = Int(name='branch', allow_none=False)
branch = Reference(branch_id, 'Branch.id')
revision_id = Int(name='revision', allow_none=False)
revision = Reference(revision_id, 'Revision.id')
sequence = Int(name='sequence', allow_none=True)
def __init__(self, branch, revision, sequence=None):
self.branch = branch
self.revision = revision
self.sequence = sequence
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/code/model/branchrevision.py
|
Python
|
agpl-3.0
| 984 | 0 |
#!/usr/bin/python
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"lockbkl" : "9",
"unlockbkl" : "10",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
                        # Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
luckasfb/OT_903D-kernel-2.6.35.7
|
kernel/scripts/rt-tester/rt-tester.py
|
Python
|
gpl-2.0
| 5,104 | 0.021552 |
# -*- coding: utf-8; -*-
#
# @file urls.py
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-09-20
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details coll-gate printer module url entry point.
from django.conf.urls import include, url
urlpatterns = [
]
|
coll-gate/collgate
|
server/printer/urls.py
|
Python
|
mit
| 317 | 0.003175 |
from base import ChoicesEnum
from _version import __version__
|
tcwang817/django-choices-enum
|
django_choices_enum/__init__.py
|
Python
|
mit
| 62 | 0 |
import warnings
from django.conf import settings
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
class BackendTest(TestCase):
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
User.objects.create_user('test', 'test@example.com', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
user = User.objects.get(username='test')
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions() == set([u'auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = User.objects.get(username='test')
self.assertEqual(user.get_all_permissions(), set([u'auth.test2', u'auth.test', u'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = User.objects.get(username='test')
exp = set([u'auth.test2', u'auth.test', u'auth.test3', u'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set([u'auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = User.objects.get(username='test')
content_type=ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
supports_object_permissions = True
# This class also supports tests for anonymous user permissions,
# via subclasses which just set the 'supports_anonymous_user' attribute.
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
# not reached due to supports_anonymous_user = False
return True
return False
def has_module_perms(self, user, app_label):
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
class RowlevelBackendTest(TestCase):
"""
Tests for auth backend that supports object level permissions
"""
backend = 'django.contrib.auth.tests.auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', 'test@example.com', 'test')
self.user2 = User.objects.create_user('test2', 'test2@example.com', 'test')
self.user3 = User.objects.create_user('test3', 'test3@example.com', 'test')
self.save_warnings_state()
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='django.contrib.auth')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
self.restore_warnings_state()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
content_type=ContentType.objects.get_for_model(Group)
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = True
class NoAnonymousUserBackend(SimpleRowlevelBackend):
supports_anonymous_user = False
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend if it has 'supports_anonymous_user' = True
"""
backend = 'django.contrib.auth.tests.auth_backends.AnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
class NoAnonymousUserBackendTest(TestCase):
"""
Tests that AnonymousUser does not delegate to backend if it has 'supports_anonymous_user' = False
"""
backend = 'django.contrib.auth.tests.auth_backends.NoAnonymousUserBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), False)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set())
|
towerjoo/mindsbook
|
django/contrib/auth/tests/auth_backends.py
|
Python
|
bsd-3-clause
| 9,952 | 0.003014 |
"""Interfaces with iAlarm control panels."""
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DATA_COORDINATOR, DOMAIN
async def async_setup_entry(hass, entry, async_add_entities) -> None:
"""Set up a iAlarm alarm control panel based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR]
async_add_entities([IAlarmPanel(coordinator)], False)
class IAlarmPanel(CoordinatorEntity, AlarmControlPanelEntity):
"""Representation of an iAlarm device."""
@property
def device_info(self) -> DeviceInfo:
"""Return device info for this device."""
return DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
manufacturer="Antifurto365 - Meian",
name=self.name,
)
@property
def unique_id(self):
"""Return a unique id."""
return self.coordinator.mac
@property
def name(self):
"""Return the name."""
return "iAlarm"
@property
def state(self):
"""Return the state of the device."""
return self.coordinator.state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.coordinator.ialarm.disarm()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self.coordinator.ialarm.arm_stay()
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self.coordinator.ialarm.arm_away()
|
jawilson/home-assistant
|
homeassistant/components/ialarm/alarm_control_panel.py
|
Python
|
apache-2.0
| 1,923 | 0.00052 |
# -*- coding: utf-8 -*-
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
# Copyright (c) 2010 Jan Dittberner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
channel.py - Socket implementation of Google's Protocol Buffers RPC
service interface.
This package contains classes providing a socket implementation of the
RPCChannel abstract class.
Original Authors: Martin Norbury (mnorbury@lcogt.net)
Eric Saunders (esaunders@lcogt.net)
Jan Dittberner (jan@dittberner.info)
May 2009, Nov 2010
Modified for snakebite: Wouter de Bie (wouter@spotify.com)
May 2012
'''
# Standard library imports
import socket
import os
import pwd
import math
# Third party imports
from google.protobuf.service import RpcChannel
# Protobuf imports
from snakebite.protobuf.RpcHeader_pb2 import RpcRequestHeaderProto, RpcResponseHeaderProto
from snakebite.protobuf.IpcConnectionContext_pb2 import IpcConnectionContextProto
from snakebite.protobuf.ProtobufRpcEngine_pb2 import RequestHeaderProto
from snakebite.protobuf.datatransfer_pb2 import OpReadBlockProto, BlockOpResponseProto, PacketHeaderProto, ClientReadStatusProto
from snakebite.formatter import format_bytes
from snakebite.errors import RequestError
from snakebite.crc32c import crc
import google.protobuf.internal.encoder as encoder
import google.protobuf.internal.decoder as decoder
# Module imports
import logger
import logging
import struct
import uuid
# Configure package logging
log = logger.getLogger(__name__)
def log_protobuf_message(header, message):
log.debug("%s:\n\n\033[92m%s\033[0m" % (header, message))
def get_delimited_message_bytes(byte_stream, nr=4):
''' Parse a delimited protobuf message. This is done by first getting a protobuf varint from
    the stream that represents the length of the message, then reading that number of
    bytes from the stream and parsing them.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after.
'''
(length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message length (pos %d): %d" % (pos, length))
delimiter_bytes = nr - pos
byte_stream.rewind(delimiter_bytes)
message_bytes = byte_stream.read(length)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
total_len = length + pos
return (total_len, message_bytes)
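# Illustrative sketch, not part of the original snakebite module: it shows the varint
# length-prefix framing that get_delimited_message_bytes relies on, using the same
# protobuf encoder/decoder helpers imported above. The payload value is a placeholder.
def _example_varint_framing(payload="hello"):
    # Frame the payload with a varint length prefix, then decode the prefix back.
    framed = encoder._VarintBytes(len(payload)) + payload
    (length, pos) = decoder._DecodeVarint32(framed[:4], 0)
    # pos marks the end of the varint; the next `length` bytes are the message itself.
    return framed[pos:pos + length] == payload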
class RpcBufferedReader(object):
'''Class that wraps a socket and provides some utility methods for reading
and rewinding of the buffer. This comes in handy when reading protobuf varints.
'''
MAX_READ_ATTEMPTS = 100
def __init__(self, socket):
self.socket = socket
self.reset()
def read(self, n):
'''Reads n bytes into the internal buffer'''
bytes_wanted = n - self.buffer_length + self.pos + 1
if bytes_wanted > 0:
self._buffer_bytes(bytes_wanted)
end_pos = self.pos + n
ret = self.buffer[self.pos + 1:end_pos + 1]
self.pos = end_pos
return ret
def _buffer_bytes(self, n):
to_read = n
for _ in xrange(self.MAX_READ_ATTEMPTS):
bytes_read = self.socket.recv(to_read)
self.buffer += bytes_read
to_read -= len(bytes_read)
if to_read == 0:
log.debug("Bytes read: %d, total: %d" % (len(bytes_read), self.buffer_length))
return n
if len(bytes_read) < n:
raise Exception("RpcBufferedReader only managed to read %s out of %s bytes" % (len(bytes_read), n))
def rewind(self, places):
'''Rewinds the current buffer to a position. Needed for reading varints,
because we might read bytes that belong to the stream after the varint.
'''
log.debug("Rewinding pos %d with %d places" % (self.pos, places))
self.pos -= places
log.debug("Reset buffer to pos %d" % self.pos)
def reset(self):
self.buffer = ""
self.pos = -1 # position of last byte read
@property
def buffer_length(self):
'''Returns the length of the current buffer.'''
return len(self.buffer)
class SocketRpcChannel(RpcChannel):
ERROR_BYTES = 18446744073709551615L
RPC_HEADER = "hrpc"
RPC_SERVICE_CLASS = 0x00
AUTH_PROTOCOL_NONE = 0x00
RPC_PROTOCOL_BUFFFER = 0x02
'''Socket implementation of an RpcChannel.
'''
def __init__(self, host, port, version, effective_user=None):
'''SocketRpcChannel to connect to a socket server on a user defined port.
        It is possible to define the version and effective user for the communication.'''
self.host = host
self.port = port
self.sock = None
self.call_id = -3 # First time (when the connection context is sent, the call_id should be -3, otherwise start with 0 and increment)
self.version = version
self.client_id = str(uuid.uuid4())
self.effective_user = effective_user or pwd.getpwuid(os.getuid())[0]
def validate_request(self, request):
'''Validate the client request against the protocol file.'''
# Check the request is correctly initialized
if not request.IsInitialized():
raise Exception("Client request (%s) is missing mandatory fields" % type(request))
def get_connection(self, host, port):
'''Open a socket connection to a given host and port and writes the Hadoop header
The Hadoop RPC protocol looks like this when creating a connection:
+---------------------------------------------------------------------+
| Header, 4 bytes ("hrpc") |
+---------------------------------------------------------------------+
        | Version, 1 byte (default version 9) |
+---------------------------------------------------------------------+
| RPC service class, 1 byte (0x00) |
+---------------------------------------------------------------------+
| Auth protocol, 1 byte (Auth method None = 0) |
+---------------------------------------------------------------------+
| Length of the RpcRequestHeaderProto + length of the |
| of the IpcConnectionContextProto (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Serialized delimited RpcRequestHeaderProto |
+---------------------------------------------------------------------+
| Serialized delimited IpcConnectionContextProto |
+---------------------------------------------------------------------+
'''
log.debug("############## CONNECTING ##############")
# Open socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.settimeout(10)
# Connect socket to server - defined by host and port arguments
self.sock.connect((host, port))
# Send RPC headers
self.write(self.RPC_HEADER) # header
self.write(struct.pack('B', self.version)) # version
self.write(struct.pack('B', self.RPC_SERVICE_CLASS)) # RPC service class
self.write(struct.pack('B', self.AUTH_PROTOCOL_NONE)) # serialization type (protobuf = 0)
rpc_header = self.create_rpc_request_header()
context = self.create_connection_context()
header_length = len(rpc_header) + encoder._VarintSize(len(rpc_header)) +len(context) + encoder._VarintSize(len(context))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Header length: %s (%s)" % (header_length, format_bytes(struct.pack('!I', header_length))))
self.write(struct.pack('!I', header_length))
self.write_delimited(rpc_header)
self.write_delimited(context)
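    # Illustrative sketch, not part of the original snakebite class: it packs the fixed
    # seven-byte preamble described in the get_connection docstring (header, version,
    # service class, auth protocol) without touching the socket.
    def _example_connection_preamble(self):
        return (self.RPC_HEADER +
                struct.pack('B', self.version) +
                struct.pack('B', self.RPC_SERVICE_CLASS) +
                struct.pack('B', self.AUTH_PROTOCOL_NONE))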
def write(self, data):
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Sending: %s", format_bytes(data))
self.sock.send(data)
def write_delimited(self, data):
self.write(encoder._VarintBytes(len(data)))
self.write(data)
def create_rpc_request_header(self):
'''Creates and serializes a delimited RpcRequestHeaderProto message.'''
rpcheader = RpcRequestHeaderProto()
rpcheader.rpcKind = 2 # rpcheaderproto.RpcKindProto.Value('RPC_PROTOCOL_BUFFER')
rpcheader.rpcOp = 0 # rpcheaderproto.RpcPayloadOperationProto.Value('RPC_FINAL_PACKET')
rpcheader.callId = self.call_id
rpcheader.retryCount = -1
rpcheader.clientId = self.client_id[0:16]
if self.call_id == -3:
self.call_id = 0
else:
self.call_id += 1
# Serialize delimited
s_rpcHeader = rpcheader.SerializeToString()
log_protobuf_message("RpcRequestHeaderProto (len: %d)" % (len(s_rpcHeader)), rpcheader)
return s_rpcHeader
def create_connection_context(self):
        '''Creates and serializes an IpcConnectionContextProto (not delimited).'''
context = IpcConnectionContextProto()
context.userInfo.effectiveUser = self.effective_user
context.protocol = "org.apache.hadoop.hdfs.protocol.ClientProtocol"
s_context = context.SerializeToString()
log_protobuf_message("RequestContext (len: %d)" % len(s_context), context)
return s_context
def send_rpc_message(self, method, request):
'''Sends a Hadoop RPC request to the NameNode.
The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
should already be serialized in the right way (delimited or not) before
they are passed in this method.
The Hadoop RPC protocol looks like this for sending requests:
When sending requests
+---------------------------------------------------------------------+
| Length of the next three parts (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Delimited serialized RpcRequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized RequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized Request (varint len + request) |
+---------------------------------------------------------------------+
'''
log.debug("############## SENDING ##############")
#0. RpcRequestHeaderProto
rpc_request_header = self.create_rpc_request_header()
#1. RequestHeaderProto
request_header = self.create_request_header(method)
#2. Param
param = request.SerializeToString()
if log.getEffectiveLevel() == logging.DEBUG:
log_protobuf_message("Request", request)
rpc_message_length = len(rpc_request_header) + encoder._VarintSize(len(rpc_request_header)) + \
len(request_header) + encoder._VarintSize(len(request_header)) + \
len(param) + encoder._VarintSize(len(param))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("RPC message length: %s (%s)" % (rpc_message_length, format_bytes(struct.pack('!I', rpc_message_length))))
self.write(struct.pack('!I', rpc_message_length))
self.write_delimited(rpc_request_header)
self.write_delimited(request_header)
self.write_delimited(param)
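    # Illustrative sketch, not part of the original snakebite class: the length prefix
    # written by send_rpc_message is the sum of the three delimited parts, each counted
    # as its serialized size plus the varint that encodes that size.
    def _example_rpc_message_length(self, rpc_header, request_header, param):
        parts = (rpc_header, request_header, param)
        return sum(len(p) + encoder._VarintSize(len(p)) for p in parts)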
def create_request_header(self, method):
header = RequestHeaderProto()
header.methodName = method.name
header.declaringClassProtocolName = "org.apache.hadoop.hdfs.protocol.ClientProtocol"
header.clientProtocolVersion = 1
s_header = header.SerializeToString()
log_protobuf_message("RequestHeaderProto (len: %d)" % len(s_header), header)
return s_header
def recv_rpc_message(self):
'''Handle reading an RPC reply from the server. This is done by wrapping the
socket in a RcpBufferedReader that allows for rewinding of the buffer stream.
'''
log.debug("############## RECVING ##############")
byte_stream = RpcBufferedReader(self.sock)
return byte_stream
def get_length(self, byte_stream):
''' In Hadoop protobuf RPC, some parts of the stream are delimited with protobuf varint,
while others are delimited with 4 byte integers. This reads 4 bytes from the byte stream
        and returns the length of the delimited part that follows, by unpacking the 4 bytes
        and returning the first element from a tuple. The tuple that is returned from struct.unpack()
only contains one element.
'''
length = struct.unpack("!i", byte_stream.read(4))[0]
log.debug("4 bytes delimited part length: %d" % length)
return length
def parse_response(self, byte_stream, response_class):
'''Parses a Hadoop RPC response.
The RpcResponseHeaderProto contains a status field that marks SUCCESS or ERROR.
The Hadoop RPC protocol looks like the diagram below for receiving SUCCESS requests.
+-----------------------------------------------------------+
        | Length of the RPC response (4 bytes/32 bit int) |
+-----------------------------------------------------------+
| Delimited serialized RpcResponseHeaderProto |
+-----------------------------------------------------------+
| Serialized delimited RPC response |
+-----------------------------------------------------------+
In case of an error, the header status is set to ERROR and the error fields are set.
'''
log.debug("############## PARSING ##############")
log.debug("Payload class: %s" % response_class)
# Read first 4 bytes to get the total length
len_bytes = byte_stream.read(4)
total_length = struct.unpack("!I", len_bytes)[0]
log.debug("Total response length: %s" % total_length)
header = RpcResponseHeaderProto()
(header_len, header_bytes) = get_delimited_message_bytes(byte_stream)
log.debug("Header read %d" % header_len)
header.ParseFromString(header_bytes)
log_protobuf_message("RpcResponseHeaderProto", header)
if header.status == 0:
log.debug("header: %s, total: %s" % (header_len, total_length))
if header_len >= total_length:
return
response = response_class()
response_bytes = get_delimited_message_bytes(byte_stream, total_length - header_len)[1]
if len(response_bytes) > 0:
response.ParseFromString(response_bytes)
if log.getEffectiveLevel() == logging.DEBUG:
log_protobuf_message("Response", response)
return response
else:
self.handle_error(header)
def handle_error(self, header):
raise RequestError("\n".join([header.exceptionClassName, header.errorMsg]))
def close_socket(self):
'''Closes the socket and resets the channel.'''
log.debug("Closing socket")
if self.sock:
try:
self.sock.close()
except:
pass
self.sock = None
def CallMethod(self, method, controller, request, response_class, done):
        '''Call the RPC method. The naming doesn't conform to PEP8, since it's
a method called by protobuf
'''
try:
self.validate_request(request)
if not self.sock:
self.get_connection(self.host, self.port)
self.send_rpc_message(method, request)
byte_stream = self.recv_rpc_message()
return self.parse_response(byte_stream, response_class)
except RequestError: # Raise a request error, but don't close the socket
raise
except Exception: # All other errors close the socket
self.close_socket()
raise
class DataXceiverChannel(object):
# For internal reading: should be larger than bytes_per_chunk
LOAD_SIZE = 16000.0
# Op codes
WRITE_BLOCK = 80
READ_BLOCK = 81
READ_METADATA = 82
REPLACE_BLOCK = 83
COPY_BLOCK = 84
BLOCK_CHECKSUM = 85
TRANSFER_BLOCK = 86
# Checksum types
CHECKSUM_NULL = 0
CHECKSUM_CRC32 = 1
CHECKSUM_CRC32C = 2
CHECKSUM_DEFAULT = 3
CHECKSUM_MIXED = 4
MAX_READ_ATTEMPTS = 100
def __init__(self, host, port):
self.host, self.port = host, port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def connect(self):
try:
self.sock.connect((self.host, self.port))
log.debug("%s connected to DataNode" % self)
return True
except Exception:
log.debug("%s connection to DataNode failed" % self)
return False
def _close_socket(self):
self.sock.close()
def _read_bytes(self, n, depth=0):
if depth > self.MAX_READ_ATTEMPTS:
raise Exception("Tried to read %d more bytes, but failed after %d attempts" % (n, self.MAX_READ_ATTEMPTS))
bytes = self.sock.recv(n)
if len(bytes) < n:
left = n - len(bytes)
depth += 1
bytes += self._read_bytes(left, depth)
return bytes
def write(self, data):
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Sending: %s", format_bytes(data))
self.sock.send(data)
def write_delimited(self, data):
self.write(encoder._VarintBytes(len(data)))
self.write(data)
def readBlock(self, length, pool_id, block_id, generation_stamp, offset, block_token, check_crc):
'''Send a read request to given block. If we receive a successful response,
we start reading packets.
Send read request:
+---------------------------------------------------------------------+
| Data Transfer Protocol Version, 2 bytes |
+---------------------------------------------------------------------+
| Op code, 1 byte (READ_BLOCK = 81) |
+---------------------------------------------------------------------+
| Delimited serialized OpReadBlockProto (varint len + request) |
+---------------------------------------------------------------------+
Receive response:
+---------------------------------------------------------------------+
| Delimited BlockOpResponseProto (varint len + response) |
+---------------------------------------------------------------------+
Start reading packets. Each packet has the following structure:
+---------------------------------------------------------------------+
| Packet length (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Serialized size of header, 2 bytes |
+---------------------------------------------------------------------+
| Packet Header Proto |
+---------------------------------------------------------------------+
| x checksums, 4 bytes each |
+---------------------------------------------------------------------+
| x chunks of payload data |
+---------------------------------------------------------------------+
'''
log.debug("%s sending readBlock request" % self)
# Send version and opcode
self.sock.send(struct.pack('>h', 28))
self.sock.send(struct.pack('b', self.READ_BLOCK))
length = length - offset
# Create and send OpReadBlockProto message
request = OpReadBlockProto()
request.offset = offset
request.len = length
header = request.header
header.clientName = "snakebite"
base_header = header.baseHeader
# TokenProto
token = base_header.token
token.identifier = block_token.identifier
token.password = block_token.password
token.kind = block_token.kind
token.service = block_token.service
# ExtendedBlockProto
block = base_header.block
block.poolId = pool_id
block.blockId = block_id
block.generationStamp = generation_stamp
s_request = request.SerializeToString()
log_protobuf_message("OpReadBlockProto:", request)
self.write_delimited(s_request)
byte_stream = RpcBufferedReader(self.sock)
block_op_response_bytes = get_delimited_message_bytes(byte_stream)[1]
block_op_response = BlockOpResponseProto()
block_op_response.ParseFromString(block_op_response_bytes)
log_protobuf_message("BlockOpResponseProto", block_op_response)
checksum_type = block_op_response.readOpChecksumInfo.checksum.type
bytes_per_chunk = block_op_response.readOpChecksumInfo.checksum.bytesPerChecksum
log.debug("Checksum type: %s, bytesPerChecksum: %s" % (checksum_type, bytes_per_chunk))
if checksum_type in [self.CHECKSUM_CRC32C, self.CHECKSUM_CRC32]:
checksum_len = 4
else:
raise Exception("Checksum type %s not implemented" % checksum_type)
total_read = 0
if block_op_response.status == 0: # datatransfer_proto.Status.Value('SUCCESS')
while total_read < length:
log.debug("== Reading next packet")
packet_len = struct.unpack("!I", byte_stream.read(4))[0]
log.debug("Packet length: %s", packet_len)
serialized_size = struct.unpack("!H", byte_stream.read(2))[0]
log.debug("Serialized size: %s", serialized_size)
packet_header_bytes = byte_stream.read(serialized_size)
packet_header = PacketHeaderProto()
packet_header.ParseFromString(packet_header_bytes)
log_protobuf_message("PacketHeaderProto", packet_header)
data_len = packet_header.dataLen
chunks_per_packet = int((data_len + bytes_per_chunk - 1) / bytes_per_chunk)
log.debug("Nr of chunks: %d", chunks_per_packet)
data_len = packet_len - 4 - chunks_per_packet * checksum_len
log.debug("Payload len: %d", data_len)
byte_stream.reset()
# Collect checksums
if check_crc:
checksums = []
for _ in xrange(0, chunks_per_packet):
checksum = self._read_bytes(checksum_len)
checksum = struct.unpack("!I", checksum)[0]
checksums.append(checksum)
else:
self._read_bytes(checksum_len * chunks_per_packet)
# We use a fixed size buffer (a "load") to read only a couple of chunks at once.
bytes_per_load = self.LOAD_SIZE - (self.LOAD_SIZE % bytes_per_chunk)
chunks_per_load = int(bytes_per_load / bytes_per_chunk)
loads_per_packet = int(math.ceil(bytes_per_chunk * chunks_per_packet / bytes_per_load))
read_on_packet = 0
for i in range(loads_per_packet):
load = ''
for j in range(chunks_per_load):
log.debug("Reading chunk %s in load %s:", j, i)
bytes_to_read = min(bytes_per_chunk, data_len - read_on_packet)
chunk = self._read_bytes(bytes_to_read)
if check_crc:
checksum_index = i * chunks_per_load + j
if checksum_index < len(checksums) and crc(chunk) != checksums[checksum_index]:
raise Exception("Checksum doesn't match")
load += chunk
total_read += len(chunk)
read_on_packet += len(chunk)
yield load
# Send ClientReadStatusProto message confirming successful read
request = ClientReadStatusProto()
request.status = 0 # SUCCESS
log_protobuf_message("ClientReadStatusProto:", request)
s_request = request.SerializeToString()
self.write_delimited(s_request)
self._close_socket()
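    # Illustrative sketch, not part of the original snakebite class: it mirrors the
    # per-packet arithmetic readBlock performs, using placeholder numbers. With these
    # defaults the derived payload length comes out equal to data_len.
    def _example_packet_layout(self, packet_len=66052, data_len=65536,
                               bytes_per_chunk=512, checksum_len=4):
        # Number of payload chunks, rounded up to cover a partial final chunk.
        chunks_per_packet = int((data_len + bytes_per_chunk - 1) / bytes_per_chunk)
        # Payload bytes left after subtracting the 4-byte field and the checksums.
        payload_len = packet_len - 4 - chunks_per_packet * checksum_len
        return (chunks_per_packet, payload_len)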
def __repr__(self):
return "DataXceiverChannel<%s:%d>" % (self.host, self.port)
|
dgoldin/snakebite
|
snakebite/channel.py
|
Python
|
apache-2.0
| 26,513 | 0.002565 |
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for providing data to Mesh TF transformer."""
import functools
from absl import logging
import gin
import mesh_tensorflow.transformer.dataset as transformer_dataset
import t5.data
from t5.models import utils as model_utils
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
DEPRECATED_GIN_REFERENCES = (
"configurable_vocabulary",
"get_sentencepiece_model_path",
"maybe_print_dataset",
"num_parallel_calls",
"SentencePieceVocabulary",
"t5.data.sentencepiece_vocabulary.SentencePieceVocabulary",
"t5.models.mesh_transformer.get_sentencepiece_model_path",
"train_model",
"vocabularies.Vocabulary",
"Vocabulary",
)
@gin.configurable()
def mesh_train_dataset_fn(
mixture_or_task_name,
sequence_length,
vocabulary=None,
dataset_split=tfds.Split.TRAIN,
shuffle=True,
seed=None,
use_cached=False,
pack=True):
"""Returns the tf.data.Dataset for training on a given mixture.
This uses the format required for utils.run's `train_dataset_fn` argument in
the Mesh TF transformer standalone.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int max sequence length for
      that feature.
vocabulary: unused argument, maintains compatibility with other dataset_fns.
dataset_split: string, which split of the dataset to load. In most cases
this should be "train".
shuffle: Whether or not to shuffle dataset.
seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
shuffle seed for tf.data
use_cached: bool, whether to load the cached version of this dataset.
pack: bool, whether to pack the dataset.
Returns:
A tf.data.Dataset of preprocessed, tokenized, and batched examples.
"""
del vocabulary
mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
ds = mixture_or_task.get_dataset(
sequence_length, split=dataset_split, use_cached=use_cached,
shuffle=shuffle, num_epochs=None, seed=seed)
# Select just the output features which are present in the dataset.
feature_keys = tuple(k for k in mixture_or_task.output_features
if k in tf.data.get_output_shapes(ds))
# Filtering feature keys is done in pack_or_pad function. However, when
# packing is turned off, input_features aren't filtered leading to training
# problems due to strings showing up in the input example. Filtering features
# ensures that we don't rely on pack_or_pad to filter features for training.
def _filter_features(ex):
return {k: ex[k] for k in feature_keys}
ds = ds.map(
_filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
ds = transformer_dataset.pack_or_pad(
ds, sequence_length, pack=pack,
feature_keys=feature_keys, ensure_eos=eos_keys)
return ds
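# Illustrative sketch, not part of the T5 library: a direct call to the function above
# with placeholder values. "my_mixture" is a hypothetical name that would have to be
# registered as a t5.data Mixture or Task, and the sequence lengths are examples only.
def _example_mesh_train_dataset_call():
  return mesh_train_dataset_fn(
      mixture_or_task_name="my_mixture",
      sequence_length={"inputs": 512, "targets": 128},
      dataset_split="train")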
@gin.configurable()
def mesh_inference_dataset_fn(
mixture_or_task_name,
sequence_length,
dataset_split,
shuffle=False,
seed=None,
vocabulary=None,
num_inference_examples=-1,
use_cached=False,
priming_sequence_length=None):
"""Returns all tf.data.Datasets for LM inference on a given mixture.
For Tasks without inputs (such as language modeling), the first
`priming_sequence_length` tokens in the target are used as the "inputs" for
inference.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int max sequence length for
      that feature. If set to None, packing and padding will be disabled.
dataset_split: string, which split of the dataset to load. NOTE, this
function does NOT receive the split specified in utils.run. It needs to be
specified separately.
shuffle: Whether or not to shuffle dataset.
seed: tf.int64 scalar tf.Tensor (or None). Used as shuffle seed for tf.data.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
num_inference_examples: maximum number of examples per task to do inference
on. If None or less than 0, use all examples.
    use_cached: bool, whether to load the cached version of this dataset.
priming_sequence_length: If the Task only has "targets", select the first
this many tokens from each target sequence to use as "inputs". This is
useful for decoder-only language models where you would like to use a
portion of the targets as a priming sequence for generation.
Returns:
A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
"""
del vocabulary
mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
def _split_targets_for_primed_inference(ex):
ex["inputs"] = ex["targets"][:priming_sequence_length]
ex["targets"] = ex["targets"][priming_sequence_length:]
ex["inputs"] = tf.pad(
ex["inputs"],
[[0, priming_sequence_length - tf.shape(ex["inputs"])[0]]], "CONSTANT")
ex["inputs"] = tf.reshape(ex["inputs"], shape=(priming_sequence_length,))
return ex
def _prepare_for_unprimed_inference(ex):
ex["inputs"] = tf.constant([], dtype=tf.int64)
return ex
def _get_dataset_for_single_task(task, sequence_length):
"""Get a tensorflow.data.Dataset for the provided task."""
ds = task.get_dataset(
sequence_length, split=dataset_split, use_cached=use_cached,
shuffle=shuffle, seed=seed)
if "inputs" not in ds.element_spec:
if not priming_sequence_length or priming_sequence_length <= 0:
logging.warning("Priming sequence length not specified so priming "
"with the empty string.")
ds = ds.map(_prepare_for_unprimed_inference)
else:
logging.info("Using the first %d tokens of each target as input.",
priming_sequence_length)
ds = ds.map(_split_targets_for_primed_inference)
elif priming_sequence_length is not None:
raise ValueError(
"Setting a priming sequence length only makes sense for decoder-only "
"Tasks, which have `targets` but no `inputs`.")
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
logging.info(
"Padding '%s' with sequence lengths: %s", task.name, sequence_length)
ds = transformer_dataset.pack_or_pad(
ds,
sequence_length,
pack=False,
feature_keys=tuple(task.output_features),
ensure_eos=eos_keys)
if num_inference_examples is not None and num_inference_examples >= 0:
ds = ds.take(num_inference_examples)
return ds
outputs = []
for task in t5.data.get_subtasks(mixture_or_task):
if dataset_split not in task.splits:
logging.info("Task %s has no '%s' split, skipping inference.",
task.name, dataset_split)
continue
outputs.append(
transformer_dataset.EvalDataset(
task.name,
functools.partial(
_get_dataset_for_single_task,
task=task,
sequence_length=sequence_length),
task.postprocess_fn,
task.metric_fns,
)
)
if not outputs:
logging.warning("No %s data found for %s.",
dataset_split, mixture_or_task_name)
return outputs
@gin.configurable()
def mesh_eval_dataset_fn(
mixture_or_task_name,
sequence_length,
dataset_split,
vocabulary=None,
num_eval_examples=-1,
use_cached=False,
pack=False,
shuffle_eval_examples=False,
seed=None):
"""Returns all tf.data.Datasets for evaluation on a given mixture.
This uses the format required for utils.run's `eval_dataset_fn` argument in
the Mesh TF transformer standalone.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
    sequence_length: dict mapping feature key to the int max sequence length for
      that feature. If set to None, packing and padding will be disabled.
dataset_split: string, which split of the dataset to load.
    vocabulary: unused argument, maintains compatibility with other dataset_fns
num_eval_examples: maximum number of examples per task to use for continuous
eval. If None or less than 0, use all examples.
use_cached: bool, whether to load the cached version of this dataset.
pack: a boolean, whether to pack examples. This is useful for perplexity
evals but should not be used for iterative decoding.
shuffle_eval_examples: boolean, whether to shuffle eval examples, applied
only when num_eval_examples is not None. Intended to be able to eval on a
different eval slice at every iteration.
seed: tf.int64 scalar tf.Tensor (or None). Used for both the global seed and
shuffle seed for tf.data
Returns:
A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
"""
del vocabulary
mixture_or_task = t5.data.get_mixture_or_task(mixture_or_task_name)
def _get_dataset_for_single_task(task, sequence_length):
"""Get a tensorflow.data.Dataset for the provided task."""
if shuffle_eval_examples and seed is None:
logging.warning(("shuffle_seed_examples is true but no seed was ",
"provided. Using a random seed."))
ds = task.get_dataset(
sequence_length, split=dataset_split,
use_cached=use_cached, shuffle=shuffle_eval_examples, seed=seed,
)
eos_keys = set(
k for k, f in mixture_or_task.output_features.items() if f.add_eos)
if sequence_length is None:
logging.info(
"Skipping packing/padding for '%s' since sequence length is None.",
task.name)
else:
logging.info(
"%sing '%s' with sequence lengths: %s",
"Pack" if pack else "Padd", task.name, sequence_length)
ds = transformer_dataset.pack_or_pad(
ds,
sequence_length,
pack=pack,
feature_keys=tuple(task.output_features),
ensure_eos=eos_keys)
if num_eval_examples is not None and num_eval_examples >= 0:
ds = ds.take(num_eval_examples)
return ds
outputs = []
for task in t5.data.get_subtasks(mixture_or_task):
if dataset_split not in task.splits:
logging.info(
"Task %s has no '%s' split, skipping eval.", task.name, dataset_split
)
continue
outputs.append(
transformer_dataset.EvalDataset(
task.name,
functools.partial(
_get_dataset_for_single_task,
task=task,
sequence_length=sequence_length),
task.postprocess_fn,
task.metric_fns,
)
)
if not outputs:
logging.warning("No %s data found for %s.",
dataset_split, mixture_or_task_name)
return outputs
@gin.configurable()
def tsv_dataset_fn(
filename,
sequence_length,
dataset_split,
vocabulary,
shuffle_buffer_size=10000):
r"""Returns a dataset based on a TSV file formatted as `<input>\t<target>`."""
# Currently `tf.gfile.glob` is broken on GCS, so we only read a file or
# list of files.
return transformer_dataset.packed_parallel_tsv_dataset(
dataset=tf.data.TextLineDataset(filename).shuffle(shuffle_buffer_size),
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,
append_eos=True,
eos_id=1)
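# Illustrative sketch, not part of the T5 library: one line of the tab-separated
# "<input>\t<target>" layout that tsv_dataset_fn expects; the text itself is made up.
_EXAMPLE_TSV_LINE = "translate English to German: That is good.\tDas ist gut."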
@gin.configurable()
def get_vocabulary(mixture_or_task_name=None):
"""Get the appropriate value for the utils.run.vocabulary argument.
Args:
mixture_or_task_name: string, an identifier for a Mixture or Task in the
appropriate registry. Must be specified via gin.
Returns:
Either a single t5.data.vocabularies.Vocabulary or a tuple of
t5.data.vocabularies.Vocabulary for inputs and targets.
"""
return model_utils.get_vocabulary(mixture_or_task_name)
|
google-research/text-to-text-transfer-transformer
|
t5/models/mesh_transformer.py
|
Python
|
apache-2.0
| 12,873 | 0.00536 |
"""
This module contains the global configurations of the framework
"""
#: This name suppose to be used for general meta decoration for functions,
#: methods or even classes
meta_field = '_pyrsmeta'
|
palankai/pyrs
|
pyrs/conf.py
|
Python
|
mit
| 200 | 0 |
import feedparser
import logging
from rss import sources
from util import date, dict_tool, tags
log = logging.getLogger('app')
def parse_feed_by_name(name):
feed_params = sources.get_source(name)
if not feed_params:
raise ValueError('There is no feed with name %s' % name)
source_name = feed_params['name']
feed = feedparser.parse(feed_params['url'])
data = []
for entry in feed['entries']:
data.append(
create_doc(
source_name, feed, entry,
feed_params.get('tags', ()),
feed_params.get('author_name'),
feed_params.get('author_link'),
feed_params.get('dressing_params'),
)
)
log.info('%s: got %d documents', source_name, len(data))
return data
def create_doc(source_name, feed, entry, additional_tags, default_author_name, default_author_link, dressing_params):
link = dict_tool.get_alternative(entry, 'feedburner_origlink', 'link', assert_val=True)
published = date.utc_format(
dict_tool.get_alternative(entry, 'published', 'updated', assert_val=True)
)
description = dict_tool.get_alternative(entry, 'summary', 'description', 'title', assert_val=True)
picture = dict_tool.get_deep(entry, 'gd_image', 'src')
text = dict_tool.get_deep(entry, 'content', 0, 'value')
author_name = handle_default_param(
entry,
dict_tool.get_deep(entry, 'authors', 0, 'name'),
default_author_name
)
author_link = handle_default_param(
entry,
dict_tool.get_deep(entry, 'authors', 0, 'href'),
default_author_link
)
entry_tags = []
for tag in entry.get('tags', []):
tag_text = dict_tool.get_alternative(tag, 'term', 'label')
if tag_text:
entry_tags.append(tag_text.lower())
additional_tags += tuple(entry_tags)
comments_count = entry.get('slash_comments')
if comments_count is not None:
comments_count = int(comments_count)
return {
'link': link,
'title': entry['title'],
'published': published,
'picture': picture,
'author_name': author_name,
'author_link': author_link,
'description': description,
'text': text,
'source_name': source_name,
'source_type': 'rss',
'source_title': feed['feed']['title'],
'source_link': feed['feed']['link'],
'comments_count': comments_count,
'tags': tags.create_tags_list(*additional_tags),
'__dressing_params': dressing_params,
}
def handle_default_param(entry, val, default_val):
if callable(default_val):
return default_val(entry, val)
return val or default_val
|
andre487/news487
|
collector/rss/reader.py
|
Python
|
mit
| 2,757 | 0.001451 |
#!/usr/bin/python3
"""
Merges raw data from geneteka into larger json files.
"""
from collections import defaultdict
import html
import json
import os
import re
INPUT_DIR = 'data_raw'
OUTPUT_DIR = 'data'
def extractNotes(value):
match = re.search(r'i.png" title="([^"]*)"', value)
if match:
return (value.split('<', 1)[0].strip(), match.group(1))
return (value.strip(), None)
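# For example (illustrative input, not real data):
#   extractNotes('Kowalski <img src="i.png" title="note">')  -> ('Kowalski', 'note')
#   extractNotes('Kowalski')                                  -> ('Kowalski', None)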
def convertPersonRecord(record):
# Unparsed column with various information.
stuff = record[9]
lastName, lastNameNotes = extractNotes(record[3])
motherLastName, motherLastNameNotes = extractNotes(record[6])
output = {
'year': record[0].strip(),
'record_number': record[1].strip(),
'first_name': record[2].strip(),
'last_name': lastName,
'father_first_name': record[4].strip(),
'mother_first_name': record[5].strip(),
'mother_last_name': motherLastName,
'parish': record[7].strip(),
'place': record[8].strip(),
'stuff': stuff,
}
# Last name notes.
if lastNameNotes:
output['last_name_notes'] = lastNameNotes
if motherLastNameNotes:
output['mother_last_name_notes'] = motherLastNameNotes
# List of notes.
match = re.search(r'i.png" title="([^"]*)"', stuff)
if match:
output['notes'] = html.unescape(match.group(1)).strip().split('\r')
# Where archives are kept.
match = re.search(r'z.png" title="([^"]*)"', stuff)
if match:
output['archives'] = html.unescape(match.group(1)).strip()
# URL to the place the archives are kept.
match = re.search(r'href="([^"]*)" target', stuff)
if match:
output['archives_url'] = match.group(1)
# URL to metryki.genealodzy.pl where scans can be found.
match = re.search(r'href="([^"]*)">[^>]*s.png', stuff)
if match:
output['metryki_url'] = html.unescape(match.group(1))
# User that entered this record to the database.
match = re.search(r'uname=([^"]*)"', stuff)
if match:
output['user_entered'] = match.group(1)
return output
def convertMarriageRecord(record):
# Unparsed column with various information.
stuff = record[9]
husbandLastName, husbandLastNameNotes = extractNotes(record[3])
wifeLastName, wifeLastNameNotes = extractNotes(record[6])
output = {
'year': record[0].strip(),
'record_number': record[1].strip(),
'husband_first_name': record[2].strip(),
'husband_last_name': husbandLastName,
'husband_parents': record[4].strip(),
'wife_first_name': record[5].strip(),
'wife_last_name': wifeLastName,
'wife_parents': record[7].strip(),
'parish': record[8].strip(),
'stuff': stuff,
}
# Last name notes.
if husbandLastNameNotes:
output['nazwisko_meza_uwagi'] = husbandLastNameNotes
if wifeLastNameNotes:
output['nazwisko_zony_uwagi'] = wifeLastNameNotes
# List of notes.
match = re.search(r'i.png" title="([^"]*)"', stuff)
if match:
output['notes'] = html.unescape(match.group(1)).strip().split('\r')
# Where archives are kept.
match = re.search(r'z.png" title="([^"]*)"', stuff)
if match:
output['archives'] = html.unescape(match.group(1)).strip()
# URL to the place the archives are kept.
match = re.search(r'href="([^"]*)" target', stuff)
if match:
output['archives_url'] = match.group(1)
# URL to metryki.genealodzy.pl where scans can be found.
match = re.search(r'href="([^"]*)">[^>]*s.png', stuff)
if match:
output['metryki_url'] = match.group(1)
# User that entered this record to the database.
match = re.search(r'uname=([^"]*)"', stuff)
if match:
output['user_entered'] = match.group(1)
return output
def main():
# Map from prefix to list of records.
data = defaultdict(list)
# Read all files from INPUT_DIR.
for fileName in os.listdir(INPUT_DIR):
prefix = re.search('[^_]+_._[^_]+', fileName).group(0)
with open(os.path.join(INPUT_DIR, fileName)) as file:
content = json.load(file)
data[prefix] += content['data']
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
# Parse records and write one parish per file.
for key, value in data.items():
voivodeship, recordType, parishId = key.split('_')
if recordType == 'S':
converter = convertMarriageRecord
else:
converter = convertPersonRecord
value[:] = [converter(x) for x in value]
print("Writing %s" % key)
metadata = {
'voivodeship': voivodeship,
'record_type': recordType,
'parish_id': parishId,
}
outputFile = os.path.join(OUTPUT_DIR, key + '.json')
with open(outputFile, 'w') as file:
outputData = {
'data': value,
'metadata': metadata,
}
json.dump(outputData, file)
if __name__ == "__main__":
main()
|
PeWu/python-geneteka
|
merge.py
|
Python
|
apache-2.0
| 4,704 | 0.013818 |
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from pygments.lexers import PythonLexer, BashLexer
from pygments.lexer import bygroups, using
from pygments.token import Keyword, Operator, Name, Text
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class IPythonLexer(PythonLexer):
name = 'IPython'
aliases = ['ip', 'ipython']
filenames = ['*.ipy']
tokens = PythonLexer.tokens.copy()
tokens['root'] = [
        (r'(\%+)(\w+)\s+(.*)(\n)', bygroups(Operator, Keyword, using(BashLexer), Text)),
(r'(\%+)(\w+)\b', bygroups(Operator, Keyword)),
(r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
] + tokens['root']
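# A minimal usage sketch (illustrative; pygments.highlight and HtmlFormatter
# are standard pygments APIs, not defined in this module):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight("%timeit x = 1\n!ls -la\n", IPythonLexer(), HtmlFormatter()))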
|
Carreau/gistpynb
|
nbconvert/lexers.py
|
Python
|
apache-2.0
| 904 | 0.006637 |
# j4cDAC test code
#
# Copyright 2011 Jacob Potter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import time
import struct
def pack_point(x, y, r, g, b, i = -1, u1 = 0, u2 = 0, flags = 0):
"""Pack some color values into a struct dac_point.
Values must be specified for x, y, r, g, and b. If a value is not
passed in for the other fields, i will default to max(r, g, b); the
rest default to zero.
"""
if i < 0:
i = max(r, g, b)
return struct.pack("<HhhHHHHHH", flags, x, y, r, g, b, i, u1, u2)
class ProtocolError(Exception):
"""Exception used when a protocol error is detected."""
pass
class Status(object):
"""Represents a status response from the DAC."""
def __init__(self, data):
"""Initialize from a chunk of data."""
self.protocol_version, self.le_state, self.playback_state, \
self.source, self.le_flags, self.playback_flags, \
self.source_flags, self.fullness, self.point_rate, \
self.point_count = \
struct.unpack("<BBBBHHHHII", data)
def dump(self, prefix = " - "):
"""Dump to a string."""
lines = [
"Light engine: state %d, flags 0x%x" %
(self.le_state, self.le_flags),
"Playback: state %d, flags 0x%x" %
(self.playback_state, self.playback_flags),
"Buffer: %d points" %
(self.fullness, ),
"Playback: %d kpps, %d points played" %
(self.point_rate, self.point_count),
"Source: %d, flags 0x%x" %
(self.source, self.source_flags)
]
for l in lines:
print prefix + l
class BroadcastPacket(object):
"""Represents a broadcast packet from the DAC."""
def __init__(self, st):
"""Initialize from a chunk of data."""
self.mac = st[:6]
self.hw_rev, self.sw_rev, self.buffer_capacity, \
self.max_point_rate = struct.unpack("<HHHI", st[6:16])
self.status = Status(st[16:36])
def dump(self, prefix = " - "):
"""Dump to a string."""
lines = [
"MAC: " + ":".join(
"%02x" % (ord(o), ) for o in self.mac),
"HW %d, SW %d" %
(self.hw_rev, self.sw_rev),
"Capabilities: max %d points, %d kpps" %
(self.buffer_capacity, self.max_point_rate)
]
for l in lines:
print prefix + l
#self.status.dump(prefix)
class DAC(object):
"""A connection to a DAC."""
def read(self, l):
"""Read exactly length bytes from the connection."""
while l > len(self.buf):
self.buf += self.conn.recv(4096)
obuf = self.buf
self.buf = obuf[l:]
return obuf[:l]
def readresp(self, cmd):
"""Read a response from the DAC."""
data = self.read(22)
response = data[0]
cmdR = data[1]
status = Status(data[2:])
# status.dump()
if cmdR != cmd:
raise ProtocolError("expected resp for %r, got %r"
% (cmd, cmdR))
if response != "a":
raise ProtocolError("expected ACK, got %r"
% (response, ))
self.last_status = status
return status
def __init__(self, host, port = 7765):
"""Connect to the DAC over TCP."""
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((host, port))
#print "Connected to %s:%s" % (host, port)
self.conn = conn
self.buf = ""
# Read the "hello" message
first_status = self.readresp("?")
#first_status.dump()
def begin(self, lwm, rate):
cmd = struct.pack("<cHI", "b", lwm, rate)
self.conn.sendall(cmd)
return self.readresp("b")
def update(self, lwm, rate):
cmd = struct.pack("<cHI", "u", lwm, rate)
self.conn.sendall(cmd)
return self.readresp("u")
def encode_point(self, point):
try:
return pack_point(*point)
except Exception as e:
##print "Exception"
#print point
raise e
def write(self, points):
epoints = map(self.encode_point, points)
cmd = struct.pack("<cH", "d", len(epoints))
self.conn.sendall(cmd + "".join(epoints))
return self.readresp("d")
def prepare(self):
self.conn.sendall("p")
return self.readresp("p")
def stop(self):
self.conn.sendall("s")
return self.readresp("s")
def estop(self):
self.conn.sendall("\xFF")
return self.readresp("\xFF")
def clear_estop(self):
self.conn.sendall("c")
return self.readresp("c")
def ping(self):
self.conn.sendall("?")
return self.readresp("?")
def play_stream(self, stream):
# First, prepare the stream
if self.last_status.playback_state == 2:
raise Exception("already playing?!")
elif self.last_status.playback_state == 0:
self.prepare()
started = 0
while True:
# How much room?
cap = 1799 - self.last_status.fullness
points = stream.read(cap)
if cap < 100:
time.sleep(0.005)
cap += 150
# print "Writing %d points" % (cap, )
t0 = time.time()
self.write(points)
t1 = time.time()
# print "Took %f" % (t1 - t0, )
if not started:
self.begin(0, 30000)
started = 1
def find_dac():
"""Listen for broadcast packets."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", 7654))
while True:
data, addr = s.recvfrom(1024)
bp = BroadcastPacket(data)
#print "Packet from %s: " % (addr, )
#bp.dump()
def find_first_dac():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", 7654))
data, addr = s.recvfrom(1024)
bp = BroadcastPacket(data)
#print "Packet from %s: " % (addr, )
return addr[0]
|
lightengine/lightstream
|
lightstream/oldlib/dac.py
|
Python
|
bsd-2-clause
| 5,679 | 0.035217 |
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.utils.translation import gettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_change_icon_source
from zerver.lib.exceptions import JsonableError
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.response import json_success
from zerver.lib.upload import upload_icon_image
from zerver.lib.url_encoding import append_url_query_string
from zerver.models import UserProfile
@require_realm_admin
def upload_icon(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
if len(request.FILES) != 1:
raise JsonableError(_("You must upload exactly one icon."))
icon_file = list(request.FILES.values())[0]
if (settings.MAX_ICON_FILE_SIZE_MIB * 1024 * 1024) < icon_file.size:
raise JsonableError(
_("Uploaded file is larger than the allowed limit of {} MiB").format(
settings.MAX_ICON_FILE_SIZE_MIB,
)
)
upload_icon_image(icon_file, user_profile)
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_UPLOADED, acting_user=user_profile
)
icon_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=icon_url,
)
return json_success(json_result)
@require_realm_admin
def delete_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
    # We don't actually delete the icon because it might still
    # be needed while the old URL is cached; the icon source is
    # rewritten in any case on the next update.
do_change_icon_source(
user_profile.realm, user_profile.realm.ICON_FROM_GRAVATAR, acting_user=user_profile
)
gravatar_url = realm_icon_url(user_profile.realm)
json_result = dict(
icon_url=gravatar_url,
)
return json_success(json_result)
def get_icon_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
url = realm_icon_url(user_profile.realm)
    # We can rely on the URL already having query parameters, because the
    # icon URL helper appends '?version=<version_number>' itself; this lets our
    # templates add further query parameters with a plain ampersand instead of
    # jumping through decode/encode hoops.
url = append_url_query_string(url, request.META["QUERY_STRING"])
return redirect(url)
|
eeshangarg/zulip
|
zerver/views/realm_icon.py
|
Python
|
apache-2.0
| 2,456 | 0.00285 |
# coding: utf-8
from app.api_client.error import HTTPError
from app.helpers.login_helpers import generate_buyer_creation_token
from dmapiclient.audit import AuditTypes
from dmutils.email import generate_token, EmailError
from dmutils.forms import FakeCsrf
from ...helpers import BaseApplicationTest
from lxml import html
import mock
import pytest
from flask import session
import flask_featureflags as feature
EMAIL_SENT_MESSAGE = "send a link"
USER_CREATION_EMAIL_ERROR = "Failed to send user creation email."
PASSWORD_RESET_EMAIL_ERROR = "Failed to send password reset."
TOKEN_CREATED_BEFORE_PASSWORD_LAST_CHANGED_ERROR = "This password reset link is invalid."
USER_LINK_EXPIRED_ERROR = "The link you used to create an account may have expired."
def has_validation_errors(data, field_name):
document = html.fromstring(data)
form_field = document.xpath('//input[@name="{}"]'.format(field_name))
return 'invalid' in form_field[0].classes or 'invalid' in form_field[0].getparent().classes
class TestLogin(BaseApplicationTest):
def setup(self):
super(TestLogin, self).setup()
data_api_client_config = {'authenticate_user.return_value': self.user(
123, "email@email.com", 1234, 'name', 'name'
)}
self._data_api_client = mock.patch(
'app.main.views.login.data_api_client', **data_api_client_config
)
self.data_api_client_mock = self._data_api_client.start()
def teardown(self):
self._data_api_client.stop()
def test_should_show_login_page(self):
res = self.client.get(self.expand_path('/login'))
assert res.status_code == 200
assert 'private' in res.headers['Cache-Control']
assert "Sign in to the Marketplace" in res.get_data(as_text=True)
@mock.patch('app.main.views.login.data_api_client')
def test_redirect_on_buyer_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
res = self.client.post(self.url_for('main.process_login'), data={
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
assert 'Secure;' in res.headers['Set-Cookie']
@mock.patch('app.main.views.login.data_api_client')
def test_redirect_on_supplier_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(
123,
'email@email.com',
None,
None,
'Name',
role='supplier'
)
data_api_client.get_user.return_value = self.user(
123,
'email@email.com',
None,
None,
'Name',
role='supplier'
)
res = self.client.post(self.url_for('main.process_login'), data={
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert res.status_code == 302
assert res.location == 'http://localhost' + \
self.expand_path('/2/seller-dashboard')
assert 'Secure;' in res.headers['Set-Cookie']
def test_should_redirect_logged_in_buyer(self):
self.login_as_buyer()
res = self.client.get(self.url_for('main.render_login'))
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
def test_should_strip_whitespace_surrounding_login_email_address_field(self):
self.client.post(self.expand_path('/login'), data={
'email_address': ' valid@email.com ',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
self.data_api_client_mock.authenticate_user.assert_called_with('valid@email.com', '1234567890')
def test_should_not_strip_whitespace_surrounding_login_password_field(self):
self.client.post(self.expand_path('/login'), data={
'email_address': 'valid@email.com',
'password': ' 1234567890 ',
'csrf_token': FakeCsrf.valid_token,
})
self.data_api_client_mock.authenticate_user.assert_called_with(
'valid@email.com', ' 1234567890 ')
@mock.patch('app.main.views.login.data_api_client')
def test_ok_next_url_redirects_buyer_on_login(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
data = {
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
}
res = self.client.post(self.expand_path('/login?next={}'.format(self.expand_path('/bar-foo'))), data=data)
assert res.status_code == 302
assert res.location == 'http://localhost' + self.expand_path('/bar-foo')
@mock.patch('app.main.views.login.data_api_client')
def test_bad_next_url_redirects_user(self, data_api_client):
with self.app.app_context():
data_api_client.authenticate_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
data_api_client.get_user.return_value = self.user(123, "email@email.com", None, None, 'Name')
data = {
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
}
res = self.client.post(self.expand_path('/login?next=http://badness.com'), data=data)
assert res.status_code == 302
assert res.location == 'http://localhost/2/buyer-dashboard'
def test_should_have_cookie_on_redirect(self):
with self.app.app_context():
self.app.config['SESSION_COOKIE_DOMAIN'] = '127.0.0.1'
self.app.config['SESSION_COOKIE_SECURE'] = True
res = self.client.post(self.expand_path('/login'), data={
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
cookie_value = self.get_cookie_by_name(res, 'dm_session')
assert cookie_value['dm_session'] is not None
assert cookie_value["Domain"] == "127.0.0.1"
def test_should_redirect_to_login_on_logout(self):
res = self.client.get(self.expand_path('/logout'))
assert res.status_code == 302
assert res.location == 'http://localhost/2/login'
@mock.patch('app.main.views.login.data_api_client')
def test_should_return_a_403_for_invalid_login(self, data_api_client):
data_api_client.authenticate_user.return_value = None
data_api_client.get_user.return_value = None
res = self.client.post(self.expand_path('/login'), data={
'email_address': 'valid@email.com',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
assert self.strip_all_whitespace("Make sure you've entered the right email address and password") \
in self.strip_all_whitespace(res.get_data(as_text=True))
assert res.status_code == 403
def test_should_be_validation_error_if_no_email_or_password(self):
res = self.client.post(self.expand_path('/login'), data={'csrf_token': FakeCsrf.valid_token})
data = res.get_data(as_text=True)
assert res.status_code == 400
assert has_validation_errors(data, 'email_address')
assert has_validation_errors(data, 'password')
def test_should_be_validation_error_if_invalid_email(self):
res = self.client.post(self.expand_path('/login'), data={
'email_address': 'invalid',
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
data = res.get_data(as_text=True)
assert res.status_code == 400
assert has_validation_errors(data, 'email_address')
assert not has_validation_errors(data, 'password')
def test_valid_email_formats(self):
cases = [
'good@example.com',
'good-email@example.com',
'good-email+plus@example.com',
'good@subdomain.example.com',
'good@hyphenated-subdomain.example.com',
]
for address in cases:
res = self.client.post(self.expand_path('/login'), data={
'email_address': address,
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
data = res.get_data(as_text=True)
assert res.status_code == 302, address
def test_invalid_email_formats(self):
cases = [
'',
'bad',
'bad@@example.com',
'bad @example.com',
'bad@.com',
'bad.example.com',
'@',
'@example.com',
'bad@',
'bad@example.com,bad2@example.com',
'bad@example.com bad2@example.com',
'bad@example.com,other.example.com',
]
for address in cases:
res = self.client.post(self.expand_path('/login'), data={
'email_address': address,
'password': '1234567890',
'csrf_token': FakeCsrf.valid_token,
})
data = res.get_data(as_text=True)
assert res.status_code == 400, address
assert has_validation_errors(data, 'email_address'), address
class TestLoginFormsNotAutofillable(BaseApplicationTest):
def _forms_and_inputs_not_autofillable(self, url, expected_title):
response = self.client.get(url)
assert response.status_code == 200
document = html.fromstring(response.get_data(as_text=True))
page_title = document.xpath('//h1/text()')[0].strip()
assert expected_title == page_title
forms = document.xpath('//main[@id="content"]//form')
for form in forms:
assert form.get('autocomplete') == "off"
non_hidden_inputs = form.xpath('//input[@type!="hidden"]')
for input in non_hidden_inputs:
if input.get('type') != 'submit':
assert input.get('autocomplete') == "off"
def test_login_form_and_inputs_not_autofillable(self):
self._forms_and_inputs_not_autofillable(
self.expand_path('/login'),
"Sign in to the Marketplace"
)
@pytest.mark.skip
def test_request_password_reset_form_and_inputs_not_autofillable(self):
self._forms_and_inputs_not_autofillable(
self.expand_path('/reset-password'),
"Reset password"
)
@pytest.mark.skip
@mock.patch('app.main.views.login.data_api_client')
def test_reset_password_form_and_inputs_not_autofillable(
self, data_api_client
):
data_api_client.get_user.return_value = self.user(
123, "email@email.com", 1234, 'email', 'name'
)
with self.app.app_context():
token = generate_token(
{
"user": 123,
"email": 'email@email.com',
},
self.app.config['SECRET_KEY'],
self.app.config['RESET_PASSWORD_SALT'])
url = self.expand_path('/reset-password/{}').format(token)
self._forms_and_inputs_not_autofillable(
url,
"Reset password",
)
class TestTermsUpdate(BaseApplicationTest):
payload = {
'csrf_token': FakeCsrf.valid_token,
'accept_terms': 'y',
}
def test_page_load(self):
with self.app.app_context():
self.login_as_buyer()
res = self.client.get(self.url_for('main.terms_updated'))
assert res.status_code == 200
assert 'terms' in res.get_data(as_text=True)
def test_login_required(self):
with self.app.app_context():
# Not logged in
res = self.client.get(self.url_for('main.terms_updated'))
assert res.status_code == 302
@mock.patch('app.main.views.login.terms_of_use')
@mock.patch('app.main.views.login.data_api_client')
def test_submit(self, data_api_client, terms_of_use):
with self.app.app_context():
self.login_as_buyer(user_id=42)
res = self.client.post(self.url_for('main.accept_updated_terms'), data=self.payload)
data_api_client.update_user.assert_called_once_with(42, fields=mock.ANY)
terms_of_use.set_session_flag.assert_called_once_with(False)
assert res.status_code == 302
@mock.patch('app.main.views.login.data_api_client')
def test_submit_requires_login(self, data_api_client):
with self.app.app_context():
# Not logged in
res = self.client.post(self.url_for('main.accept_updated_terms'), data=self.payload)
data_api_client.update_user.assert_not_called()
assert res.status_code == 302
assert res.location.startswith(self.url_for('main.render_login', _external=True))
@mock.patch('app.main.views.login.data_api_client')
def test_submit_without_accepting(self, data_api_client):
with self.app.app_context():
self.login_as_buyer()
data = dict(self.payload)
data.pop('accept_terms')
res = self.client.post(self.url_for('main.accept_updated_terms'), data=data)
data_api_client.update_user.assert_not_called()
assert res.status_code == 400
|
AusDTO/dto-digitalmarketplace-buyer-frontend
|
tests/app/views/test_login.py
|
Python
|
mit
| 14,240 | 0.001545 |
'''
AxelProxy XBMC Addon
Copyright (C) 2013 Eldorado
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
import axelcommon
import axelproxy
# This is the XBMC-linked entry point. TODO: read the settings here and pass
# them to the proxy (port, etc.).
# TODO: check whether the "start at launch" setting is configured!
# Address and port for the proxy to listen on
HOST_NAME = '127.0.0.1'
#HOST_NAME = 'localhost'
PORT_NUMBER = 45550  # TODO: move this somewhere that can be configured from the UI
if __name__ == '__main__':
file_dest = axelcommon.profile_path #replace this line if you want to be specific about the download folder
print file_dest
axelproxy.ProxyManager().start_proxy(port=PORT_NUMBER, host_name=HOST_NAME,download_folder=file_dest), #more param to come
|
JamesLinEngineer/RKMC
|
addons/script.module.axel.downloader/lib/standalone_server.py
|
Python
|
gpl-2.0
| 1,470 | 0.010204 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe import msgprint, _
from frappe.utils import cstr, flt, cint
from erpnext.stock.stock_ledger import update_entries_after
from erpnext.controllers.stock_controller import StockController
from erpnext.accounts.utils import get_company_default
from erpnext.stock.utils import get_stock_balance
class OpeningEntryAccountError(frappe.ValidationError): pass
class EmptyStockReconciliationItemsError(frappe.ValidationError): pass
class StockReconciliation(StockController):
def __init__(self, *args, **kwargs):
super(StockReconciliation, self).__init__(*args, **kwargs)
self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]
def validate(self):
if not self.expense_account:
self.expense_account = frappe.get_cached_value('Company', self.company, "stock_adjustment_account")
if not self.cost_center:
self.cost_center = frappe.get_cached_value('Company', self.company, "cost_center")
self.validate_posting_time()
self.remove_items_with_no_change()
self.validate_data()
self.validate_expense_account()
self.set_total_qty_and_amount()
def on_submit(self):
self.update_stock_ledger()
self.make_gl_entries()
def on_cancel(self):
self.delete_and_repost_sle()
self.make_gl_entries_on_cancel()
def remove_items_with_no_change(self):
"""Remove items if qty or rate is not changed"""
self.difference_amount = 0.0
def _changed(item):
qty, rate = get_stock_balance(item.item_code, item.warehouse,
self.posting_date, self.posting_time, with_valuation_rate=True)
if (item.qty==None or item.qty==qty) and (item.valuation_rate==None or item.valuation_rate==rate):
return False
else:
# set default as current rates
if item.qty==None:
item.qty = qty
if item.valuation_rate==None:
item.valuation_rate = rate
item.current_qty = qty
item.current_valuation_rate = rate
self.difference_amount += (flt(item.qty, item.precision("qty")) * \
flt(item.valuation_rate or rate, item.precision("valuation_rate")) \
- flt(qty, item.precision("qty")) * flt(rate, item.precision("valuation_rate")))
return True
items = list(filter(lambda d: _changed(d), self.items))
if not items:
frappe.throw(_("None of the items have any change in quantity or value."),
EmptyStockReconciliationItemsError)
elif len(items) != len(self.items):
self.items = items
for i, item in enumerate(self.items):
item.idx = i + 1
frappe.msgprint(_("Removed items with no change in quantity or value."))
def validate_data(self):
def _get_msg(row_num, msg):
return _("Row # {0}: ").format(row_num+1) + msg
self.validation_messages = []
item_warehouse_combinations = []
default_currency = frappe.db.get_default("currency")
for row_num, row in enumerate(self.items):
# find duplicates
if [row.item_code, row.warehouse] in item_warehouse_combinations:
self.validation_messages.append(_get_msg(row_num, _("Duplicate entry")))
else:
item_warehouse_combinations.append([row.item_code, row.warehouse])
self.validate_item(row.item_code, row_num+1)
# validate warehouse
if not frappe.db.get_value("Warehouse", row.warehouse):
self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system")))
# if both not specified
if row.qty in ["", None] and row.valuation_rate in ["", None]:
self.validation_messages.append(_get_msg(row_num,
_("Please specify either Quantity or Valuation Rate or both")))
# do not allow negative quantity
if flt(row.qty) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Quantity is not allowed")))
# do not allow negative valuation
if flt(row.valuation_rate) < 0:
self.validation_messages.append(_get_msg(row_num,
_("Negative Valuation Rate is not allowed")))
if row.qty and row.valuation_rate in ["", None]:
row.valuation_rate = get_stock_balance(row.item_code, row.warehouse,
self.posting_date, self.posting_time, with_valuation_rate=True)[1]
if not row.valuation_rate:
# try if there is a buying price list in default currency
buying_rate = frappe.db.get_value("Item Price", {"item_code": row.item_code,
"buying": 1, "currency": default_currency}, "price_list_rate")
if buying_rate:
row.valuation_rate = buying_rate
else:
# get valuation rate from Item
row.valuation_rate = frappe.get_value('Item', row.item_code, 'valuation_rate')
# throw all validation messages
if self.validation_messages:
for msg in self.validation_messages:
msgprint(msg)
raise frappe.ValidationError(self.validation_messages)
def validate_item(self, item_code, row_num):
from erpnext.stock.doctype.item.item import validate_end_of_life, \
validate_is_stock_item, validate_cancelled_item
# using try except to catch all validation msgs and display together
try:
item = frappe.get_doc("Item", item_code)
# end of life and stock item
validate_end_of_life(item_code, item.end_of_life, item.disabled, verbose=0)
validate_is_stock_item(item_code, item.is_stock_item, verbose=0)
# item should not be serialized
if item.has_serial_no == 1:
raise frappe.ValidationError(_("Serialized Item {0} cannot be updated using Stock Reconciliation, please use Stock Entry").format(item_code))
# item managed batch-wise not allowed
if item.has_batch_no == 1:
raise frappe.ValidationError(_("Batched Item {0} cannot be updated using Stock Reconciliation, instead use Stock Entry").format(item_code))
# docstatus should be < 2
validate_cancelled_item(item_code, item.docstatus, verbose=0)
except Exception as e:
self.validation_messages.append(_("Row # ") + ("%d: " % (row_num)) + cstr(e))
def update_stock_ledger(self):
""" find difference between current and expected entries
and create stock ledger entries based on the difference"""
from erpnext.stock.stock_ledger import get_previous_sle
for row in self.items:
previous_sle = get_previous_sle({
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
if previous_sle:
if row.qty in ("", None):
row.qty = previous_sle.get("qty_after_transaction", 0)
if row.valuation_rate in ("", None):
row.valuation_rate = previous_sle.get("valuation_rate", 0)
if row.qty and not row.valuation_rate:
frappe.throw(_("Valuation Rate required for Item in row {0}").format(row.idx))
if ((previous_sle and row.qty == previous_sle.get("qty_after_transaction")
and row.valuation_rate == previous_sle.get("valuation_rate"))
or (not previous_sle and not row.qty)):
continue
self.insert_entries(row)
def insert_entries(self, row):
"""Insert Stock Ledger Entries"""
args = frappe._dict({
"doctype": "Stock Ledger Entry",
"item_code": row.item_code,
"warehouse": row.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time,
"voucher_type": self.doctype,
"voucher_no": self.name,
"company": self.company,
"stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"),
"is_cancelled": "No",
"qty_after_transaction": flt(row.qty, row.precision("qty")),
"valuation_rate": flt(row.valuation_rate, row.precision("valuation_rate"))
})
self.make_sl_entries([args])
def delete_and_repost_sle(self):
""" Delete Stock Ledger Entries related to this voucher
and repost future Stock Ledger Entries"""
existing_entries = frappe.db.sql("""select distinct item_code, warehouse
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doctype, self.name), as_dict=1)
# delete entries
frappe.db.sql("""delete from `tabStock Ledger Entry`
where voucher_type=%s and voucher_no=%s""", (self.doctype, self.name))
# repost future entries for selected item_code, warehouse
for entries in existing_entries:
update_entries_after({
"item_code": entries.item_code,
"warehouse": entries.warehouse,
"posting_date": self.posting_date,
"posting_time": self.posting_time
})
def get_gl_entries(self, warehouse_account=None):
if not self.cost_center:
msgprint(_("Please enter Cost Center"), raise_exception=1)
return super(StockReconciliation, self).get_gl_entries(warehouse_account,
self.expense_account, self.cost_center)
def validate_expense_account(self):
if not cint(erpnext.is_perpetual_inventory_enabled(self.company)):
return
if not self.expense_account:
msgprint(_("Please enter Expense Account"), raise_exception=1)
elif not frappe.db.sql("""select name from `tabStock Ledger Entry` limit 1"""):
if frappe.db.get_value("Account", self.expense_account, "report_type") == "Profit and Loss":
frappe.throw(_("Difference Account must be a Asset/Liability type account, since this Stock Reconciliation is an Opening Entry"), OpeningEntryAccountError)
def set_total_qty_and_amount(self):
for d in self.get("items"):
d.amount = flt(d.qty, d.precision("qty")) * flt(d.valuation_rate, d.precision("valuation_rate"))
d.current_amount = (flt(d.current_qty,
d.precision("current_qty")) * flt(d.current_valuation_rate, d.precision("current_valuation_rate")))
d.quantity_difference = flt(d.qty) - flt(d.current_qty)
d.amount_difference = flt(d.amount) - flt(d.current_amount)
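        # Illustrative numbers (not from the codebase): counting 12 units at a
        # valuation rate of 10.0 against a current balance of 10 units at 9.5
        # gives amount=120.0, current_amount=95.0, quantity_difference=2.0 and
        # amount_difference=25.0.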
def get_items_for(self, warehouse):
self.items = []
for item in get_items(warehouse, self.posting_date, self.posting_time, self.company):
self.append("items", item)
def submit(self):
if len(self.items) > 100:
self.queue_action('submit')
else:
self._submit()
def cancel(self):
if len(self.items) > 100:
self.queue_action('cancel')
else:
self._cancel()
@frappe.whitelist()
def get_items(warehouse, posting_date, posting_time, company):
items = frappe.db.sql('''select i.name, i.item_name from `tabItem` i, `tabBin` bin where i.name=bin.item_code
and i.disabled=0 and bin.warehouse=%s''', (warehouse), as_dict=True)
items += frappe.db.sql('''select i.name, i.item_name from `tabItem` i, `tabItem Default` id where i.name = id.parent
and i.is_stock_item=1 and i.has_serial_no=0 and i.has_batch_no=0 and i.has_variants=0 and i.disabled=0
and id.default_warehouse=%s and id.company=%s group by i.name''', (warehouse, company), as_dict=True)
res = []
for item in items:
qty, rate = get_stock_balance(item.name, warehouse, posting_date, posting_time,
with_valuation_rate=True)
res.append({
"item_code": item.name,
"warehouse": warehouse,
"qty": qty,
"item_name": item.item_name,
"valuation_rate": rate,
"current_qty": qty,
"current_valuation_rate": rate
})
return res
@frappe.whitelist()
def get_stock_balance_for(item_code, warehouse, posting_date, posting_time):
frappe.has_permission("Stock Reconciliation", "write", throw = True)
qty, rate = get_stock_balance(item_code, warehouse,
posting_date, posting_time, with_valuation_rate=True)
return {
'qty': qty,
'rate': rate
}
@frappe.whitelist()
def get_difference_account(purpose, company):
if purpose == 'Stock Reconciliation':
account = get_company_default(company, "stock_adjustment_account")
else:
account = frappe.db.get_value('Account', {'is_group': 0,
'company': company, 'account_type': 'Temporary'}, 'name')
return account
|
patilsangram/erpnext
|
erpnext/stock/doctype/stock_reconciliation/stock_reconciliation.py
|
Python
|
gpl-3.0
| 11,573 | 0.02575 |
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.tools.api.generator.api.keras.preprocessing import image
from tensorflow.tools.api.generator.api.keras.preprocessing import sequence
from tensorflow.tools.api.generator.api.keras.preprocessing import text
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/preprocessing/__init__.py
|
Python
|
mit
| 371 | 0.002695 |
'''
Swap counting
Status: Accepted
'''
###############################################################################
def inversions(constants, variables):
"""Number of swaps"""
if variables:
pow2 = pow(2, variables - 1, 1_000_000_007)
return pow2 * (constants * 2 + variables)
return constants
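# Worked example (illustrative): when a '1' is reached with 2 zeroes and 1 '?'
# already seen to its right, inversions(2, 1) = 2**0 * (2*2 + 1) = 5 swaps are
# added (the caller reduces the running total modulo 1_000_000_007).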
###############################################################################
def main():
"""Read input and print output"""
zeroes, qmarks, swaps = 0, 0, 0
for glyph in reversed(input()):
if glyph == '0':
zeroes += 1
else:
if glyph == '1':
swaps += inversions(zeroes, qmarks)
if glyph == '?':
swaps += inversions(zeroes, qmarks) + swaps
qmarks += 1
swaps %= 1_000_000_007
print(swaps)
###############################################################################
if __name__ == '__main__':
main()
|
ivanlyon/exercises
|
kattis/k_sequences.py
|
Python
|
mit
| 969 | 0.003096 |
import copy
import json
import os
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple
import gevent
import structlog
from eth_utils import (
decode_hex,
encode_hex,
is_checksum_address,
remove_0x_prefix,
to_canonical_address,
to_checksum_address,
)
from gevent.lock import Semaphore
from hexbytes import HexBytes
from requests.exceptions import ConnectTimeout
from web3 import Web3
from web3.contract import ContractFunction
from web3.eth import Eth
from web3.gas_strategies.rpc import rpc_gas_price_strategy
from web3.middleware import geth_poa_middleware
from web3.utils.contracts import prepare_transaction
from web3.utils.empty import empty
from web3.utils.toolz import assoc
from raiden import constants
from raiden.exceptions import (
AddressWithoutCode,
EthNodeCommunicationError,
EthNodeInterfaceError,
InsufficientFunds,
)
from raiden.network.rpc.middleware import (
block_hash_cache_middleware,
connection_test_middleware,
http_retry_with_backoff_middleware,
)
from raiden.network.rpc.smartcontract_proxy import ContractProxy
from raiden.utils import pex, privatekey_to_address
from raiden.utils.ethereum_clients import is_supported_client
from raiden.utils.filters import StatelessFilter
from raiden.utils.solc import (
solidity_library_symbol,
solidity_resolve_symbols,
solidity_unresolved_symbols,
)
from raiden.utils.typing import (
ABI,
Address,
AddressHex,
BlockHash,
BlockSpecification,
Nonce,
TransactionHash,
)
log = structlog.get_logger(__name__) # pylint: disable=invalid-name
def logs_blocks_sanity_check(from_block: BlockSpecification, to_block: BlockSpecification) -> None:
"""Checks that the from/to blocks passed onto log calls contain only appropriate types"""
is_valid_from = isinstance(from_block, int) or isinstance(from_block, str)
assert is_valid_from, "event log from block can be integer or latest,pending, earliest"
is_valid_to = isinstance(to_block, int) or isinstance(to_block, str)
assert is_valid_to, "event log to block can be integer or latest,pending, earliest"
def geth_assert_rpc_interfaces(web3: Web3):
try:
web3.version.node
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the web3 rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.eth.blockNumber
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the eth rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.net.version
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the net rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
try:
web3.txpool.inspect
except ValueError:
raise EthNodeInterfaceError(
"The underlying geth node does not have the txpool rpc interface "
"enabled. Please run it with --rpcapi eth,net,web3,txpool"
)
def parity_assert_rpc_interfaces(web3: Web3):
try:
web3.version.node
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the web3 rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.eth.blockNumber
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the eth rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.net.version
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the net rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
try:
web3.manager.request_blocking(
"parity_nextNonce", ["0x0000000000000000000000000000000000000000"]
)
except ValueError:
raise EthNodeInterfaceError(
"The underlying parity node does not have the parity rpc interface "
"enabled. Please run it with --jsonrpc-apis=eth,net,web3,parity"
)
def parity_discover_next_available_nonce(web3: Web3, address: AddressHex) -> Nonce:
"""Returns the next available nonce for `address`."""
next_nonce_encoded = web3.manager.request_blocking("parity_nextNonce", [address])
return Nonce(int(next_nonce_encoded, 16))
def geth_discover_next_available_nonce(web3: Web3, address: AddressHex) -> Nonce:
"""Returns the next available nonce for `address`."""
# The nonces of the mempool transactions are considered used, and it's
# assumed these transactions are different from the ones currently pending
# in the client. This is a simplification, otherwise it would be necessary
# to filter the local pending transactions based on the mempool.
pool = web3.txpool.inspect or {}
# pool is roughly:
#
# {'queued': {'account1': {nonce1: ... nonce2: ...}, 'account2': ...}, 'pending': ...}
#
# Pending refers to the current block and if it contains transactions from
# the user, these will be the younger transactions. Because this needs the
# largest nonce, queued is checked first.
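    # For example (illustrative): if queued for this address were
    # {'5': ..., '7': ...}, the next available nonce would be 8, regardless of
    # any lower nonces still sitting in pending.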
address = to_checksum_address(address)
queued = pool.get("queued", {}).get(address)
if queued:
return Nonce(max(int(k) for k in queued.keys()) + 1)
pending = pool.get("pending", {}).get(address)
if pending:
return Nonce(max(int(k) for k in pending.keys()) + 1)
# The first valid nonce is 0, therefore the count is already the next
# available nonce
return web3.eth.getTransactionCount(address, "latest")
def check_address_has_code(client: "JSONRPCClient", address: Address, contract_name: str = ""):
""" Checks that the given address contains code. """
result = client.web3.eth.getCode(to_checksum_address(address), "latest")
if not result:
if contract_name:
formated_contract_name = "[{}]: ".format(contract_name)
else:
formated_contract_name = ""
raise AddressWithoutCode(
"{}Address {} does not contain code".format(
formated_contract_name, to_checksum_address(address)
)
)
def deploy_dependencies_symbols(all_contract):
dependencies = {}
symbols_to_contract = dict()
for contract_name in all_contract:
symbol = solidity_library_symbol(contract_name)
if symbol in symbols_to_contract:
raise ValueError("Conflicting library names.")
symbols_to_contract[symbol] = contract_name
for contract_name, contract in all_contract.items():
unresolved_symbols = solidity_unresolved_symbols(contract["bin"])
dependencies[contract_name] = [
symbols_to_contract[unresolved] for unresolved in unresolved_symbols
]
return dependencies
def dependencies_order_of_build(target_contract, dependencies_map):
""" Return an ordered list of contracts that is sufficient to successfully
deploy the target contract.
Note:
This function assumes that the `dependencies_map` is an acyclic graph.
"""
if not dependencies_map:
return [target_contract]
if target_contract not in dependencies_map:
raise ValueError("no dependencies defined for {}".format(target_contract))
order = [target_contract]
todo = list(dependencies_map[target_contract])
while todo:
target_contract = todo.pop(0)
target_pos = len(order)
for dependency in dependencies_map[target_contract]:
            # we need to add the current contract before all of its dependencies
if dependency in order:
target_pos = order.index(dependency)
else:
todo.append(dependency)
order.insert(target_pos, target_contract)
order.reverse()
return order
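# Illustrative only: for a dependency map like {'Token': ['Lib'], 'Lib': []},
# dependencies_order_of_build('Token', ...) returns ['Lib', 'Token'], i.e. the
# library is deployed before the contract that links against it.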
class ParityCallType(Enum):
ESTIMATE_GAS = 1
CALL = 2
def check_value_error_for_parity(value_error: ValueError, call_type: ParityCallType) -> bool:
"""
    On parity, failing calls and gas estimations do not return None when the
    transaction would fail; they raise a ValueError instead.
    This function inspects the raised exception and returns True if it is the
    expected parity error for the given call type, and False otherwise.
"""
try:
error_data = json.loads(str(value_error).replace("'", '"'))
except json.JSONDecodeError:
return False
if call_type == ParityCallType.ESTIMATE_GAS:
code_checks_out = error_data["code"] == -32016
message_checks_out = "The execution failed due to an exception" in error_data["message"]
elif call_type == ParityCallType.CALL:
code_checks_out = error_data["code"] == -32015
message_checks_out = "VM execution error" in error_data["message"]
else:
raise ValueError("Called check_value_error_for_parity() with illegal call type")
if code_checks_out and message_checks_out:
return True
return False
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None):
""" Temporary workaround until next web3.py release (5.X.X)
Current master of web3.py has this implementation already:
https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311
"""
if "from" not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, "from", self.defaultAccount)
if block_identifier is None:
params = [transaction]
else:
params = [transaction, block_identifier]
try:
result = self.web3.manager.request_blocking("eth_estimateGas", params)
except ValueError as e:
if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS):
result = None
else:
# else the error is not denoting estimate gas failure and is something else
raise e
return result
def patched_web3_eth_call(self, transaction, block_identifier=None):
if "from" not in transaction and is_checksum_address(self.defaultAccount):
transaction = assoc(transaction, "from", self.defaultAccount)
if block_identifier is None:
block_identifier = self.defaultBlock
try:
result = self.web3.manager.request_blocking("eth_call", [transaction, block_identifier])
except ValueError as e:
if check_value_error_for_parity(e, ParityCallType.CALL):
result = ""
else:
# else the error is not denoting a revert, something is wrong
raise e
return HexBytes(result)
def estimate_gas_for_function(
address,
web3,
fn_identifier=None,
transaction=None,
contract_abi=None,
fn_abi=None,
block_identifier=None,
*args,
**kwargs,
):
"""Temporary workaround until next web3.py release (5.X.X)"""
estimate_transaction = prepare_transaction(
address,
web3,
fn_identifier=fn_identifier,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
try:
gas_estimate = web3.eth.estimateGas(estimate_transaction, block_identifier)
except ValueError as e:
if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS):
gas_estimate = None
else:
# else the error is not denoting estimate gas failure and is something else
raise e
return gas_estimate
def patched_contractfunction_estimateGas(self, transaction=None, block_identifier=None):
"""Temporary workaround until next web3.py release (5.X.X)"""
if transaction is None:
estimate_gas_transaction = {}
else:
estimate_gas_transaction = dict(**transaction)
if "data" in estimate_gas_transaction:
raise ValueError("Cannot set data in estimateGas transaction")
if "to" in estimate_gas_transaction:
raise ValueError("Cannot set to in estimateGas transaction")
if self.address:
estimate_gas_transaction.setdefault("to", self.address)
if self.web3.eth.defaultAccount is not empty:
estimate_gas_transaction.setdefault("from", self.web3.eth.defaultAccount)
if "to" not in estimate_gas_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.estimateGas` from a contract factory "
"you must provide a `to` address with the transaction"
)
else:
raise ValueError("Please ensure that this contract instance has an address.")
return estimate_gas_for_function(
self.address,
self.web3,
self.function_identifier,
estimate_gas_transaction,
self.contract_abi,
self.abi,
block_identifier,
*self.args,
**self.kwargs,
)
def monkey_patch_web3(web3, gas_price_strategy):
try:
# install caching middleware
web3.middleware_stack.add(block_hash_cache_middleware)
# set gas price strategy
web3.eth.setGasPriceStrategy(gas_price_strategy)
# In the version of web3.py we are using the http_retry_request_middleware
# is not on by default. But in recent ones it is. This solves some random
# crashes that happen on the mainnet as reported in issue
# https://github.com/raiden-network/raiden/issues/3558
web3.middleware_stack.add(http_retry_with_backoff_middleware)
# we use a PoA chain for smoketest, use this middleware to fix this
web3.middleware_stack.inject(geth_poa_middleware, layer=0)
except ValueError:
# `middleware_stack.inject()` raises a value error if the same middleware is
# injected twice. This happens with `eth-tester` setup where a single session
# scoped web3 instance is used for all clients
pass
# create the connection test middleware (but only for non-tester chain)
if not hasattr(web3, "testing"):
web3.middleware_stack.inject(connection_test_middleware, layer=0)
# Temporary until next web3.py release (5.X.X)
ContractFunction.estimateGas = patched_contractfunction_estimateGas
Eth.estimateGas = patched_web3_eth_estimate_gas
# Patch call() to achieve same behaviour between parity and geth
# At the moment geth returns '' for reverted/thrown transactions.
# Parity raises a value error. Raiden assumes the return of an empty
# string so we have to make parity behave like geth
Eth.call = patched_web3_eth_call
class JSONRPCClient:
""" Ethereum JSON RPC client.
Args:
host: Ethereum node host address.
port: Ethereum node port number.
privkey: Local user private key, used to sign transactions.
"""
def __init__(
self,
web3: Web3,
privkey: bytes,
gas_price_strategy: Callable = rpc_gas_price_strategy,
gas_estimate_correction: Callable = lambda gas: gas,
block_num_confirmations: int = 0,
uses_infura=False,
):
if privkey is None or len(privkey) != 32:
raise ValueError("Invalid private key")
if block_num_confirmations < 0:
raise ValueError("Number of confirmations has to be positive")
monkey_patch_web3(web3, gas_price_strategy)
try:
version = web3.version.node
except ConnectTimeout:
raise EthNodeCommunicationError("couldnt reach the ethereum node")
_, eth_node = is_supported_client(version)
address = privatekey_to_address(privkey)
address_checksumed = to_checksum_address(address)
if uses_infura:
warnings.warn(
"Infura does not provide an API to "
"recover the latest used nonce. This may cause the Raiden node "
"to error on restarts.\n"
"The error will manifest while there is a pending transaction "
"from a previous execution in the Ethereum's client pool. When "
"Raiden restarts the same transaction with the same nonce will "
"be retried and *rejected*, because the nonce is already used."
)
# The first valid nonce is 0, therefore the count is already the next
# available nonce
available_nonce = web3.eth.getTransactionCount(address_checksumed, "pending")
elif eth_node is constants.EthClient.PARITY:
parity_assert_rpc_interfaces(web3)
available_nonce = parity_discover_next_available_nonce(web3, address_checksumed)
elif eth_node is constants.EthClient.GETH:
geth_assert_rpc_interfaces(web3)
available_nonce = geth_discover_next_available_nonce(web3, address_checksumed)
else:
raise EthNodeInterfaceError(f"Unsupported Ethereum client {version}")
self.eth_node = eth_node
self.privkey = privkey
self.address = address
self.web3 = web3
self.default_block_num_confirmations = block_num_confirmations
self._available_nonce = available_nonce
self._nonce_lock = Semaphore()
self._gas_estimate_correction = gas_estimate_correction
log.debug(
"JSONRPCClient created",
node=pex(self.address),
available_nonce=available_nonce,
client=version,
)
def __repr__(self):
return f"<JSONRPCClient node:{pex(self.address)} nonce:{self._available_nonce}>"
def block_number(self):
""" Return the most recent block. """
return self.web3.eth.blockNumber
def get_block(self, block_identifier: BlockSpecification) -> Dict:
"""Given a block number, query the chain to get its corresponding block hash"""
return self.web3.eth.getBlock(block_identifier)
def get_confirmed_blockhash(self):
""" Gets the block CONFIRMATION_BLOCKS in the past and returns its block hash """
confirmed_block_number = self.web3.eth.blockNumber - self.default_block_num_confirmations
if confirmed_block_number < 0:
confirmed_block_number = 0
return self.blockhash_from_blocknumber(confirmed_block_number)
def blockhash_from_blocknumber(self, block_number: BlockSpecification) -> BlockHash:
"""Given a block number, query the chain to get its corresponding block hash"""
block = self.get_block(block_number)
return BlockHash(bytes(block["hash"]))
def can_query_state_for_block(self, block_identifier: BlockSpecification) -> bool:
"""
        Returns whether the provided block identifier is safe enough to query
        chain state for. If it is close to the state-pruning horizon, state
        should not be queried.
More info: https://github.com/raiden-network/raiden/issues/3566.
"""
latest_block_number = self.block_number()
preconditions_block = self.web3.eth.getBlock(block_identifier)
preconditions_block_number = int(preconditions_block["number"])
difference = latest_block_number - preconditions_block_number
return difference < constants.NO_STATE_QUERY_AFTER_BLOCKS
def balance(self, account: Address):
""" Return the balance of the account of the given address. """
return self.web3.eth.getBalance(to_checksum_address(account), "pending")
def parity_get_pending_transaction_hash_by_nonce(
self, address: AddressHex, nonce: Nonce
) -> Optional[TransactionHash]:
"""Queries the local parity transaction pool and searches for a transaction.
Checks the local tx pool for a transaction from a particular address and for
a given nonce. If it exists it returns the transaction hash.
"""
assert self.eth_node is constants.EthClient.PARITY
# https://wiki.parity.io/JSONRPC-parity-module.html?q=traceTransaction#parity_alltransactions
transactions = self.web3.manager.request_blocking("parity_allTransactions", [])
log.debug("RETURNED TRANSACTIONS", transactions=transactions)
for tx in transactions:
address_match = to_checksum_address(tx["from"]) == address
if address_match and int(tx["nonce"], 16) == nonce:
return tx["hash"]
return None
def gas_price(self) -> int:
try:
# generateGasPrice takes the transaction to be send as an optional argument
# but both strategies that we are using (time-based and rpc-based) don't make
# use of this argument. It is therefore safe to not provide it at the moment.
# This needs to be reevaluated if we use different gas price strategies
price = int(self.web3.eth.generateGasPrice())
except AttributeError: # workaround for Infura gas strategy key error
# As per https://github.com/raiden-network/raiden/issues/3201
            # we can sporadically get an AttributeError here. If that happens
# use latest gas price
price = int(self.web3.eth.gasPrice)
except IndexError: # work around for a web3.py exception when
# the blockchain is somewhat empty.
# https://github.com/ethereum/web3.py/issues/1149
price = int(self.web3.eth.gasPrice)
return price
def new_contract_proxy(self, contract_interface, contract_address: Address):
""" Return a proxy for interacting with a smart contract.
Args:
contract_interface: The contract interface as defined by the json.
address: The contract's address.
"""
return ContractProxy(
self, contract=self.new_contract(contract_interface, contract_address)
)
def new_contract(self, contract_interface: Dict, contract_address: Address):
return self.web3.eth.contract(
abi=contract_interface, address=to_checksum_address(contract_address)
)
def get_transaction_receipt(self, tx_hash: bytes):
return self.web3.eth.getTransactionReceipt(encode_hex(tx_hash))
def deploy_solidity_contract(
self, # pylint: disable=too-many-locals
contract_name: str,
all_contracts: Dict[str, ABI],
libraries: Dict[str, str] = None,
constructor_parameters: Tuple[Any] = None,
contract_path: str = None,
):
"""
Deploy a solidity contract.
Args:
contract_name: The name of the contract to compile.
all_contracts: The json dictionary containing the result of compiling a file.
libraries: A dict mapping library names to addresses, used during deployment.
constructor_parameters: A tuple of arguments to pass to the constructor.
contract_path: If we are dealing with solc >= v0.4.9 then the path
to the contract is a required argument to extract
the contract data from the `all_contracts` dict.
"""
if libraries:
libraries = dict(libraries)
else:
libraries = dict()
ctor_parameters = constructor_parameters or ()
all_contracts = copy.deepcopy(all_contracts)
if contract_name in all_contracts:
contract_key = contract_name
elif contract_path is not None:
contract_key = os.path.basename(contract_path) + ":" + contract_name
if contract_key not in all_contracts:
raise ValueError("Unknown contract {}".format(contract_name))
else:
raise ValueError(
"Unknown contract {} and no contract_path given".format(contract_name)
)
contract = all_contracts[contract_key]
contract_interface = contract["abi"]
symbols = solidity_unresolved_symbols(contract["bin"])
if symbols:
available_symbols = list(map(solidity_library_symbol, all_contracts.keys()))
unknown_symbols = set(symbols) - set(available_symbols)
if unknown_symbols:
msg = "Cannot deploy contract, known symbols {}, unresolved symbols {}.".format(
available_symbols, unknown_symbols
)
raise Exception(msg)
dependencies = deploy_dependencies_symbols(all_contracts)
deployment_order = dependencies_order_of_build(contract_key, dependencies)
deployment_order.pop() # remove `contract_name` from the list
log.debug(
"Deploying dependencies: {}".format(str(deployment_order)), node=pex(self.address)
)
for deploy_contract in deployment_order:
dependency_contract = all_contracts[deploy_contract]
hex_bytecode = solidity_resolve_symbols(dependency_contract["bin"], libraries)
bytecode = decode_hex(hex_bytecode)
dependency_contract["bin"] = bytecode
gas_limit = self.web3.eth.getBlock("latest")["gasLimit"] * 8 // 10
transaction_hash = self.send_transaction(
to=Address(b""), startgas=gas_limit, data=bytecode
)
self.poll(transaction_hash)
receipt = self.get_transaction_receipt(transaction_hash)
contract_address = receipt["contractAddress"]
# remove the hexadecimal prefix 0x from the address
contract_address = remove_0x_prefix(contract_address)
libraries[deploy_contract] = contract_address
deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address))
if not deployed_code:
raise RuntimeError("Contract address has no code, check gas usage.")
hex_bytecode = solidity_resolve_symbols(contract["bin"], libraries)
bytecode = decode_hex(hex_bytecode)
contract["bin"] = bytecode
if isinstance(contract["bin"], str):
contract["bin"] = decode_hex(contract["bin"])
contract_object = self.web3.eth.contract(abi=contract["abi"], bytecode=contract["bin"])
contract_transaction = contract_object.constructor(*ctor_parameters).buildTransaction()
transaction_hash = self.send_transaction(
to=Address(b""),
data=contract_transaction["data"],
startgas=self._gas_estimate_correction(contract_transaction["gas"]),
)
self.poll(transaction_hash)
receipt = self.get_transaction_receipt(transaction_hash)
contract_address = receipt["contractAddress"]
deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address))
if not deployed_code:
raise RuntimeError(
"Deployment of {} failed. Contract address has no code, check gas usage.".format(
contract_name
)
)
return self.new_contract_proxy(contract_interface, contract_address), receipt
def send_transaction(
self, to: Address, startgas: int, value: int = 0, data: bytes = b""
) -> bytes:
""" Helper to send signed messages.
This method will use the `privkey` provided in the constructor to
locally sign the transaction. This requires an extended server
implementation that accepts the variables v, r, and s.
"""
if to == to_canonical_address(constants.NULL_ADDRESS):
warnings.warn("For contract creation the empty string must be used.")
with self._nonce_lock:
nonce = self._available_nonce
gas_price = self.gas_price()
transaction = {
"data": data,
"gas": startgas,
"nonce": nonce,
"value": value,
"gasPrice": gas_price,
}
node_gas_price = self.web3.eth.gasPrice
log.debug(
"Calculated gas price for transaction",
node=pex(self.address),
calculated_gas_price=gas_price,
node_gas_price=node_gas_price,
)
# add the to address if not deploying a contract
if to != b"":
transaction["to"] = to_checksum_address(to)
signed_txn = self.web3.eth.account.signTransaction(transaction, self.privkey)
log_details = {
"node": pex(self.address),
"nonce": transaction["nonce"],
"gasLimit": transaction["gas"],
"gasPrice": transaction["gasPrice"],
}
log.debug("send_raw_transaction called", **log_details)
tx_hash = self.web3.eth.sendRawTransaction(signed_txn.rawTransaction)
self._available_nonce += 1
log.debug("send_raw_transaction returned", tx_hash=encode_hex(tx_hash), **log_details)
return tx_hash
def poll(self, transaction_hash: bytes):
""" Wait until the `transaction_hash` is applied or rejected.
Args:
transaction_hash: Transaction hash that we are waiting for.
"""
if len(transaction_hash) != 32:
raise ValueError("transaction_hash must be a 32 byte hash")
transaction_hash = encode_hex(transaction_hash)
# used to check if the transaction was removed, this could happen
# if gas price is too low:
#
# > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
# > Shannon). All sequential txs from this address(7d0eae79)
# > will be ignored
#
last_result = None
while True:
# Could return None for a short period of time, until the
# transaction is added to the pool
transaction = self.web3.eth.getTransaction(transaction_hash)
# if the transaction was added to the pool and then removed
if transaction is None and last_result is not None:
raise Exception("invalid transaction, check gas price")
# the transaction was added to the pool and mined
if transaction and transaction["blockNumber"] is not None:
last_result = transaction
# this will wait for both APPLIED and REVERTED transactions
transaction_block = transaction["blockNumber"]
confirmation_block = transaction_block + self.default_block_num_confirmations
block_number = self.block_number()
if block_number >= confirmation_block:
return transaction
gevent.sleep(1.0)
def new_filter(
self,
contract_address: Address,
topics: List[str] = None,
from_block: BlockSpecification = 0,
to_block: BlockSpecification = "latest",
) -> StatelessFilter:
""" Create a filter in the ethereum node. """
logs_blocks_sanity_check(from_block, to_block)
return StatelessFilter(
self.web3,
{
"fromBlock": from_block,
"toBlock": to_block,
"address": to_checksum_address(contract_address),
"topics": topics,
},
)
def get_filter_events(
self,
contract_address: Address,
topics: List[str] = None,
from_block: BlockSpecification = 0,
to_block: BlockSpecification = "latest",
) -> List[Dict]:
""" Get events for the given query. """
logs_blocks_sanity_check(from_block, to_block)
return self.web3.eth.getLogs(
{
"fromBlock": from_block,
"toBlock": to_block,
"address": to_checksum_address(contract_address),
"topics": topics,
}
)
def check_for_insufficient_eth(
self,
transaction_name: str,
transaction_executed: bool,
required_gas: int,
block_identifier: BlockSpecification,
):
""" After estimate gas failure checks if our address has enough balance.
If the account did not have enough ETH balance to execute the,
transaction then it raises an `InsufficientFunds` error.
Note:
This check contains a race condition, it could be the case that a
new block is mined changing the account's balance.
https://github.com/raiden-network/raiden/issues/3890#issuecomment-485857726
"""
if transaction_executed:
return
our_address = to_checksum_address(self.address)
balance = self.web3.eth.getBalance(our_address, block_identifier)
required_balance = required_gas * self.gas_price()
if balance < required_balance:
msg = f"Failed to execute {transaction_name} due to insufficient ETH"
log.critical(msg, required_wei=required_balance, actual_wei=balance)
raise InsufficientFunds(msg)
def get_checking_block(self):
"""Workaround for parity https://github.com/paritytech/parity-ethereum/issues/9707
In parity, doing any call() with the 'pending' block no longer falls back
to the latest block if no pending block is found, but raises an error instead.
Until that bug is fixed we need to enforce special behaviour for parity
and use the latest block for checking.
"""
checking_block = "pending"
if self.eth_node is constants.EthClient.PARITY:
checking_block = "latest"
return checking_block
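# Illustrative usage sketch (not part of the original module). Assuming
# `client` is an instance of the RPC client class these methods belong to,
# and `receiver` is a placeholder Address, a typical send-and-confirm cycle
# with the helpers above looks like:
#
#   tx_hash = client.send_transaction(to=receiver, startgas=21000, value=1)
#   client.poll(tx_hash)                        # wait for enough confirmations
#   receipt = client.get_transaction_receipt(tx_hash)
#   safe_block_hash = client.get_confirmed_blockhash()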
|
hackaugusto/raiden
|
raiden/network/rpc/client.py
|
Python
|
mit
| 34,000 | 0.002441 |
# -*- coding: utf-8 -*-
"""Installer script for Pywikibot 3.0 framework."""
#
# (C) Pywikibot team, 2009-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
import itertools
import os
import sys
try:
# Work around a traceback on Python < 2.7.4 and < 3.3.1
# http://bugs.python.org/issue15881#msg170215
import multiprocessing
except ImportError:
pass
# pyflakes workaround
__unused__ = (multiprocessing, )
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
PY26 = (PYTHON_VERSION < (2, 7))
versions_required_message = """
Pywikibot not available on:
%s
Pywikibot is only supported under Python 2.6.5+, 2.7.2+ or 3.3+
"""
def python_is_supported():
"""Check that Python is supported."""
# Any change to this must be copied to pwb.py
return (PYTHON_VERSION >= (3, 3, 0) or
(PY2 and PYTHON_VERSION >= (2, 7, 2)) or
(PY26 and PYTHON_VERSION >= (2, 6, 5)))
if not python_is_supported():
raise RuntimeError(versions_required_message % sys.version)
test_deps = ['bz2file', 'mock']
dependencies = ['requests!=2.18.2']
# the irc module has no Python 2.6 support since 10.0
irc_dep = 'irc==8.9' if sys.version_info < (2, 7) else 'irc'
csv_dep = 'unicodecsv!=0.14.0' if PYTHON_VERSION < (2, 7) else 'unicodecsv'
extra_deps = {
# Core library dependencies
'eventstreams': ['sseclient'],
'isbn': ['python-stdnum'],
'Graphviz': ['pydot>=1.0.28'],
'Google': ['google>=1.7'],
'IRC': [irc_dep],
'mwparserfromhell': ['mwparserfromhell>=0.3.3'],
'Tkinter': ['Pillow<3.5.0' if PY26 else 'Pillow'],
# 0.6.1 supports socket.io 1.0, but WMF is using 0.9 (T91393 and T85716)
'rcstream': ['socketIO-client<0.6.1'],
'security': ['requests[security]', 'pycparser!=2.14'],
'mwoauth': ['mwoauth>=0.2.4,!=0.3.1'],
'html': ['BeautifulSoup4'],
}
if PY2:
# Additional core library dependencies which are only available on Python 2
extra_deps.update({
'csv': [csv_dep],
'MySQL': ['oursql'],
'unicode7': ['unicodedata2>=7.0.0-2'],
})
script_deps = {
'flickrripper.py': ['Pillow<3.5.0' if PY26 else 'Pillow'],
'states_redirect.py': ['pycountry'],
'weblinkchecker.py': ['memento_client>=0.5.1,!=0.6.0'],
'patrol.py': ['mwparserfromhell>=0.3.3'],
}
# flickrapi 1.4.4 installs a root logger in verbose mode; 1.4.5 fixes this.
# The problem doesn't exist in flickrapi 2.x.
# pywikibot accepts flickrapi 1.4.5+ on Python 2, as it has been stable for a
# long time, and only depends on python-requests 1.x, whereas flickrapi 2.x
# depends on python-requests 2.x, which is first packaged in Ubuntu 14.04
# and will be first packaged for Fedora Core 21.
# flickrapi 1.4.x does not run on Python 3, and setuptools can only
# select flickrapi 2.x for Python 3 installs.
script_deps['flickrripper.py'].append(
'flickrapi>=1.4.5,<2' if PY26 else 'flickrapi')
# lunatic-python is only available for Linux
if sys.platform.startswith('linux'):
script_deps['script_wui.py'] = [irc_dep, 'lunatic-python', 'crontab']
# The main pywin32 repository contains a Python 2 only setup.py with a small
# wrapper setup3.py for Python 3.
# http://pywin32.hg.sourceforge.net:8000/hgroot/pywin32/pywin32
# The main pywinauto repository doesn't support Python 3.
# The repositories used below have a Python 3 compliant setup.py
dependency_links = [
'git+https://github.com/AlereDevices/lunatic-python.git#egg=lunatic-python',
'hg+https://bitbucket.org/TJG/pywin32#egg=pywin32',
'git+https://github.com/vasily-v-ryabov/pywinauto-64#egg=pywinauto',
'git+https://github.com/nlhepler/pydot#egg=pydot-1.0.29',
]
if PYTHON_VERSION < (2, 7, 3):
# work around distutils hardcoded unittest dependency
# work around T106512
import unittest
__unused__ += (unittest, )
if 'test' in sys.argv:
import unittest2
sys.modules['unittest'] = unittest2
if sys.version_info[0] == 2:
if PY26:
script_deps['replicate_wiki.py'] = ['argparse']
dependencies.append('future>=0.15.0') # provides collections backports
dependencies += extra_deps['unicode7'] # T102461 workaround
# tools.ip does not have a hard dependency on an IP address module,
# as it falls back to using regexes if one is not available.
# The functional backport of py3 ipaddress is acceptable:
# https://pypi.python.org/pypi/ipaddress
# However the Debian package python-ipaddr is also supported:
# https://pypi.python.org/pypi/ipaddr
# Other backports are likely broken.
# ipaddr 2.1.10+ is distributed with Debian and Fedora. See T105443.
dependencies.append('ipaddr>=2.1.10')
if sys.version_info < (2, 7, 9):
# Python versions before 2.7.9 will cause urllib3 to trigger
# InsecurePlatformWarning warnings for all HTTPS requests. By
# installing with security extras, requests will automatically set
# them up and the warnings will stop. See
# <https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning>
# for more details.
dependencies += extra_deps['security']
script_deps['data_ingestion.py'] = extra_deps['csv']
try:
import bz2
__unused__ += (bz2, )
except ImportError:
# Use bz2file if the python is not compiled with bz2 support.
dependencies.append('bz2file')
# Some of the ui_tests depend on accessing the console window's menu
# to set the console font and copy and paste, achieved using pywinauto
# which depends on pywin32.
# These tests may be disabled because pywin32 depends on VC++, is time
# consuming to build, and the console window can't be accessed during appveyor
# builds.
# Microsoft makes available a compiler for Python 2.7
# http://www.microsoft.com/en-au/download/details.aspx?id=44266
# If you set up your own compiler for Python 3, on 3.3 two demo files
# packaged with pywin32 may fail. Remove com/win32com/demos/ie*.py
if os.name == 'nt' and os.environ.get('PYSETUP_TEST_NO_UI', '0') != '1':
# FIXME: tests/ui_tests.py suggests pywinauto 0.4.2
# which isn't provided on pypi.
test_deps += ['pywin32', 'pywinauto>=0.4.0']
extra_deps.update(script_deps)
# Add all dependencies as test dependencies,
# so all scripts can be compiled for script_tests, etc.
if 'PYSETUP_TEST_EXTRAS' in os.environ:
test_deps += list(itertools.chain(*(extra_deps.values())))
if 'oursql' in test_deps and os.name == 'nt':
test_deps.remove('oursql') # depends on Cython
if 'requests[security]' in test_deps:
# Bug T105767 on Python 2.7 release 9+
if sys.version_info[:2] == (2, 7) and sys.version_info[2] >= 9:
test_deps.remove('requests[security]')
# These extra dependencies are needed, otherwise unittest fails to load tests.
if sys.version_info[0] == 2:
test_deps += extra_deps['csv'] + ['mock']
else:
test_deps += ['six']
from setuptools import setup, find_packages
name = 'pywikibot'
version = '3.0'
try:
import subprocess
date = subprocess.check_output(['git', 'log', '-1', '--format=%ci']).strip()
date = date.decode().split(' ')[0].replace('-', '')
version = version + "." + date
except Exception as e:
print(e)
version = version + "-dev"
github_url = 'https://github.com/wikimedia/pywikibot-core'
setup(
name=name,
version=version,
description='Python MediaWiki Bot Framework',
long_description=open('pypi_description.rst').read(),
keywords=('pywikibot', 'python', 'mediawiki', 'bot', 'wiki', 'framework',
'wikimedia', 'wikipedia', 'pwb', 'pywikipedia', 'API'),
maintainer='The Pywikibot team',
maintainer_email='pywikibot@lists.wikimedia.org',
license='MIT License',
packages=[str(name)] + [package
for package in find_packages()
if package.startswith('pywikibot.')],
install_requires=dependencies,
dependency_links=dependency_links,
extras_require=extra_deps,
url='https://www.mediawiki.org/wiki/Pywikibot',
test_suite="tests.collector",
tests_require=test_deps,
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
use_2to3=False
)
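# Illustrative note (not part of the original file): the optional features
# declared in `extras_require` above can be installed with pip's extras
# syntax, for example:
#
#   pip install .[mwparserfromhell,isbn]
#
# while the packages in `tests_require` are only pulled in when running
# `python setup.py test`.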
|
npdoty/pywikibot
|
setup.py
|
Python
|
mit
| 8,699 | 0.000345 |
__author__ = 'http://www.python-course.eu/python3_inheritance.php'
class Person:
def __init__(self, first, last):
self.firstname = first
self.lastname = last
def Name(self):
return self.firstname + " " + self.lastname
class Employee(Person):
def __init__(self, first, last, staffnum):
Person.__init__(self,first, last)
self.staffnumber = staffnum
def GetEmployee(self):
return self.Name() + ", " + self.staffnumber
x = Person("Marge", "Simpson")
y = Employee("Homer", "Simpson", "1007")
print(x.Name())
print(y.GetEmployee())
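# Expected output of the two print() calls above:
#   Marge Simpson
#   Homer Simpson, 1007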
|
MarketShareData/Internal
|
code/test1/inheritanceTest.py
|
Python
|
mit
| 600 | 0.006667 |
from functools import partial
from typing import Any, Callable, Dict, Generator, List, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventType
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
EPIC_NAME_TEMPLATE = "**{name}**"
STORY_NAME_TEMPLATE = "[{name}]({app_url})"
COMMENT_ADDED_TEMPLATE = (
"New comment added to the {entity} {name_template}:\n``` quote\n{text}\n```"
)
NEW_DESC_ADDED_TEMPLATE = (
"New description added to the {entity} {name_template}:\n``` quote\n{new}\n```"
)
DESC_CHANGED_TEMPLATE = (
"Description for the {entity} {name_template} was changed from:\n"
"``` quote\n{old}\n```\nto\n``` quote\n{new}\n```"
)
DESC_REMOVED_TEMPLATE = "Description for the {entity} {name_template} was removed."
STATE_CHANGED_TEMPLATE = (
"State of the {entity} {name_template} was changed from **{old}** to **{new}**."
)
NAME_CHANGED_TEMPLATE = (
"The name of the {entity} {name_template} was changed from:\n"
"``` quote\n{old}\n```\nto\n``` quote\n{new}\n```"
)
ARCHIVED_TEMPLATE = "The {entity} {name_template} was {operation}."
STORY_TASK_TEMPLATE = "Task **{task_description}** was {operation} the story {name_template}."
STORY_TASK_COMPLETED_TEMPLATE = (
"Task **{task_description}** ({name_template}) was completed. :tada:"
)
STORY_ADDED_REMOVED_EPIC_TEMPLATE = (
"The story {story_name_template} was {operation} the epic {epic_name_template}."
)
STORY_EPIC_CHANGED_TEMPLATE = "The story {story_name_template} was moved from {old_epic_name_template} to {new_epic_name_template}."
STORY_ESTIMATE_TEMPLATE = "The estimate for the story {story_name_template} was set to {estimate}."
FILE_ATTACHMENT_TEMPLATE = (
"A {type} attachment `{file_name}` was added to the story {name_template}."
)
LABEL_TEMPLATE = "**{name}**"
STORY_LABEL_TEMPLATE = "The label {labels} was added to the story {name_template}."
STORY_LABEL_PLURAL_TEMPLATE = "The labels {labels} were added to the story {name_template}."
STORY_UPDATE_PROJECT_TEMPLATE = (
"The story {name_template} was moved from the **{old}** project to **{new}**."
)
STORY_UPDATE_TYPE_TEMPLATE = (
"The type of the story {name_template} was changed from **{old_type}** to **{new_type}**."
)
DELETE_TEMPLATE = "The {entity_type} **{name}** was deleted."
STORY_UPDATE_OWNER_TEMPLATE = "New owner added to the story {name_template}."
TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE = " ({old} -> {new})"
STORY_GITHUB_PR_TEMPLATE = (
"New GitHub PR [#{name}]({url}) opened for story {name_template}{workflow_state_template}."
)
STORY_GITHUB_COMMENT_PR_TEMPLATE = "Existing GitHub PR [#{name}]({url}) associated with story {name_template}{workflow_state_template}."
STORY_GITHUB_BRANCH_TEMPLATE = "New GitHub branch [{name}]({url}) associated with story {name_template}{workflow_state_template}."
STORY_UPDATE_BATCH_TEMPLATE = "The story {name_template} {templates}{workflow_state_template}."
STORY_UPDATE_BATCH_CHANGED_TEMPLATE = "{operation} from {sub_templates}"
STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE = "{entity_type} **{old}** to **{new}**"
STORY_UPDATE_BATCH_ADD_REMOVE_TEMPLATE = "{operation} with {entity}"
def get_action_with_primary_id(payload: Dict[str, Any]) -> Dict[str, Any]:
for action in payload["actions"]:
if payload["primary_id"] == action["id"]:
action_with_primary_id = action
return action_with_primary_id
def get_event(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]:
event = "{}_{}".format(action["entity_type"], action["action"])
# We only consider the change to be a batch update if there are multiple stories (and thus there is no primary_id)
if event == "story_update" and payload.get("primary_id") is None:
return "{}_{}".format(event, "batch")
if event in IGNORED_EVENTS:
return None
changes = action.get("changes")
if changes is not None:
if changes.get("description") is not None:
event = "{}_{}".format(event, "description")
elif changes.get("state") is not None:
event = "{}_{}".format(event, "state")
elif changes.get("workflow_state_id") is not None:
event = "{}_{}".format(event, "state")
elif changes.get("name") is not None:
event = "{}_{}".format(event, "name")
elif changes.get("archived") is not None:
event = "{}_{}".format(event, "archived")
elif changes.get("complete") is not None:
event = "{}_{}".format(event, "complete")
elif changes.get("epic_id") is not None:
event = "{}_{}".format(event, "epic")
elif changes.get("estimate") is not None:
event = "{}_{}".format(event, "estimate")
elif changes.get("file_ids") is not None:
event = "{}_{}".format(event, "attachment")
elif changes.get("label_ids") is not None:
event = "{}_{}".format(event, "label")
elif changes.get("project_id") is not None:
event = "{}_{}".format(event, "project")
elif changes.get("story_type") is not None:
event = "{}_{}".format(event, "type")
elif changes.get("owner_ids") is not None:
event = "{}_{}".format(event, "owner")
return event
def get_topic_function_based_on_type(payload: Dict[str, Any], action: Dict[str, Any]) -> Any:
entity_type = action["entity_type"]
return EVENT_TOPIC_FUNCTION_MAPPER.get(entity_type)
def get_delete_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
return DELETE_TEMPLATE.format(**action)
def get_story_create_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
if action.get("epic_id") is None:
message = "New story [{name}]({app_url}) of type **{story_type}** was created."
kwargs = action
else:
message = "New story [{name}]({app_url}) was created and added to the epic **{epic_name}**."
kwargs = {
"name": action["name"],
"app_url": action["app_url"],
}
epic_id = action["epic_id"]
refs = payload["references"]
for ref in refs:
if ref["id"] == epic_id:
kwargs["epic_name"] = ref["name"]
return message.format(**kwargs)
def get_epic_create_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
message = "New epic **{name}**({state}) was created."
return message.format(**action)
def get_comment_added_body(payload: Dict[str, Any], action: Dict[str, Any], entity: str) -> str:
actions = payload["actions"]
kwargs = {"entity": entity}
for action in actions:
if action["id"] == payload["primary_id"]:
kwargs["text"] = action["text"]
elif action["entity_type"] == entity:
name_template = get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
)
kwargs["name_template"] = name_template
return COMMENT_ADDED_TEMPLATE.format(**kwargs)
def get_update_description_body(
payload: Dict[str, Any], action: Dict[str, Any], entity: str
) -> str:
desc = action["changes"]["description"]
kwargs = {
"entity": entity,
"new": desc["new"],
"old": desc["old"],
"name_template": get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
),
}
if kwargs["new"] and kwargs["old"]:
body = DESC_CHANGED_TEMPLATE.format(**kwargs)
elif kwargs["new"]:
body = NEW_DESC_ADDED_TEMPLATE.format(**kwargs)
else:
body = DESC_REMOVED_TEMPLATE.format(**kwargs)
return body
def get_epic_update_state_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
state = action["changes"]["state"]
kwargs = {
"entity": "epic",
"new": state["new"],
"old": state["old"],
"name_template": EPIC_NAME_TEMPLATE.format(name=action["name"]),
}
return STATE_CHANGED_TEMPLATE.format(**kwargs)
def get_story_update_state_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
workflow_state_id = action["changes"]["workflow_state_id"]
references = payload["references"]
state = {}
for ref in references:
if ref["id"] == workflow_state_id["new"]:
state["new"] = ref["name"]
if ref["id"] == workflow_state_id["old"]:
state["old"] = ref["name"]
kwargs = {
"entity": "story",
"new": state["new"],
"old": state["old"],
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action.get("app_url"),
),
}
return STATE_CHANGED_TEMPLATE.format(**kwargs)
def get_update_name_body(payload: Dict[str, Any], action: Dict[str, Any], entity: str) -> str:
name = action["changes"]["name"]
kwargs = {
"entity": entity,
"new": name["new"],
"old": name["old"],
"name_template": get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
),
}
return NAME_CHANGED_TEMPLATE.format(**kwargs)
def get_update_archived_body(payload: Dict[str, Any], action: Dict[str, Any], entity: str) -> str:
archived = action["changes"]["archived"]
if archived["new"]:
operation = "archived"
else:
operation = "unarchived"
kwargs = {
"entity": entity,
"name_template": get_name_template(entity).format(
name=action["name"],
app_url=action.get("app_url"),
),
"operation": operation,
}
return ARCHIVED_TEMPLATE.format(**kwargs)
def get_story_task_body(payload: Dict[str, Any], action: Dict[str, Any], operation: str) -> str:
kwargs = {
"task_description": action["description"],
"operation": operation,
}
for a in payload["actions"]:
if a["entity_type"] == "story":
kwargs["name_template"] = STORY_NAME_TEMPLATE.format(
name=a["name"],
app_url=a["app_url"],
)
return STORY_TASK_TEMPLATE.format(**kwargs)
def get_story_task_completed_body(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]:
kwargs = {
"task_description": action["description"],
}
story_id = action["story_id"]
for ref in payload["references"]:
if ref["id"] == story_id:
kwargs["name_template"] = STORY_NAME_TEMPLATE.format(
name=ref["name"],
app_url=ref["app_url"],
)
if action["changes"]["complete"]["new"]:
return STORY_TASK_COMPLETED_TEMPLATE.format(**kwargs)
else:
return None
def get_story_update_epic_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
kwargs = {
"story_name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
new_id = action["changes"]["epic_id"].get("new")
old_id = action["changes"]["epic_id"].get("old")
for ref in payload["references"]:
if ref["id"] == new_id:
kwargs["new_epic_name_template"] = EPIC_NAME_TEMPLATE.format(name=ref["name"])
if ref["id"] == old_id:
kwargs["old_epic_name_template"] = EPIC_NAME_TEMPLATE.format(name=ref["name"])
if new_id and old_id:
return STORY_EPIC_CHANGED_TEMPLATE.format(**kwargs)
elif new_id:
kwargs["epic_name_template"] = kwargs["new_epic_name_template"]
kwargs["operation"] = "added to"
else:
kwargs["epic_name_template"] = kwargs["old_epic_name_template"]
kwargs["operation"] = "removed from"
return STORY_ADDED_REMOVED_EPIC_TEMPLATE.format(**kwargs)
def get_story_update_estimate_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
kwargs = {
"story_name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
new = action["changes"]["estimate"].get("new")
if new:
kwargs["estimate"] = f"{new} points"
else:
kwargs["estimate"] = "*Unestimated*"
return STORY_ESTIMATE_TEMPLATE.format(**kwargs)
def get_reference_by_id(payload: Dict[str, Any], ref_id: int) -> Dict[str, Any]:
ref: Dict[str, Any] = {}
for reference in payload["references"]:
if reference["id"] == ref_id:
ref = reference
return ref
def get_secondary_actions_with_param(
payload: Dict[str, Any], entity: str, changed_attr: str
) -> Generator[Dict[str, Any], None, None]:
# This function is a generator for secondary actions that have the required changed attributes,
# e.g. a "story" action whose "pull_request_ids" changed.
for action in payload["actions"]:
if action["entity_type"] == entity and action["changes"].get(changed_attr) is not None:
yield action
def get_story_create_github_entity_body(
payload: Dict[str, Any], action: Dict[str, Any], entity: str
) -> str:
pull_request_action: Dict[str, Any] = get_action_with_primary_id(payload)
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(**action),
"name": pull_request_action.get("number")
if entity == "pull-request" or entity == "pull-request-comment"
else pull_request_action.get("name"),
"url": pull_request_action["url"],
"workflow_state_template": "",
}
# Sometimes the workflow state of the story will not be changed when linking to a PR.
if action["changes"].get("workflow_state_id") is not None:
new_state_id = action["changes"]["workflow_state_id"]["new"]
old_state_id = action["changes"]["workflow_state_id"]["old"]
new_state = get_reference_by_id(payload, new_state_id)["name"]
old_state = get_reference_by_id(payload, old_state_id)["name"]
kwargs["workflow_state_template"] = TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE.format(
new=new_state, old=old_state
)
if entity == "pull-request":
template = STORY_GITHUB_PR_TEMPLATE
elif entity == "pull-request-comment":
template = STORY_GITHUB_COMMENT_PR_TEMPLATE
else:
template = STORY_GITHUB_BRANCH_TEMPLATE
return template.format(**kwargs)
def get_story_update_attachment_body(
payload: Dict[str, Any], action: Dict[str, Any]
) -> Optional[str]:
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
file_ids_added = action["changes"]["file_ids"].get("adds")
# If this is a payload for when an attachment is removed, ignore it
if not file_ids_added:
return None
file_id = file_ids_added[0]
for ref in payload["references"]:
if ref["id"] == file_id:
kwargs.update(
type=ref["entity_type"],
file_name=ref["name"],
)
return FILE_ATTACHMENT_TEMPLATE.format(**kwargs)
def get_story_joined_label_list(
payload: Dict[str, Any], action: Dict[str, Any], label_ids_added: List[int]
) -> str:
labels = []
for label_id in label_ids_added:
label_name = ""
for action in payload["actions"]:
if action.get("id") == label_id:
label_name = action.get("name", "")
if label_name == "":
label_name = get_reference_by_id(payload, label_id).get("name", "")
labels.append(LABEL_TEMPLATE.format(name=label_name))
return ", ".join(labels)
def get_story_label_body(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]:
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
label_ids_added = action["changes"]["label_ids"].get("adds")
# If this is a payload for when no label is added, ignore it
if not label_ids_added:
return None
kwargs.update(labels=get_story_joined_label_list(payload, action, label_ids_added))
return (
STORY_LABEL_TEMPLATE.format(**kwargs)
if len(label_ids_added) == 1
else STORY_LABEL_PLURAL_TEMPLATE.format(**kwargs)
)
def get_story_update_project_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
new_project_id = action["changes"]["project_id"]["new"]
old_project_id = action["changes"]["project_id"]["old"]
for ref in payload["references"]:
if ref["id"] == new_project_id:
kwargs.update(new=ref["name"])
if ref["id"] == old_project_id:
kwargs.update(old=ref["name"])
return STORY_UPDATE_PROJECT_TEMPLATE.format(**kwargs)
def get_story_update_type_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
"new_type": action["changes"]["story_type"]["new"],
"old_type": action["changes"]["story_type"]["old"],
}
return STORY_UPDATE_TYPE_TEMPLATE.format(**kwargs)
def get_story_update_owner_body(payload: Dict[str, Any], action: Dict[str, Any]) -> str:
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
}
return STORY_UPDATE_OWNER_TEMPLATE.format(**kwargs)
def get_story_update_batch_body(payload: Dict[str, Any], action: Dict[str, Any]) -> Optional[str]:
# When the user selects one or more stories with the checkbox, they can perform
# a batch update on multiple stories while changing multiple attributes at the
# same time.
changes = action["changes"]
kwargs = {
"name_template": STORY_NAME_TEMPLATE.format(
name=action["name"],
app_url=action["app_url"],
),
"workflow_state_template": "",
}
templates = []
last_change = "other"
move_sub_templates = []
if "epic_id" in changes:
last_change = "epic"
move_sub_templates.append(
STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format(
entity_type="Epic",
old=get_reference_by_id(payload, changes["epic_id"].get("old")).get("name"),
new=get_reference_by_id(payload, changes["epic_id"].get("new")).get("name"),
)
)
if "project_id" in changes:
last_change = "project"
move_sub_templates.append(
STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format(
entity_type="Project",
old=get_reference_by_id(payload, changes["project_id"].get("old")).get("name"),
new=get_reference_by_id(payload, changes["project_id"].get("new")).get("name"),
)
)
if len(move_sub_templates) > 0:
templates.append(
STORY_UPDATE_BATCH_CHANGED_TEMPLATE.format(
operation="was moved",
sub_templates=", ".join(move_sub_templates),
)
)
if "story_type" in changes:
last_change = "type"
templates.append(
STORY_UPDATE_BATCH_CHANGED_TEMPLATE.format(
operation="{} changed".format("was" if len(templates) == 0 else "and"),
sub_templates=STORY_UPDATE_BATCH_CHANGED_SUB_TEMPLATE.format(
entity_type="type",
old=changes["story_type"].get("old"),
new=changes["story_type"].get("new"),
),
)
)
if "label_ids" in changes:
label_ids_added = changes["label_ids"].get("adds")
# If this is a payload for when no label is added, ignore it
if label_ids_added is not None:
last_change = "label"
labels = get_story_joined_label_list(payload, action, label_ids_added)
templates.append(
STORY_UPDATE_BATCH_ADD_REMOVE_TEMPLATE.format(
operation="{} added".format("was" if len(templates) == 0 else "and"),
entity="the new label{plural} {labels}".format(
plural="s" if len(changes["label_ids"]) > 1 else "", labels=labels
),
)
)
if "workflow_state_id" in changes:
last_change = "state"
kwargs.update(
workflow_state_template=TRAILING_WORKFLOW_STATE_CHANGE_TEMPLATE.format(
old=get_reference_by_id(payload, changes["workflow_state_id"].get("old")).get(
"name"
),
new=get_reference_by_id(payload, changes["workflow_state_id"].get("new")).get(
"name"
),
)
)
# Use the default template for the state change if it is the only change.
if len(templates) <= 1 or (len(templates) == 0 and last_change == "state"):
event: str = "{}_{}".format("story_update", last_change)
alternative_body_func = EVENT_BODY_FUNCTION_MAPPER.get(event)
# If last_change is not one of "epic", "project", "type", "label" and "state"
# we should ignore the action as there is no way for us to render the changes.
if alternative_body_func is None:
return None
return alternative_body_func(payload, action)
kwargs.update(templates=", ".join(templates))
return STORY_UPDATE_BATCH_TEMPLATE.format(**kwargs)
def get_entity_name(
payload: Dict[str, Any], action: Dict[str, Any], entity: Optional[str] = None
) -> Optional[str]:
name = action.get("name")
if name is None or action["entity_type"] == "branch":
for action in payload["actions"]:
if action["entity_type"] == entity:
name = action["name"]
if name is None:
for ref in payload["references"]:
if ref["entity_type"] == entity:
name = ref["name"]
return name
def get_name_template(entity: str) -> str:
if entity == "story":
return STORY_NAME_TEMPLATE
return EPIC_NAME_TEMPLATE
def send_stream_messages_for_actions(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any],
action: Dict[str, Any],
event: str,
) -> None:
body_func = EVENT_BODY_FUNCTION_MAPPER.get(event)
topic_func = get_topic_function_based_on_type(payload, action)
if body_func is None or topic_func is None:
raise UnsupportedWebhookEventType(event)
topic = topic_func(payload, action)
body = body_func(payload, action)
if topic and body:
check_send_webhook_message(request, user_profile, topic, body, event)
EVENT_BODY_FUNCTION_MAPPER: Dict[str, Callable[[Dict[str, Any], Dict[str, Any]], Optional[str]]] = {
"story_update_archived": partial(get_update_archived_body, entity="story"),
"epic_update_archived": partial(get_update_archived_body, entity="epic"),
"story_create": get_story_create_body,
"pull-request_create": partial(get_story_create_github_entity_body, entity="pull-request"),
"pull-request_comment": partial(
get_story_create_github_entity_body, entity="pull-request-comment"
),
"branch_create": partial(get_story_create_github_entity_body, entity="branch"),
"story_delete": get_delete_body,
"epic_delete": get_delete_body,
"story-task_create": partial(get_story_task_body, operation="added to"),
"story-task_delete": partial(get_story_task_body, operation="removed from"),
"story-task_update_complete": get_story_task_completed_body,
"story_update_epic": get_story_update_epic_body,
"story_update_estimate": get_story_update_estimate_body,
"story_update_attachment": get_story_update_attachment_body,
"story_update_label": get_story_label_body,
"story_update_owner": get_story_update_owner_body,
"story_update_project": get_story_update_project_body,
"story_update_type": get_story_update_type_body,
"epic_create": get_epic_create_body,
"epic-comment_create": partial(get_comment_added_body, entity="epic"),
"story-comment_create": partial(get_comment_added_body, entity="story"),
"epic_update_description": partial(get_update_description_body, entity="epic"),
"story_update_description": partial(get_update_description_body, entity="story"),
"epic_update_state": get_epic_update_state_body,
"story_update_state": get_story_update_state_body,
"epic_update_name": partial(get_update_name_body, entity="epic"),
"story_update_name": partial(get_update_name_body, entity="story"),
"story_update_batch": get_story_update_batch_body,
}
ALL_EVENT_TYPES = list(EVENT_BODY_FUNCTION_MAPPER.keys())
EVENT_TOPIC_FUNCTION_MAPPER = {
"story": partial(get_entity_name, entity="story"),
"pull-request": partial(get_entity_name, entity="story"),
"branch": partial(get_entity_name, entity="story"),
"story-comment": partial(get_entity_name, entity="story"),
"story-task": partial(get_entity_name, entity="story"),
"epic": partial(get_entity_name, entity="epic"),
"epic-comment": partial(get_entity_name, entity="epic"),
}
IGNORED_EVENTS = {
"story-comment_update",
}
EVENTS_SECONDARY_ACTIONS_FUNCTION_MAPPER: Dict[
str, Callable[[Dict[str, Any]], Generator[Dict[str, Any], None, None]]
] = {
"pull-request_create": partial(
get_secondary_actions_with_param, entity="story", changed_attr="pull_request_ids"
),
"branch_create": partial(
get_secondary_actions_with_param, entity="story", changed_attr="branch_ids"
),
"pull-request_comment": partial(
get_secondary_actions_with_param, entity="story", changed_attr="pull_request_ids"
),
}
@webhook_view("ClubHouse", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_clubhouse_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Optional[Dict[str, Any]] = REQ(argument_type="body"),
) -> HttpResponse:
# Clubhouse has a tendency to send empty POST requests to
# third-party endpoints. It is unclear as to which event type
# such requests correspond to. So, it is best to ignore such
# requests for now.
if payload is None:
return json_success()
if payload.get("primary_id") is not None:
action = get_action_with_primary_id(payload)
primary_actions = [action]
else:
primary_actions = payload["actions"]
for primary_action in primary_actions:
event = get_event(payload, primary_action)
if event is None:
continue
if event in EVENTS_SECONDARY_ACTIONS_FUNCTION_MAPPER:
sec_actions_func = EVENTS_SECONDARY_ACTIONS_FUNCTION_MAPPER[event]
for sec_action in sec_actions_func(payload):
send_stream_messages_for_actions(request, user_profile, payload, sec_action, event)
else:
send_stream_messages_for_actions(request, user_profile, payload, primary_action, event)
return json_success()
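# Illustrative payload shape, inferred from the handlers above (all values are
# made up; the real webhook may carry additional fields):
#
#   {
#       "primary_id": 101,
#       "actions": [
#           {"id": 101, "entity_type": "story", "action": "update",
#            "name": "Fix login bug", "app_url": "https://example.test/story/1",
#            "changes": {"workflow_state_id": {"old": 5, "new": 6}}}
#       ],
#       "references": [
#           {"id": 5, "entity_type": "workflow-state", "name": "In Progress"},
#           {"id": 6, "entity_type": "workflow-state", "name": "Done"}
#       ]
#   }
#
# For this payload get_event() resolves to "story_update_state" and the message
# body is rendered by get_story_update_state_body().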
|
rht/zulip
|
zerver/webhooks/clubhouse/view.py
|
Python
|
apache-2.0
| 27,498 | 0.002655 |
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
axins1 = inset_axes(ax1,
width="50%", # width = 10% of parent_bbox width
height="5%", # height : 50%
loc=1)
im1=ax1.imshow([[1,2],[2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1,2,3])
axins1.xaxis.set_ticks_position("bottom")
axins = inset_axes(ax2,
width="5%", # width = 10% of parent_bbox width
height="50%", # height : 50%
loc=3,
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax2.transAxes,
borderpad=0,
)
# Controlling the placement of the inset axes is basically the same as that
# of the legend. You may want to play with the borderpad value and
# the bbox_to_anchor coordinates.
im=ax2.imshow([[1,2],[2, 3]])
plt.colorbar(im, cax=axins, ticks=[1,2,3])
plt.draw()
plt.show()
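# Note (illustrative, not part of the original example): the `loc` codes above
# follow matplotlib's legend-location convention, e.g. loc=1 is the upper-right
# corner of the parent axes and loc=3 the lower-left corner; together with
# bbox_to_anchor=(1.05, 0., 1, 1) in axes coordinates, the second colorbar is
# placed just outside the right edge of ax2.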
|
cactusbin/nyt
|
matplotlib/examples/axes_grid/demo_colorbar_with_inset_locator.py
|
Python
|
unlicense
| 1,052 | 0.013308 |
import os
import string
import random
import logging
from thug.ActiveX.modules import WScriptShell
from thug.ActiveX.modules import TextStream
from thug.ActiveX.modules import File
from thug.ActiveX.modules import Folder
from thug.OS.Windows import win32_files
from thug.OS.Windows import win32_folders
log = logging.getLogger("Thug")
def BuildPath(self, arg0, arg1): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] BuildPath("{arg0}", "{arg1}")')
return f"{arg0}\\{arg1}"
def CopyFile(self, source, destination, overwritefiles = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CopyFile("{source}", "{destination}")')
log.TextFiles[destination] = log.TextFiles[source]
def DeleteFile(self, filespec, force = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] DeleteFile("{filespec}", {force})')
def CreateTextFile(self, filename, overwrite = False, _unicode = False): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateTextFile("{filename}", '
f'"{overwrite}", '
f'"{_unicode}")')
stream = TextStream.TextStream()
stream._filename = filename
return stream
def CreateFolder(self, path): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] CreateFolder("{path}")')
return Folder.Folder(path)
def FileExists(self, filespec): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FileExists("{filespec}")')
if not filespec:
return True
if filespec.lower() in win32_files:
return True
if getattr(log, "TextFiles", None) and filespec in log.TextFiles:
return True
return False
def FolderExists(self, folder): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] FolderExists("{folder}")')
return str(folder).lower() in win32_folders
def GetExtensionName(self, path): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetExtensionName("{path}")')
ext = os.path.splitext(path)[1]
return ext if ext else ""
def GetFile(self, filespec): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetFile("{filespec}")')
return File.File(filespec)
def GetSpecialFolder(self, arg):
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] GetSpecialFolder("{arg}")')
arg = int(arg)
folder = ''
if arg == 0:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%windir%")
elif arg == 1:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%SystemRoot%\\system32")
elif arg == 2:
folder = WScriptShell.ExpandEnvironmentStrings(self, "%TEMP%")
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] Returning {folder} for GetSpecialFolder("{arg}")')
return folder
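# Summary of the mapping implemented above (illustrative expansions shown):
#   0 -> %windir% (e.g. C:\Windows)
#   1 -> %SystemRoot%\system32 (e.g. C:\Windows\system32)
#   2 -> %TEMP%
# Any other index falls through and an empty string is returned.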
def GetTempName(self): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn('[Scripting.FileSystemObject ActiveX] GetTempName()')
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
def MoveFile(self, source, destination): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] MoveFile("{source}", "{destination}")')
log.TextFiles[destination] = log.TextFiles[source]
del log.TextFiles[source]
def OpenTextFile(self, sFilePathAndName, ForWriting = True, flag = True):
log.ThugLogging.add_behavior_warn(f'[Scripting.FileSystemObject ActiveX] OpenTextFile("{sFilePathAndName}", '
f'"{ForWriting}" ,'
f'"{flag}")')
log.ThugLogging.log_exploit_event(self._window.url,
"Scripting.FileSystemObject ActiveX",
"OpenTextFile",
data = {
"filename" : sFilePathAndName,
"ForWriting": ForWriting,
"flag" : flag
},
forward = False)
if getattr(log, 'TextFiles', None) is None:
log.TextFiles = {}
if sFilePathAndName in log.TextFiles:
return log.TextFiles[sFilePathAndName]
stream = TextStream.TextStream()
stream._filename = sFilePathAndName
if log.ThugOpts.local and sFilePathAndName in (log.ThugLogging.url, ): # pragma: no cover
with open(sFilePathAndName, encoding = 'utf-8', mode = 'r') as fd:
data = fd.read()
stream.Write(data)
log.TextFiles[sFilePathAndName] = stream
return stream
|
buffer/thug
|
thug/ActiveX/modules/ScriptingFileSystemObject.py
|
Python
|
gpl-2.0
| 5,185 | 0.010222 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement # use the features of python 3
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
class LRUCache(collections.MutableMapping): # ABCs for read-only and mutable mappings.
"""This class is not thread safe"""
def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
self.timeout = timeout # the cache expire time
self.close_callback = close_callback # called when value will be swept from cache
self._store = {} # dict<key, value>: store cache data key value
self._time_to_keys = collections.defaultdict(list) # defaultdict<time, list<key>>
# defaultdict: dict subclass that calls a factory function to supply missing values
self._keys_to_last_time = {} # dict<key, time>: the last time each key was visited
self._last_visits = collections.deque() # deque<time>: the time of every visit, oldest first
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._time_to_keys[t].append(key)
self._last_visits.append(t)
return self._store[key]
def __setitem__(self, key, value):
# O(1)
t = time.time()
self._keys_to_last_time[key] = t
self._store[key] = value
self._time_to_keys[t].append(key)
self._last_visits.append(t)
def __delitem__(self, key):
# O(1)
del self._store[key]
del self._keys_to_last_time[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def sweep(self):
# O(m)
now = time.time()
c = 0 # use to log how many keys has been swept.
while len(self._last_visits) > 0:
least = self._last_visits[0] # fetch the oldest time point
if now - least <= self.timeout: # the oldest time point hasn't expire
break
if self.close_callback is not None: # callback function has been set
for key in self._time_to_keys[least]: # fetch each key visited on the oldest time
if key in self._store: # found the cache key
if now - self._keys_to_last_time[key] > self.timeout:
value = self._store[key] # the entry has expired; pass its value to the callback
self.close_callback(value) # call callback
for key in self._time_to_keys[least]:
self._last_visits.popleft() # pop one visit record per key stored at this time point
# so that _last_visits stays in sync with _time_to_keys
if key in self._store:
if now - self._keys_to_last_time[key] > self.timeout:
del self._store[key]
del self._keys_to_last_time[key]
c += 1
del self._time_to_keys[least]
if c:
logging.debug('%d keys swept' % c)
def test():
c = LRUCache(timeout=0.3)
c['a'] = 1
assert c['a'] == 1
time.sleep(0.5)
c.sweep()
assert 'a' not in c
c['a'] = 2
c['b'] = 3
time.sleep(0.2)
c.sweep()
assert c['a'] == 2
assert c['b'] == 3
time.sleep(0.2)
c.sweep()
c['b']
time.sleep(0.2)
c.sweep()
assert 'a' not in c
assert c['b'] == 3
time.sleep(0.5)
c.sweep()
assert 'a' not in c
assert 'b' not in c
if __name__ == '__main__':
test()
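# Illustrative sketch (not part of the original module): `close_callback` is
# invoked for values swept after `timeout` seconds without a visit, e.g. to
# close idle sockets. `some_socket` below is a placeholder.
#
#   cache = LRUCache(timeout=30, close_callback=lambda sock: sock.close())
#   cache[('127.0.0.1', 8080)] = some_socket
#   ...
#   cache.sweep()   # called periodically; closes and drops expired entries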
|
meowlab/shadowsocks-comment
|
shadowsocks/lru_cache.py
|
Python
|
apache-2.0
| 4,886 | 0.003684 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from blinker import Namespace
namespace = Namespace()
#: Triggered when a dataset is published
on_dataset_published = namespace.signal('on-dataset-published')
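# Illustrative usage (not part of the original module): a receiver connects to
# the blinker signal and the publishing code emits it with send(), e.g.
#
#   @on_dataset_published.connect
#   def notify_followers(dataset, **kwargs):
#       ...
#
#   on_dataset_published.send(dataset)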
|
jphnoel/udata
|
udata/core/dataset/signals.py
|
Python
|
agpl-3.0
| 226 | 0 |
# AnalogClock's font selector for setup dialog
# E. A. Tacao <e.a.tacao |at| estadao.com.br>
# http://j.domaindlx.com/elements28/wxpython/
# 15 Fev 2006, 22:00 GMT-03:00
# Distributed under the wxWidgets license.
import wx
from wx.lib.newevent import NewEvent
from wx.lib.buttons import GenButton
#----------------------------------------------------------------------------
(FontSelectEvent, EVT_FONTSELECT) = NewEvent()
#----------------------------------------------------------------------------
class FontSelect(GenButton):
def __init__(self, parent, size=(75, 21), value=None):
GenButton.__init__(self, parent, wx.ID_ANY, label="Select...",
size=size)
self.SetBezelWidth(1)
self.parent = parent
self.SetValue(value)
self.parent.Bind(wx.EVT_BUTTON, self.OnClick, self)
def GetValue(self):
return self.value
def SetValue(self, value):
if value is None:
value = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
self.value = value
def OnClick(self, event):
data = wx.FontData()
data.EnableEffects(False)
font = self.value; font.SetPointSize(10)
data.SetInitialFont(font)
dlg = wx.FontDialog(self, data)
changed = dlg.ShowModal() == wx.ID_OK
if changed:
data = dlg.GetFontData()
self.value = data.GetChosenFont()
self.Refresh()
dlg.Destroy()
if changed:
nevt = FontSelectEvent(id=self.GetId(), obj=self, val=self.value)
wx.PostEvent(self.parent, nevt)
#
##
### eof
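# Illustrative usage (not part of the original module): a parent panel can
# listen for the custom event posted above, e.g.
#
#   selector = FontSelect(parent_panel, value=None)
#   parent_panel.Bind(EVT_FONTSELECT, lambda evt: print(evt.val))
#
# where `evt.val` carries the wx.Font chosen in the dialog.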
|
ktan2020/legacy-automation
|
win/Lib/site-packages/wx-3.0-msw/wx/lib/analogclock/lib_setup/fontselect.py
|
Python
|
mit
| 1,713 | 0.005838 |
import binascii
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.constant_time import bytes_eq
import cryptography.hazmat.backends
from django.conf import settings
from django.core.mail import send_mail
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
crypto_backend = cryptography.hazmat.backends.default_backend()
def verify_signature(request, request_body):
hmac_hmac = hmac.HMAC(settings.GITHUB_WEBHOOK_SECRET, SHA1(), crypto_backend)
hmac_hmac.update(request_body)
signature = b'sha1=' + binascii.hexlify(hmac_hmac.finalize())
return bytes_eq(signature, request.META['HTTP_X_HUB_SIGNATURE'])
@csrf_exempt
def receive_hook(request):
if not verify_signature(request, request.body): return HttpResponse(status=403)
send_mail('Hook from github', request.body, settings.SERVER_EMAIL,
map(lambda t: t[-1], settings.ADMINS))
return HttpResponse(status=200)
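# Illustrative wiring (not part of the original module; the import path below
# is an assumption based on this file living in project/hooks/views.py):
#
#   # urls.py
#   from django.conf.urls import url
#   from hooks.views import receive_hook
#
#   urlpatterns = [url(r'^hooks/github/$', receive_hook)]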
|
weargoggles/jwt-oauth-test
|
project/hooks/views.py
|
Python
|
gpl-3.0
| 995 | 0.00201 |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The typed dict module"""
from itertools import chain
class tdict(dict):
def __init__(self, kt=None, vt=None, data=None):
"""This is a typed dict implementation that optionally enforces given
types on contained values on assignment."""
self._kt = kt
self._vt = vt
if kt is None and vt is None:
self.check = self._check_noop
elif kt is None:
self.check = self._check_v
elif vt is None:
self.check = self._check_k
else:
self.check = self._check_kv
if data is not None:
self.update(data)
def _check_noop(self, *_):
pass
def _check_k(self, key, _):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
def _check_v(self, _, value):
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def _check_kv(self, key, value):
if not isinstance(key, self._kt):
raise TypeError(repr(key))
if not isinstance(value, self._vt):
raise TypeError(repr(value))
def __setitem__(self, key, value):
self.check(key, value)
super(tdict, self).__setitem__(key, value)
def update(self, E=None, **F):
try:
it = chain(E.items(), F.items())
except AttributeError:
it = chain(E, F)
for k, v in it:
self[k] = v
def setdefault(self, k, d=None):
self._check_k(k, d) if self._kt is not None else None
self._check_v(k, d) if self._vt is not None else None
return super(tdict, self).setdefault(k, d)
@classmethod
def fromkeys(cls, S, v=None):
kt = vt = None
if len(S) > 0:
kt, = set((type(s) for s in S))
if v is not None:
vt = type(v)
retval = tdict(kt, vt)
for s in S:
retval[s] = v
return retval
def repr(self):
return "tdict(kt=%s, vt=%s, data=%s)" % \
(self._kt, self._vt, super(tdict, self).__repr__())
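# Illustrative usage (not part of the original module):
#
#   d = tdict(kt=str, vt=int)
#   d['answer'] = 42           # accepted
#   d['answer'] = 'forty-two'  # raises TypeError: the value type is enforced
#   plain = tdict(data={'a': 1})  # no kt/vt given, behaves like a normal dict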
|
arskom/spyne
|
spyne/util/tdict.py
|
Python
|
lgpl-2.1
| 2,861 | 0.00035 |
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: mwu@google.com (Mingyu Wu)
"""Unittest for baserunner module."""
__author__ = 'mwu@google.com (Mingyu Wu)'
import os
import shutil
import sys
import tempfile
import time
import unittest
from lib import baserunner
from lib import filesystemhandlerextend
from lib import mock_emailmessage
from lib import mock_reporter
from lib import mock_scanscripts
from lib import pyreringconfig
from lib import pyreringutil
global_settings = pyreringconfig.GlobalPyreRingConfig.settings
class BaseRunnerTest(unittest.TestCase):
"""Unit test cases for BaseRunner class."""
def setUp(self):
    # Configure global_settings here instead of reading it from the file system.
self.tempdir = tempfile.mkdtemp()
root_dir = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0],
'../'))
global_settings.update(
{'report_dir': os.path.join(self.tempdir, 'report'),
'email_recipients': os.getenv('LOGNAME'),
'host_name': 'test.host',
'log_file': 'pyrering.log',
'file_errors': False,
'project_name': 'pyrering_unittest',
'root_dir': root_dir,
'sendmail': False,
'runner': 'baserunner',
'source_dir': os.path.join(root_dir, 'test'),
'tester': os.getenv('LOGNAME'),
'FATAL_STRING': 'Fatal:',
'header_file': 'header_info.txt',
'time': time.strftime('%Y%m%d%H%M'),
'skip_setup': False,
})
# get a default config and mocks
self.one_config = pyreringutil.PRConfigParser().Default()
self.scanner = mock_scanscripts.MockScanScripts()
self.emailmessage = mock_emailmessage.MockEmailMessage()
self.reporter = mock_reporter.MockTxtReporter()
self.runner = baserunner.BaseRunner(
name='test',
scanner=self.scanner,
email_message=self.emailmessage,
filesystem=filesystemhandlerextend.FileSystemHandlerExtend(),
reporter=self.reporter)
self.runner.Prepare()
if not os.path.isdir(global_settings['report_dir']):
os.makedirs(global_settings['report_dir'])
# I don't want the unit test to mess with the original log file.
global_settings['log_file'] += '.unittest'
def tearDown(self):
self.runner.CleanUp()
self.runner = ''
pyreringconfig.Reset()
self.scanner.CleanConfig()
shutil.rmtree(self.tempdir)
def testFindHeaderInfoFile(self):
global_settings['header_file'] = os.path.join(self.tempdir, 'header.txt')
fh = open(global_settings['header_file'], 'w')
fh.write('test info')
fh.close()
self.one_config['TEST_SCRIPT'] = 'echo 1'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testFindHeaderInfoFile'], False)
self.assertEqual(self.reporter.header, 'test info')
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
# Positive Test Cases:
def testOneCommand(self):
"""A simple sleep command takes some time to finish."""
# prepare the test script here
self.one_config['TEST_SCRIPT'] = 'sleep 3'
# set the mock scanscript to return this thing.
self.scanner.SetConfig([self.one_config])
# now run the test and return should be expected.
result = self.runner.Run(['testOneCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testEchoCommand(self):
"""A simple command has output on stdout."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoCommand'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
def testEchoToSTDERRCommand(self):
"""A simple command has output redirect to stderr."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoToSTDERRCommand >&2'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoSTDERRCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
def testRunScript(self):
"""A real script to run."""
self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'],
'test/test1_echo.sh')
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testRunScript'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has the echo output
def testRunScripts(self):
"""2 scripts to be run."""
self.one_config['TEST_SCRIPT'] = 'echo testRunScripts1'
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'echo testRunScripts2'
self.scanner.SetConfig([self.one_config, config2])
result = self.runner.Run(['testRunScripts'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 2)
# TODO(mwu): verify both scripts run fine
def testEmailSend(self):
"""Test Email should be send."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 1'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.assertTrue(True)
else:
self.fail(msg='Send email was not called')
def testEmailNotSendIfTestPass(self):
"""Test email should not go if all tests pass."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 0'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.fail()
# Negative Test Cases
def testTimeoutCommand(self):
"""A command times out."""
self.one_config['TEST_SCRIPT'] = 'echo timeouttest; sleep 8'
self.one_config['TIMEOUT'] = 2
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testTimeoutCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.timeout, 1)
def testNonExistCommand(self):
"""Test a wrong system command."""
self.one_config['TEST_SCRIPT'] = 'nonexist_command'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testNonExistScript(self):
"""Test a nonexist script."""
self.one_config['TEST_SCRIPT'] = '/tmp/nonexist_script.sh'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistScript'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testPermissionDenied(self):
"""Test something without permission."""
self.one_config['TEST_SCRIPT'] = 'touch /pyrering.txt'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testPermissionDenied'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testCatchWarningMessage(self):
"""Test a command has warning output."""
self.one_config['TEST_SCRIPT'] = 'echo warn message'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testCatchWarningMessage'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testCatchFatalMessage(self):
"""Test a command has fatal error message even exit code still 0."""
self.one_config['TEST_SCRIPT'] = 'echo Fatal:;echo anotherline'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testCatchFatalMessage'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testOutputLargeMessage(self):
"""Test a test can have large screen output.
    By default stdout only has a 4k buffer limit, so the code should drain
    the buffer while running the test; otherwise writes to the buffer will
    block once it is full.
"""
self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'],
'test/outputlargetxt.py')
self.one_config['TIMEOUT'] = 4
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testLargeOutput'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testExitWithError(self):
"""Test a test have an error exit, which is not a failure."""
self.one_config['TEST_SCRIPT'] = 'exit 255'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testExitWithError'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 0)
self.assertEqual(self.runner.error, 1)
def testSetupTestPassed(self):
"""Test a setup test case passes."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([config2], 'setup')
result = self.runner.Run(['testSetupTestFailed'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.failed, 0)
def testSetupTestFailed(self):
"""Test a setup test case failed, the test should exit at once."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 1'
self.scanner.SetConfig([config2], 'setup')
result = self.runner.Run(['testSetupTestFailed'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testTearDownFailed(self):
"""Test a teardown test case failed, the test still reports."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 1'
self.scanner.SetConfig([config2], 'teardown')
result = self.runner.Run(['testTearDownTestFailed'], False)
self.assertEqual(result, 4)
self.assertEqual(self.runner.failed, 4)
if __name__ == '__main__':
unittest.main()
|
kdlucas/pyrering
|
lib/baserunner_test.py
|
Python
|
apache-2.0
| 10,873 | 0.003219 |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Weight updating functions."""
import logging
import math
import pickle
import warnings
import os
import numpy
from ..base import py_str
from ..ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs, array, multiply)
from ..ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update,
signsgd_update, signum_update,
multi_sgd_update, multi_sgd_mom_update, multi_mp_sgd_update,
multi_mp_sgd_mom_update)
from ..ndarray import sparse
from ..random import normal
__all__ = [
'AdaDelta', 'AdaGrad', 'Adam', 'Adamax', 'DCASGD', 'FTML', 'Ftrl', 'LBSGD',
'NAG', 'NDabs', 'Nadam', 'Optimizer', 'RMSProp', 'SGD', 'SGLD', 'Signum',
'Test', 'Updater', 'ccSGD', 'create', 'get_updater', 'register'
]
def _flatten_list(nested_list):
return [item for sublist in nested_list for item in sublist]
class Optimizer(object):
"""The base class inherited by all optimizers.
Parameters
----------
rescale_grad : float, optional, default 1.0
Multiply the gradient with `rescale_grad` before updating. Often
        chosen to be ``1.0/batch_size``.
param_idx2name : dict from int to string, optional, default None
A dictionary that maps int index to string name.
clip_gradient : float, optional, default None
Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
learning_rate : float, optional, default 0.01
The initial learning rate.
lr_scheduler : LRScheduler, optional, default None
The learning rate scheduler.
wd : float, optional, default 0.0
The weight decay (or L2 regularization) coefficient. Modifies objective
by adding a penalty for having large weights.
sym: Symbol, optional, default None
The Symbol this optimizer is applying to.
begin_num_update : int, optional, default 0
The initial number of updates.
multi_precision : bool, optional, default False
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
param_dict : dict of int -> gluon.Parameter, default None
Dictionary of parameter index to gluon.Parameter, used to lookup parameter attributes
such as lr_mult, wd_mult, etc. param_dict shall not be deep copied.
Properties
----------
learning_rate : float
The current learning rate of the optimizer. Given an Optimizer object
optimizer, its learning rate can be accessed as optimizer.learning_rate.
"""
def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
clip_gradient=None, learning_rate=0.01,
lr_scheduler=None, sym=None, begin_num_update=0,
multi_precision=False, param_dict=None):
self.rescale_grad = rescale_grad
self.lr = learning_rate
self.lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self.lr_scheduler.base_lr = learning_rate
self.wd = wd
self.lr_mult = {}
self.wd_mult = {}
self.begin_num_update = begin_num_update
self.num_update = begin_num_update
self._index_update_count = {}
self.clip_gradient = clip_gradient
self.multi_precision = multi_precision
self.aggregate_num = 0
if param_idx2name is None:
param_idx2name = {}
assert isinstance(param_idx2name, dict), \
'param_idx2name should be a dict of param indexes to names.'
self.idx2name = param_idx2name.copy()
self.sym_info = (sym.attr_dict(), sym.list_arguments()) if sym is not None else ()
self.param_dict = param_dict if param_dict else {}
self.set_lr_mult({})
self.set_wd_mult({})
opt_registry = {}
@staticmethod
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
warnings.warn('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s' %
(klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass
@staticmethod
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
@property
def learning_rate(self):
if self.lr_scheduler is not None:
return self.lr_scheduler(self.num_update)
else:
return self.lr
def create_state(self, index, weight):
"""Creates auxiliary state for a given weight.
Some optimizers require additional states, e.g. as momentum, in addition
to gradients in order to update weights. This function creates state
for a given weight which will be used in `update`. This function is
called only once for each weight.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
precision copy if original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
            A unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"optimizer")
return self.create_state(index, weight)
def update(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
raise NotImplementedError()
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state)
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if self.lr_scheduler is not None:
raise UserWarning("LRScheduler of the optimizer has already been "
"defined. Note that set_learning_rate can mutate "
"the value of the learning rate of the optimizer "
"only when the LRScheduler of the optimizer is "
"undefined.")
else:
self.lr = lr
def set_lr_scale(self, args_lrscale): # pylint: disable=unused-argument
"""[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
raise DeprecationWarning
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
            For each of its key-value entries, the learning rate multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.lr_mult = {}
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult)
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
        constructor, the weight decay multiplier is set as 0 for all
        parameters whose names don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
            For each of its key-value entries, the weight decay multiplier for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
            and the name you specified in the key of `args_wd_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int or list of int
The index to be updated.
"""
if not isinstance(index, (list, tuple)):
index = [index]
for idx in index:
if idx not in self._index_update_count:
self._index_update_count[idx] = self.begin_num_update
self._index_update_count[idx] += 1
self.num_update = max(self._index_update_count[idx], self.num_update)
def _get_lrs(self, indices):
"""Gets the learning rates given the indices of the weights.
Parameters
----------
indices : list of int
Indices corresponding to weights.
Returns
-------
lrs : list of float
Learning rates for those indices.
"""
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
lrs = [lr for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
lrs[i] *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lrs[i] *= self.lr_mult[index]
elif index in self.idx2name:
lrs[i] *= self.lr_mult.get(self.idx2name[index], 1.0)
return lrs
def _get_lr(self, index):
"""Gets the learning rate given the index of the weight.
Parameters
----------
index : int
The index corresponding to the weight.
Returns
-------
lr : float
Learning rate for this index.
"""
return self._get_lrs([index])[0]
def _get_wds(self, indices):
"""Gets weight decays for indices.
        Returns 0 for non-weight parameters if parameter names were provided to `__init__`.
Parameters
----------
indices : list of int
Indices of weights.
Returns
-------
wds : list of float
Weight decays for those indices.
"""
wds = [self.wd for _ in indices]
for i, index in enumerate(indices):
if index in self.param_dict:
wds[i] *= self.param_dict[index].wd_mult
elif index in self.wd_mult:
wds[i] *= self.wd_mult[index]
elif index in self.idx2name:
wds[i] *= self.wd_mult.get(self.idx2name[index], 1.0)
return wds
def _get_wd(self, index):
"""Gets weight decay for index.
        Returns 0 for non-weight parameters if parameter names were provided to `__init__`.
Parameters
----------
index : int
The index of weight.
Returns
-------
wd : float
Weight decay for this index.
"""
return self._get_wds([index])[0]
def __getstate__(self):
ret = self.__dict__.copy()
# do not include param_dict in the state
del ret['param_dict']
return ret
def __setstate__(self, state):
self.__dict__ = state
# param_dict needs to be explicitly set by the trainer
self.param_dict = {}
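# Illustrative sketch (not part of the library): shows how the per-parameter
# learning-rate and weight-decay multipliers described above are resolved by
# _get_lr()/_get_wd(). The parameter names are hypothetical and the helper is
# never called by the library code.
def _lr_wd_mult_sketch():
    opt = Optimizer(learning_rate=0.1, wd=0.01,
                    param_idx2name={0: 'conv0_weight', 1: 'conv0_bias'})
    # Names not ending in '_weight'/'_gamma' get a default wd multiplier of 0,
    # so the bias is not decayed; the lr multiplier below halves the rate for
    # conv0_weight only.
    opt.set_lr_mult({'conv0_weight': 0.5})
    print(opt._get_lr(0), opt._get_lr(1))   # expected roughly 0.05 and 0.1
    print(opt._get_wd(0), opt._get_wd(1))   # expected 0.01 and 0.0
    return opt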
# convenience wrapper for Optimizer.Register
register = Optimizer.register # pylint: disable=invalid-name
# pylint: disable=line-too-long
@register
class SGD(Optimizer):
"""The SGD optimizer with momentum and weight decay.
If the storage types of grad is ``row_sparse`` and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = lr * (rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row])
state[row] = momentum[row] * state[row] + rescaled_grad[row]
weight[row] = weight[row] - state[row]
The sparse update only updates the momentum for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
In the case when ``update_on_kvstore`` is set to False (either globally via
MXNET_UPDATE_ON_KVSTORE=0 environment variable or as a parameter in
:class:`~mxnet.gluon.Trainer`) SGD optimizer can perform aggregated update
of parameters, which may lead to improved performance. The aggregation size
is controlled by MXNET_OPTIMIZER_AGGREGATION_SIZE environment variable and
defaults to 4.
Otherwise, **standard updates** are applied by::
rescaled_grad = lr * (rescale_grad * clip(grad, clip_gradient) + wd * weight)
state = momentum * state + rescaled_grad
weight = weight - state
For details of the update algorithm see
:class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, **kwargs):
super(SGD, self).__init__(**kwargs)
self.momentum = momentum
self.lazy_update = lazy_update
self.aggregate_num = int(os.getenv('MXNET_OPTIMIZER_AGGREGATION_SIZE', "4"))
def create_state_multi_precision(self, index, weight):
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (self.create_state(index, weight_master_copy), weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
return self.create_state(index, weight)
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
stype = weight.stype if self.lazy_update else 'default'
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
return momentum
def _update_impl(self, indices, weights, grads, states, multi_precision=False):
aggregate = True
if not isinstance(indices, (tuple, list)):
indices = [indices]
weights = [weights]
grads = [grads]
states = [states]
for weight, grad in zip(weights, grads):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
aggregate = (aggregate and
weight.stype == 'default' and
grad.stype == 'default')
self._update_count(indices)
lrs = self._get_lrs(indices)
wds = self._get_wds(indices)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if aggregate:
if not multi_precision:
if self.momentum > 0:
multi_sgd_mom_update(*_flatten_list(zip(weights, grads, states)), out=weights,
num_weights=len(weights), lrs=lrs, wds=wds, **kwargs)
else:
multi_sgd_update(*_flatten_list(zip(weights, grads)), out=weights,
num_weights=len(weights), lrs=lrs, wds=wds, **kwargs)
else:
if self.momentum > 0:
multi_mp_sgd_mom_update(*_flatten_list(zip(weights, grads, *zip(*states))),
out=weights, num_weights=len(weights),
lrs=lrs, wds=wds, **kwargs)
else:
multi_mp_sgd_update(*_flatten_list(zip(weights, grads,
list(zip(*states))[1])),
out=weights, num_weights=len(weights),
lrs=lrs, wds=wds, **kwargs)
else:
for weight, grad, state, lr, wd in zip(weights, grads, states, lrs, wds):
if not multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state, multi_precision=False)
def update_multi_precision(self, index, weight, grad, state):
if not isinstance(index, (tuple, list)):
use_multi_precision = self.multi_precision and weight.dtype == numpy.float16
else:
use_multi_precision = self.multi_precision and weight[0].dtype == numpy.float16
self._update_impl(index, weight, grad, state,
multi_precision=use_multi_precision)
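# Illustrative sketch (not part of the library): a plain-numpy rendering of the
# "standard update" documented on SGD above, for one dense weight. The helper
# name and numpy arrays are assumptions for demonstration; the real update runs
# through sgd_update/sgd_mom_update (and their multi-weight variants).
def _sgd_standard_update_sketch(weight, grad, state, lr=0.1, momentum=0.9,
                                wd=0.0, rescale_grad=1.0, clip_gradient=None):
    import numpy as np
    if clip_gradient is not None:
        grad = np.clip(grad, -clip_gradient, clip_gradient)
    rescaled_grad = lr * (rescale_grad * grad + wd * weight)
    state[:] = momentum * state + rescaled_grad
    weight[:] -= state
    return weight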
@register
class Signum(Optimizer):
r"""The Signum optimizer that takes the sign of gradient or momentum.
The optimizer updates the weight by::
rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight
state = momentum * state + (1-momentum)*rescaled_grad
weight = (1 - lr * wd_lh) * weight - lr * sign(state)
References
----------
Jeremy Bernstein, Yu-Xiang Wang, Kamyar Azizzadenesheli & Anima Anandkumar. (2018).
signSGD: Compressed Optimisation for Non-Convex Problems. In ICML'18.
See: https://arxiv.org/abs/1802.04434
For details of the update algorithm see
:class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
wd_lh : float, optional
The amount of decoupled weight decay regularization, see details in the original paper at:\
https://arxiv.org/abs/1711.05101
"""
def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh=0.0, **kwargs):
super(Signum, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.wd_lh = wd_lh
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
return momentum
def _update_impl(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.wd_lh:
kwargs['wd_lh'] = self.wd_lh
if state is not None:
signum_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
signsgd_update(weight, grad, out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state)
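# Illustrative sketch (not part of the library): plain-numpy version of the
# Signum update documented above; the helper name and arrays are assumptions
# for demonstration, the real path goes through signum_update/signsgd_update.
def _signum_update_sketch(weight, grad, state, lr=0.01, momentum=0.9,
                          wd=0.0, wd_lh=0.0, rescale_grad=1.0):
    import numpy as np
    rescaled_grad = rescale_grad * grad + wd * weight
    state[:] = momentum * state + (1.0 - momentum) * rescaled_grad
    weight[:] = (1.0 - lr * wd_lh) * weight - lr * np.sign(state)
    return weight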
@register
class FTML(Optimizer):
"""The FTML optimizer.
This class implements the optimizer described in
*FTML - Follow the Moving Leader in Deep Learning*,
available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf.
Denote time step by t. The optimizer updates the weight by::
rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
v = beta2 * v + (1 - beta2) * square(rescaled_grad)
        d_t = (1 - power(beta1, t)) / lr * (square_root(v / (1 - power(beta2, t))) + epsilon)
z = beta1 * z + (1 - beta1) * rescaled_grad - (d_t - beta1 * d_(t-1)) * weight
weight = - z / d_t
For details of the update algorithm, see :class:`~mxnet.ndarray.ftml_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
0 < beta1 < 1. Generally close to 0.5.
beta2 : float, optional
0 < beta2 < 1. Generally close to 1.
epsilon : float, optional
Small value to avoid division by 0.
"""
def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
super(FTML, self).__init__(**kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0
zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0
zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad, 't': t}
if self.clip_gradient:
kwargs['clip_grad'] = self.clip_gradient
prev_d, prev_v, prev_z = state
ftml_update(weight, grad, prev_d, prev_v, prev_z, out=weight,
lr=lr, wd=wd, **kwargs)
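# Illustrative sketch (not part of the library): plain-numpy version of the
# FTML update documented above for step t; arrays and the helper name are
# assumptions for demonstration, the real path goes through ftml_update.
def _ftml_update_sketch(weight, grad, prev_d, prev_v, prev_z, t, lr=0.0025,
                        beta1=0.6, beta2=0.999, epsilon=1e-8, wd=0.0):
    import numpy as np
    g = grad + wd * weight
    prev_v[:] = beta2 * prev_v + (1.0 - beta2) * g * g
    d_t = (1.0 - beta1 ** t) / lr * (np.sqrt(prev_v / (1.0 - beta2 ** t)) + epsilon)
    prev_z[:] = beta1 * prev_z + (1.0 - beta1) * g - (d_t - beta1 * prev_d) * weight
    prev_d[:] = d_t
    weight[:] = -prev_z / d_t
    return weight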
@register
class LBSGD(Optimizer):
"""The Large Batch SGD optimizer with momentum and weight decay.
The optimizer updates the weight by::
state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
weight = weight - state
For details of the update algorithm see :class:`~mxnet.ndarray.sgd_update`
and :class:`~mxnet.ndarray.sgd_mom_update`.
In addition to the SGD updates the LBSGD optimizer uses the LARS, Layer-wise
Adaptive Rate Scaling, algorithm to have a separate learning rate for each
layer of the network, which leads to better stability over large batch sizes.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
    warmup_strategy: string ('linear', 'power2', 'sqrt' or 'lars'), default: 'linear'
warmup_epochs: unsigned, default: 5
batch_scale: unsigned, default: 1 (same as batch size*numworkers)
    updates_per_epoch: unsigned, default: 32. Number of updates per epoch, used for warmup; the default may not reflect the true number of batches per epoch.
begin_epoch: unsigned, default 0, starting epoch.
"""
def __init__(self, momentum=0.0, multi_precision=False, warmup_strategy='linear',
warmup_epochs=5, batch_scale=1, updates_per_epoch=32, begin_epoch=0, num_epochs=60,
**kwargs):
super(LBSGD, self).__init__(**kwargs)
logging.info('Running Large-Batch SGD Algorithm')
logging.info('(Batch_scale=%f, warmup_epochs=%d, warmup_strategy=%s, updates_per_epoch=%d)',
batch_scale, warmup_epochs, warmup_strategy, updates_per_epoch)
self.momentum = momentum
self.multi_precision = multi_precision
# new user parameters for large batch
self.warmup_strategy = warmup_strategy
self.warmup_epochs = warmup_epochs
self.batch_scale = batch_scale
self.updates_per_epoch = updates_per_epoch
self.init_updates = begin_epoch * updates_per_epoch
self.num_epochs = num_epochs
# addl internal usage parameters and storage
self.lbmult = 1
self.cumgrads = {}
# for adaptive lr
self.adaptive = False
self.admult = 1 # adaptation constant
def create_state(self, index, weight):
momentum = None
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = array(weight, ctx=weight.context, dtype=numpy.float32)
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=numpy.float32,
stype=weight.stype)
return (momentum, weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
return momentum
def _get_lbmult(self, nup):
"""Returns lr scaling factor for large batch according to warmup schedule
(to be implemented)
"""
nwup = self.warmup_epochs * self.updates_per_epoch
strategy = self.warmup_strategy
maxmult = float(self.batch_scale)
if nup >= nwup:
mult = maxmult
elif nwup <= 1:
mult = 1.0
else:
if (strategy == 'linear'):
mult = 1.0 + (maxmult - 1) * nup / nwup
elif (strategy == 'power2'):
mult = 1.0 + (maxmult-1) * (nup*nup)/(nwup*nwup)
elif (strategy == 'sqrt'):
mult = 1.0 + (maxmult - 1) * math.sqrt(float(nup) / nwup)
else:
mult = 1.0
return mult
def _get_lars(self, weight, g, wd):
"""Returns a scaling factor for the learning rate for this layer
default is 1
"""
weight2 = self._l2norm(weight)
grad2 = self._l2norm(g)
lars = math.sqrt(weight2 / (grad2 + wd * weight2 + 1e-18))
if lars < 0.01:
lars = 0.01
elif lars > 100:
lars = 100
return lars
def _l2norm(self, v):
"inner product implementation"
norm = multiply(v, v).asnumpy().sum()
return norm
def _reset_cum_gradient(self, index):
"called every macro-batch to reset cumulated gradients to 0 for a given index"
self.cumgrads[index]['cum_grad'] = 0
def _get_cum_gradient(self, index):
"get the cumulated gradient for index"
if index in self.cumgrads:
return self.cumgrads[index]
else:
return {}
def _put_cum_gradient(self, index, cgrad):
"store cumulated gradient for index"
self.cumgrads[index] = cgrad
def _cumulate_gradient(self, grad, index):
"Cumulate gradients for large-batch emulation. Cumulated by index (layer)"
cgrad = self._get_cum_gradient(index)
if cgrad:
num_cums = cgrad['num_cums']
if num_cums > 0:
cum_grad = cgrad['cum_grad'] + grad
num_cums += 1
else:
cum_grad = grad
num_cums = self.init_updates + 1
else:
cum_grad = grad
num_cums = self.init_updates + 1
cgrad = {'cum_grad': cum_grad, 'num_cums': num_cums}
self._put_cum_gradient(index, cgrad)
return cgrad
def update(self, index, weight, grad, state):
assert (isinstance(weight, NDArray))
assert (isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
# new stuff for large batch
cgrad = self._cumulate_gradient(grad, index)
if (cgrad['num_cums'] % self.batch_scale) == 0:
grad = cgrad['cum_grad'] / self.batch_scale
if self.warmup_strategy == 'lars':
lbmult = self._get_lars(weight, grad, wd)
else:
lbmult = self._get_lbmult(cgrad['num_cums'])
lr = lr * lbmult
# do the regular sgd update flow
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
use_multi_precision = isinstance(state, (list, tuple))
if not use_multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight, lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight, lr=lr, wd=wd,
**kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight, lr=lr, wd=wd, **kwargs)
# reset update count and cumulated gradient per large batch
self._reset_cum_gradient(index)
else:
lr = 0.0
kwargs = {}
sgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
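# Illustrative sketch (not part of the library): plain-numpy version of the
# layer-wise LARS scaling computed by LBSGD._get_lars() above; the helper name
# is hypothetical and the clipping bounds mirror the method above.
def _lars_scale_sketch(weight, grad, wd=1e-4):
    import math
    import numpy as np
    weight2 = float(np.sum(weight * weight))   # squared L2 norm of the layer weights
    grad2 = float(np.sum(grad * grad))         # squared L2 norm of the layer gradient
    lars = math.sqrt(weight2 / (grad2 + wd * weight2 + 1e-18))
    return min(max(lars, 0.01), 100.0)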
@register
class DCASGD(Optimizer):
"""The DCASGD optimizer.
This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent
with Delay Compensation for Distributed Deep Learning*,
available at https://arxiv.org/abs/1609.08326.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lamda : float, optional
Scale DC value.
"""
def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
super(DCASGD, self).__init__(**kwargs)
self.momentum = momentum
self.weight_previous = {}
self.lamda = lamda
def create_state(self, index, weight):
if self.momentum == 0.0:
return (None,
weight.copy()) # previous weight
else:
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # momentum
weight.copy()) # previous weight
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
mom, previous_weight = state
        if mom is not None:
mom[:] *= self.momentum
mom[:] += -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
else:
assert(self.momentum == 0.0)
mom = -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
previous_weight[:] = weight
weight[:] += mom
@register
class NAG(Optimizer):
"""Nesterov accelerated SGD.
This optimizer updates each weight by::
state = momentum * state + grad + wd * weight
weight = weight - (lr * (grad + momentum * state))
Parameters
----------
momentum : float, optional
The momentum value.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.::
False: results in using the same precision as the weights (default),
True: makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, **kwargs):
super(NAG, self).__init__(**kwargs)
self.momentum = momentum
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
if state is not None:
mom = state
mom[:] *= self.momentum
mom[:] += grad
mom[:] += wd * weight
grad[:] += self.momentum * mom
weight[:] -= lr * grad
else:
assert self.momentum == 0.0
weight[:] += -lr * (grad + wd * weight)
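# Illustrative sketch (not part of the library): plain-numpy version of the
# Nesterov accelerated update documented on NAG above; arrays and the helper
# name are assumptions for demonstration only.
def _nag_update_sketch(weight, grad, state, lr=0.1, momentum=0.9, wd=0.0):
    state[:] = momentum * state + grad + wd * weight
    weight[:] = weight - lr * (grad + momentum * state)
    return weight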
@register
class SGLD(Optimizer):
"""Stochastic Gradient Riemannian Langevin Dynamics.
This class implements the optimizer described in the paper *Stochastic Gradient
Riemannian Langevin Dynamics on the Probability Simplex*, available at
https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
"""
def __init__(self, **kwargs):
super(SGLD, self).__init__(**kwargs)
def create_state(self, index, weight):
return None
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
weight[:] += - lr/2 * (grad + wd * weight) + normal(0, math.sqrt(lr), shape=weight.shape,
dtype=weight.dtype, ctx=weight.context)
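# Illustrative sketch (not part of the library): plain-numpy version of the
# SGLD update above, which adds Gaussian noise with std sqrt(lr) to a half
# gradient step; the helper name is hypothetical.
def _sgld_update_sketch(weight, grad, lr=1e-3, wd=0.0):
    import math
    import numpy as np
    noise = np.random.normal(0.0, math.sqrt(lr), size=weight.shape)
    weight[:] += -lr / 2.0 * (grad + wd * weight) + noise
    return weight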
@register # pylint: disable=invalid-name
class ccSGD(SGD):
"""[DEPRECATED] Same as `SGD`. Left here for backward compatibility."""
def __init__(self, *args, **kwargs):
super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
"""The Adam optimizer.
This class implements the optimizer described in *Adam: A Method for
Stochastic Optimization*, available at http://arxiv.org/abs/1412.6980.
If the storage types of grad is ``row_sparse``, and ``lazy_update`` is True, \
**lazy updates** at step t are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad + wd * weight[row], clip_gradient)
m[row] = beta1 * m[row] + (1 - beta1) * rescaled_grad[row]
v[row] = beta2 * v[row] + (1 - beta2) * (rescaled_grad[row]**2)
        lr = learning_rate * sqrt(1 - beta2**t) / (1 - beta1**t)
w[row] = w[row] - lr * m[row] / (sqrt(v[row]) + epsilon)
The lazy update only updates the mean and var for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all indices.
Compared with the original update, it can provide large improvements in model training
throughput for some applications. However, it provides slightly different semantics than
the original update, and may lead to different empirical results.
Otherwise, **standard updates** at step t are applied by::
rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
m = beta1 * m + (1 - beta1) * rescaled_grad
v = beta2 * v + (1 - beta2) * (rescaled_grad**2)
        lr = learning_rate * sqrt(1 - beta2**t) / (1 - beta1**t)
w = w - lr * m / (sqrt(v) + epsilon)
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
For details of the update algorithm, see :class:`~mxnet.ndarray.adam_update`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
lazy_update=True, **kwargs):
super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_update = lazy_update
def create_state(self, index, weight):
stype = weight.stype if self.lazy_update else 'default'
return (zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=stype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=stype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
mean, var = state
adam_update(weight, grad, mean, var, out=weight,
lazy_update=self.lazy_update, lr=lr, wd=wd, **kwargs)
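# Illustrative sketch (not part of the library): plain-numpy version of the
# standard Adam update documented above, including the bias-corrected step
# size computed in update(); arrays and the helper name are assumptions.
def _adam_update_sketch(weight, grad, mean, var, t, lr=0.001,
                        beta1=0.9, beta2=0.999, epsilon=1e-8, wd=0.0):
    import math
    import numpy as np
    g = grad + wd * weight
    mean[:] = beta1 * mean + (1.0 - beta1) * g
    var[:] = beta2 * var + (1.0 - beta2) * g * g
    lr_t = lr * math.sqrt(1.0 - beta2 ** t) / (1.0 - beta1 ** t)
    weight[:] -= lr_t * mean / (np.sqrt(var) + epsilon)
    return weight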
@register
class AdaGrad(Optimizer):
"""AdaGrad optimizer.
This class implements the AdaGrad optimizer described in *Adaptive Subgradient
Methods for Online Learning and Stochastic Optimization*, and available at
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
This optimizer updates each weight by::
grad = clip(grad * rescale_grad, clip_gradient)
history += square(grad)
div = grad / sqrt(history + float_stable_eps)
weight += (div + weight * wd) * -lr
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
See Also
----------
:meth:`mxnet.ndarray.sparse.adagrad_update`.
Parameters
----------
eps: float, optional
Initial value of the history accumulator. Avoids division by 0.
"""
def __init__(self, eps=1e-7, **kwargs):
super(AdaGrad, self).__init__(**kwargs)
self.float_stable_eps = eps
def create_state(self, index, weight):
return zeros(weight.shape, weight.context, stype=weight.stype) # history
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
is_sparse = grad.stype == 'row_sparse'
history = state
if is_sparse:
kwargs = {'epsilon': self.float_stable_eps,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
sparse.adagrad_update(weight, grad, history, out=weight, lr=lr, wd=wd, **kwargs)
else:
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
history[:] += square(grad)
div = grad / sqrt(history + self.float_stable_eps)
weight[:] += (div + weight * wd) * -lr
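# Illustrative sketch (not part of the library): plain-numpy version of the
# dense AdaGrad update documented above; arrays and the helper name are
# assumptions, the sparse path goes through sparse.adagrad_update.
def _adagrad_update_sketch(weight, grad, history, lr=0.01, wd=0.0, eps=1e-7):
    import numpy as np
    history[:] += grad * grad
    div = grad / np.sqrt(history + eps)
    weight[:] += (div + weight * wd) * -lr
    return weight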
@register
class RMSProp(Optimizer):
"""The RMSProp optimizer.
Two versions of RMSProp are implemented:
If ``centered=False``, we follow
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012.
For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.
If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)
by Alex Graves, 2013.
For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
gamma1: float, optional
A decay factor of moving average over past squared gradient.
gamma2: float, optional
A "momentum" factor. Only used if `centered`=``True``.
epsilon : float, optional
Small value to avoid division by 0.
centered : bool, optional
Flag to control which version of RMSProp to use.::
True: will use Graves's version of `RMSProp`,
False: will use Tieleman & Hinton's version of `RMSProp`.
clip_weights : float, optional
Clips weights into range ``[-clip_weights, clip_weights]``.
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.gamma1 = gamma1
self.gamma2 = gamma2
self.centered = centered
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
if self.centered:
return (
zeros(weight.shape, weight.context, stype=weight.stype), # n
zeros(weight.shape, weight.context, stype=weight.stype), # g
zeros(weight.shape, weight.context, stype=weight.stype)) # delta
else:
return (zeros(weight.shape, weight.context, stype=weight.stype),) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.centered:
kwargs['gamma2'] = self.gamma2
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.clip_weights:
kwargs['clip_weights'] = self.clip_weights
if not self.centered:
(n, ) = state
rmsprop_update(
weight, grad, n, out=weight, lr=lr, wd=wd, **kwargs)
else:
n, g, delta = state
rmspropalex_update(weight, grad, n, g, delta, out=weight,
lr=lr, wd=wd, **kwargs)
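# Illustrative sketch (not part of the library): plain-numpy version of the
# non-centered (Tieleman & Hinton) RMSProp update documented above; the
# centered variant additionally tracks the mean gradient and a delta term
# (rmspropalex_update). Arrays and the helper name are assumptions.
def _rmsprop_update_sketch(weight, grad, n, lr=0.001, gamma1=0.9,
                           epsilon=1e-8, wd=0.0):
    import numpy as np
    g = grad + wd * weight
    n[:] = gamma1 * n + (1.0 - gamma1) * g * g
    weight[:] -= lr * g / np.sqrt(n + epsilon)
    return weight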
@register
class AdaDelta(Optimizer):
"""The AdaDelta optimizer.
This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
learning rate method*, available at https://arxiv.org/abs/1212.5701.
This optimizer updates each weight by::
grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
acc_grad = rho * acc_grad + (1. - rho) * grad * grad
delta = sqrt(acc_delta + epsilon) / sqrt(acc_grad + epsilon) * grad
acc_delta = rho * acc_delta + (1. - rho) * delta * delta
weight -= (delta + wd * weight)
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
rho: float
Decay rate for both squared gradients and delta.
epsilon : float
Small value to avoid division by 0.
"""
def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
super(AdaDelta, self).__init__(**kwargs)
self.rho = rho
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context), # accumulated g
zeros(weight.shape, weight.context)) # accumulated delta
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
wd = self._get_wd(index)
self._update_count(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# accumulated g and delta initlization
acc_g, acc_delta = state
# update g, delta
acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
# update weight
weight[:] -= current_delta + wd * weight
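# Illustrative sketch (not part of the library): plain-numpy version of the
# AdaDelta update documented above; arrays and the helper name are assumptions
# for demonstration only.
def _adadelta_update_sketch(weight, grad, acc_g, acc_delta,
                            rho=0.90, epsilon=1e-5, wd=0.0):
    import numpy as np
    acc_g[:] = rho * acc_g + (1.0 - rho) * grad * grad
    delta = np.sqrt(acc_delta + epsilon) / np.sqrt(acc_g + epsilon) * grad
    acc_delta[:] = rho * acc_delta + (1.0 - rho) * delta * delta
    weight[:] -= delta + wd * weight
    return weight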
#pylint: disable=invalid-name
#pylint: disable=line-too-long
@register
class Ftrl(Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
eta :
.. math::
\\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}
The optimizer updates the weight by::
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
n += rescaled_grad**2
w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)
If the storage types of weight, state and grad are all ``row_sparse``, \
**sparse updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)
z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate
n[row] += rescaled_grad[row]**2
w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1)
The sparse update only updates the z and n for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
super(Ftrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, stype=weight.stype), # z
zeros(weight.shape, weight.context, stype=weight.stype)) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
kwargs = {'lamda1': self.lamda1, 'beta': self.beta, 'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
# accumulated g and delta initialization
z, n = state
ftrl_update(weight, grad, z, n, out=weight,
lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
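# Illustrative sketch (not part of the library): plain-numpy version of the
# dense FTRL update documented above; arrays and the helper name are
# assumptions, the real path goes through ftrl_update.
def _ftrl_update_sketch(weight, grad, z, n, lr=0.1, lamda1=0.01,
                        beta=1.0, wd=0.0):
    import numpy as np
    z[:] += grad - (np.sqrt(n + grad * grad) - np.sqrt(n)) * weight / lr
    n[:] += grad * grad
    weight[:] = ((np.sign(z) * lamda1 - z)
                 / ((beta + np.sqrt(n)) / lr + wd)
                 * (np.abs(z) > lamda1))
    return weight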
@register
class Adamax(Optimizer):
"""The AdaMax optimizer.
It is a variant of Adam based on the infinity norm
available at http://arxiv.org/abs/1412.6980 Section 7.
The optimizer updates the weight by::
grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
m = beta1 * m_t + (1 - beta1) * grad
u = maximum(beta2 * u, abs(grad))
weight -= lr / (1 - beta1**t) * m / u
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
"""
def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
super(Adamax, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
lr /= (1. - self.beta1**t)
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# update m_t and u_t
m_t, u_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
u_t[:] = maximum(self.beta2 * u_t, NDabs(grad))
# update weight
weight[:] -= lr * m_t / u_t
@register
class Nadam(Optimizer):
"""The Nesterov Adam optimizer.
    Much like Adam is essentially RMSprop with momentum,
    Nadam is Adam with Nesterov momentum; see
    http://cs229.stanford.edu/proj2015/054_report.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
schedule_decay : float, optional
Exponential decay rate for the momentum schedule
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
schedule_decay=0.004, **kwargs):
super(Nadam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.schedule_decay = schedule_decay
self.m_schedule = 1.
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# warming momentum schedule
momentum_t = self.beta1 * (1. - 0.5 * (pow(0.96, t * self.schedule_decay)))
momentum_t_1 = self.beta1 * (1. - 0.5 * (pow(0.96, (t + 1) * self.schedule_decay)))
self.m_schedule = self.m_schedule * momentum_t
m_schedule_next = self.m_schedule * momentum_t_1
# update m_t and v_t
m_t, v_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
grad_prime = grad / (1. - self.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - pow(self.beta2, t))
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# update weight
weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
@register
class Test(Optimizer):
"""The Test optimizer"""
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
def create_state(self, index, weight):
"""Creates a state to duplicate weight."""
return zeros(weight.shape, weight.context)
def update(self, index, weight, grad, state):
"""Performs w += rescale_grad * grad."""
weight[:] += grad * self.rescale_grad
state[:] = weight
# backward compatibility wrapper for Optimizer.CreateOptimizer
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
"""Updater for kvstore."""
def __init__(self, optimizer):
self.optimizer = optimizer
self.states = {}
self.states_synced = {}
self.aggregate_updates = optimizer.aggregate_num > 0
def __call__(self, index, grad, weight):
"""Updates weight given gradient and index."""
if not isinstance(index, (list, tuple)):
indices = [index]
grads = [grad]
weights = [weight]
else:
indices = index
grads = grad
weights = weight
for i, idx in enumerate(indices):
# convert ctypes.char_p.value back to python str if needed
if isinstance(idx, bytes):
indices[i] = py_str(idx)
idx = indices[i]
if idx not in self.states:
self.states[idx] = self.optimizer.create_state_multi_precision(idx, weights[i])
self.states_synced[idx] = True
elif not self.states_synced[idx]:
self.states[idx] = \
self.sync_state_context(self.states[idx], weights[i].context)
self.states_synced[idx] = True
if self.aggregate_updates:
# segregate values based on type
type_map = {}
for i, w, g in zip(indices, weights, grads):
if w.dtype in type_map:
type_map[w.dtype].append((i, w, g))
else:
type_map[w.dtype] = [(i, w, g)]
for idx in type_map:
current_index = 0
indices, weights, grads = zip(*type_map[idx])
while current_index < len(indices):
states = []
step = min(self.optimizer.aggregate_num, len(indices) - current_index)
for j in range(step):
states.append(self.states[indices[current_index + j]])
self.optimizer.update_multi_precision(
indices[current_index:current_index + self.optimizer.aggregate_num],
weights[current_index:current_index + self.optimizer.aggregate_num],
grads[current_index:current_index + self.optimizer.aggregate_num],
states)
current_index += self.optimizer.aggregate_num
else:
for i, w, g in zip(indices, weights, grads):
self.optimizer.update_multi_precision(i, w, g, self.states[i])
def sync_state_context(self, state, context):
"""sync state context."""
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
def set_states(self, states):
"""Sets updater states."""
states = pickle.loads(states)
if isinstance(states, tuple) and len(states) == 2:
self.states, self.optimizer = states
else:
self.states = states
self.states_synced = dict.fromkeys(self.states.keys(), False)
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
def get_updater(optimizer):
"""Returns a closure of the updater needed for kvstore.
Parameters
----------
optimizer: Optimizer
The optimizer.
Returns
-------
updater: function
The closure of the updater.
"""
return Updater(optimizer)
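# A minimal usage sketch (illustrative only; assumes the mxnet NDArray API,
# e.g. mxnet.nd.ones, is importable). It wires an Optimizer to an Updater the
# same way a kvstore would, using the simple Test optimizer defined above.
def _example_updater_usage():
    import mxnet as mx
    opt = Test()                      # rescale_grad defaults to 1.0
    updater = get_updater(opt)
    weight = mx.nd.ones((2, 2))
    grad = mx.nd.ones((2, 2)) * 0.5
    updater(0, grad, weight)          # Test.update: weight += rescale_grad * grad
    return weight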
|
ptrendx/mxnet
|
python/mxnet/optimizer/optimizer.py
|
Python
|
apache-2.0
| 66,906 | 0.002735 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qhangups/qhangupsbrowser.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_QHangupsBrowser(object):
def setupUi(self, QHangupsBrowser):
QHangupsBrowser.setObjectName("QHangupsBrowser")
QHangupsBrowser.resize(600, 450)
self.verticalLayout = QtWidgets.QVBoxLayout(QHangupsBrowser)
self.verticalLayout.setObjectName("verticalLayout")
self.browserWebView = QtWebKitWidgets.QWebView(QHangupsBrowser)
self.browserWebView.setUrl(QtCore.QUrl("about:blank"))
self.browserWebView.setObjectName("browserWebView")
self.verticalLayout.addWidget(self.browserWebView)
self.retranslateUi(QHangupsBrowser)
QtCore.QMetaObject.connectSlotsByName(QHangupsBrowser)
def retranslateUi(self, QHangupsBrowser):
_translate = QtCore.QCoreApplication.translate
QHangupsBrowser.setWindowTitle(_translate("QHangupsBrowser", "QHangups - Browser"))
from PyQt5 import QtWebKitWidgets
|
xmikos/qhangups
|
qhangups/ui_qhangupsbrowser.py
|
Python
|
gpl-3.0
| 1,146 | 0.004363 |
'''Test cases for QLayout handling of child widget references'''
import unittest
from sys import getrefcount
from PySide.QtGui import QHBoxLayout, QVBoxLayout, QGridLayout, QWidget
from PySide.QtGui import QStackedLayout, QFormLayout
from PySide.QtGui import QApplication, QPushButton, QLabel
from helper import UsesQApplication
class SaveReference(UsesQApplication):
'''Test case to check if QLayout-derived classes increment the refcount
of widgets passed to addWidget()'''
# Adding here as nose can't see the qapplication attrib we inherit
qapplication = True
def setUp(self):
#Acquire resources
super(SaveReference, self).setUp()
self.widget1 = QPushButton('click me')
self.widget2 = QLabel('aaa')
def tearDown(self):
#Release resources
del self.widget2
del self.widget1
super(SaveReference, self).tearDown()
def checkLayoutReference(self, layout):
        #Checks the reference count handling of layout.addWidget
self.assertEqual(getrefcount(self.widget1), 2)
layout.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
self.assertEqual(getrefcount(self.widget2), 2)
layout.addWidget(self.widget2)
self.assertEqual(getrefcount(self.widget2), 3)
        # Check that it doesn't mess with the previous widget's refcount
self.assertEqual(getrefcount(self.widget1), 3)
def testMoveLayout(self):
l = QHBoxLayout()
self.assertEqual(getrefcount(self.widget1), 2)
l.addWidget(self.widget1)
self.assertEqual(getrefcount(self.widget1), 3)
w = QWidget()
w.setLayout(l)
self.assertEqual(getrefcount(self.widget1), 3)
def testHBoxReference(self):
#QHBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QHBoxLayout(w))
def testVBoxReference(self):
#QVBoxLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QVBoxLayout(w))
def testGridReference(self):
#QGridLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QGridLayout(w))
def testFormReference(self):
#QFormLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QFormLayout(w))
def testStackedReference(self):
#QStackedLayout.addWidget reference count
w = QWidget()
self.checkLayoutReference(QStackedLayout(w))
class MultipleAdd(UsesQApplication):
'''Test case to check if refcount is incremented only once when multiple
calls to addWidget are made with the same widget'''
qapplication = True
def setUp(self):
#Acquire resources
super(MultipleAdd, self).setUp()
self.widget = QPushButton('click me')
self.win = QWidget()
self.layout = QHBoxLayout(self.win)
def tearDown(self):
#Release resources
del self.widget
del self.layout
del self.win
super(MultipleAdd, self).tearDown()
def testRefCount(self):
#Multiple QLayout.addWidget calls on the same widget
self.assertEqual(getrefcount(self.widget), 2)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
self.layout.addWidget(self.widget)
self.assertEqual(getrefcount(self.widget), 3)
class InternalAdd(UsesQApplication):
def testInternalRef(self):
mw = QWidget()
w = QWidget()
ow = QWidget()
topLayout = QGridLayout()
# unique reference
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
topLayout.addWidget(w, 0, 0)
topLayout.addWidget(ow, 1, 0)
        # the layout keeps the reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mainLayout = QGridLayout()
mainLayout.addLayout(topLayout, 1, 0, 1, 4)
# the same reference
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
mw.setLayout(mainLayout)
        # now transfer the ownership to mw
self.assertEqual(getrefcount(w), 3)
self.assertEqual(getrefcount(ow), 3)
del mw
# remove the ref and invalidate the widget
self.assertEqual(getrefcount(w), 2)
self.assertEqual(getrefcount(ow), 2)
if __name__ == '__main__':
unittest.main()
|
M4rtinK/pyside-android
|
tests/QtGui/qlayout_ref_test.py
|
Python
|
lgpl-2.1
| 4,597 | 0.003481 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from peer import *
from viewer import Viewer
from editor import Editor
class SearchDocument(Peer):
#Receiving pillows:
Search = Pillow.In
Sending = [
Viewer.In.Document
]
Routings = [
(Editor.Out.FieldChanged, Viewer.In.Refresh)
]
def __init__(self, searchRoom, resultRoom=None, document=None, identifier=None, appendWildcard=False):
Peer.__init__(self, searchRoom)
if not resultRoom:
resultRoom = searchRoom
self._resultRoom = resultRoom
self._identifier = identifier
self._appendWildcard = appendWildcard
if document:
self._document = document
else:
from wallaby.common.queryDocument import QueryDocument
self._document = QueryDocument()
self._document.set('identifier', self._identifier)
self._document.set('query', '*')
self._catch(SearchDocument.In.Search, self._search)
def initialize(self):
# set search field editable
self._throw(Viewer.In.Document, self._document)
self._throw(Editor.In.Enable, True)
def _search(self, pillow, feathers):
query = self._document.get('query')
if self._appendWildcard and (len(query) == 0 or query[-1] != "*"):
self._document.set('query', query + "*")
from abstractQuery import AbstractQuery
self._throw(self._resultRoom+":"+AbstractQuery.In.Query, self._document)
|
FreshXOpenSource/wallaby-base
|
wallaby/pf/peer/searchDocument.py
|
Python
|
bsd-2-clause
| 1,556 | 0.005141 |
def countup(n):
if n >= 10:
print "Blastoff!"
else:
print n
countup(n+1)
def main():
countup(1)
main()
def countdown_from_to(start,stop):
if start == stop:
print "Blastoff!"
elif start <= stop:
print "Invalid pair"
else:
print start
countdown_from_to(start - 1,stop)
def main():
countdown_from_to(89,53)
main()
def adder(sum_):
    number = raw_input("Next Number")
    if number == "":
        print "The Sum Is {}".format(sum_)
    else:
        try:
            value = float(number)
        except ValueError:
            # ignore input that is not a number and ask again
            print "Not a number: {}".format(number)
        else:
            sum_ += value
            print "Running total: {}".format(sum_)
        adder(sum_)
def main():
sum_ = 0
adder(sum_)
main()
|
tonsom1592-cmis/tonsom1592-cmis-cs2
|
recursion.py
|
Python
|
cc0-1.0
| 624 | 0.0625 |
#!/usr/bin/env python
"""zip source directory tree"""
import argparse
import fnmatch
import logging
import os
import re
import subprocess
import zipfile
def get_version():
command = ['git', 'describe', '--tags', '--dirty', '--always']
return subprocess.check_output(command).decode('utf-8')
def source_walk(root):
root = os.path.abspath(root)
regex = re.compile(fnmatch.translate('*.py[co]'))
for path, _, files in os.walk(root):
files[:] = [f for f in files if regex.match(f) is None]
for filename in files:
fullpath = os.path.join(path, filename)
yield fullpath, os.path.relpath(fullpath, root)
def setup():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-d', '--debug',
action='store_true',
help='print debug information')
argparser.add_argument(
'-o',
metavar='zipfile',
dest='output',
help='output file name')
argparser.add_argument(
'source',
help='source directory')
args = argparser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=loglevel)
if not os.path.isdir(args.source):
logging.critical('"%s" is not a directory', args.source)
return
if args.output is None:
args.output = args.source + '.zip'
with zipfile.ZipFile(args.output, 'w', zipfile.ZIP_DEFLATED) as fzip:
fzip.writestr('version.txt', get_version())
for path, relpath in source_walk(args.source):
fzip.write(path, relpath)
if __name__ == '__main__':
setup()
|
nsubiron/configure-pyz
|
setup.py
|
Python
|
gpl-3.0
| 1,666 | 0.006002 |
# -*- coding: utf-8 -*-
# File: enemy.py
# Author: Casey Jones
#
# Created on July 20, 2009, 4:48 PM
#
# This file is part of Alpha Beta Gamma (abg).
#
# ABG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ABG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ABG. If not, see <http://www.gnu.org/licenses/>.
#class to handle all enemies on screen
import sys, pygame, frametime, properties, random
from enemy import Enemy
class Enemies:
enemies = []
blackSurface = pygame.Surface([Enemy.enemy.get_width(), Enemy.enemy.get_height()])
blackSurface.fill([0,0,0])
screen = None
def set_screen(self, screen):
self.screen = screen
def create(self):
#range that the current player ship can shoot
where_spawn = random.randint(1, properties.width - Enemy.enemy.get_width())
lenemy = Enemy(where_spawn)
self.enemies.append(lenemy)
def move(self, bullet):
to_update = []
if frametime.can_create_enemy():
self.create()
to_delete = []
to_update += [x.enemyrect for x in self.enemies]
if len(self.enemies) > 0:
for i in range(len(self.enemies)):
self.enemies[i].update(bullet)
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
self.screen.blit(Enemy.enemy, self.enemies[i].enemyrect)
#If enemy goes off the bottom of the screen
if self.enemies[i].enemyrect.top > 800:
to_delete.append(i)
        # remove from the back so earlier indices stay valid after each deletion
        for x in sorted(to_delete, reverse=True):
            self.remove(x)
to_update += [x.enemyrect for x in self.enemies]
return to_update
def getEnemies(self):
return self.enemies
def remove(self, index):
try:
to_update = self.enemies[index].enemyrect
self.screen.blit(self.blackSurface, self.enemies[index].enemyrect)
del self.enemies[index]
return to_update
except IndexError:
print("IndexError for enemy {0} of {1}".format(index, len(self.enemies)))
def game_over(self):
for i in range(len(self.enemies)):
self.screen.blit(self.blackSurface, self.enemies[i].enemyrect)
del self.enemies[:]
|
jcnix/abg
|
enemies.py
|
Python
|
gpl-3.0
| 2,843 | 0.008442 |
##
# Copyright 2011-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Module naming scheme API.
:author: Jens Timmerman (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import re
from easybuild.base import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import Singleton
from easybuild.tools.py2vs3 import create_base_metaclass
DEVEL_MODULE_SUFFIX = '-easybuild-devel'
# singleton metaclass: only one instance is created
BaseModuleNamingScheme = create_base_metaclass('BaseModuleNamingScheme', Singleton, object)
class ModuleNamingScheme(BaseModuleNamingScheme):
"""Abstract class for a module naming scheme implementation."""
REQUIRED_KEYS = None
def __init__(self, *args, **kwargs):
"""Initialize logger."""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
def is_sufficient(self, keys):
"""Determine whether specified list of easyconfig parameters is sufficient for this module naming scheme."""
if self.REQUIRED_KEYS is not None:
return set(keys).issuperset(set(self.REQUIRED_KEYS))
else:
raise EasyBuildError("Constant REQUIRED_KEYS is not defined, "
"should specify required easyconfig parameters.")
def requires_toolchain_details(self):
"""
Determine whether toolchain details are required by this module naming scheme,
e.g. whether one of det_toolchain_* functions are relied upon.
"""
return False
def det_full_module_name(self, ec):
"""
Determine full module name, relative to the top of the module path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with full module name, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
raise NotImplementedError
def det_short_module_name(self, ec):
"""
Determine short module name, i.e. the name under which modules will be exposed to users.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with module name, e.g. '<name>/<version>'
"""
# by default: full module name doesn't include a $MODULEPATH subdir
return self.det_full_module_name(ec)
def det_install_subdir(self, ec):
"""
Determine name of software installation subdirectory of install path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with name of subdirectory, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
# by default: use full module name as name for install subdir
return self.det_full_module_name(ec)
def det_module_subdir(self, ec):
"""
Determine subdirectory for module file in $MODULEPATH.
This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with subdir path (relative to $MODULEPATH), e.g. '<compiler>/<mpi_lib>'
"""
# by default: no subdirectory
return ''
def det_module_symlink_paths(self, ec):
"""
Determine list of paths in which symlinks to module files must be created.
"""
# by default: make a symlink from moduleclass subdirectory of $MODULEPATH
return [ec['moduleclass']]
def det_modpath_extensions(self, ec):
"""
Determine list of subdirectories for which to extend $MODULEPATH with when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: an empty list of subdirectories to extend $MODULEPATH with
return []
def det_user_modpath_extensions(self, ec):
"""
Determine list of subdirectories relative to the user-specific modules directory for which to extend
$MODULEPATH with when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: use "system" module path extensions of naming scheme
return self.det_modpath_extensions(ec)
def det_init_modulepaths(self, ec):
"""
Determine initial module paths, where the modules that are top of the hierarchy (if any) live.
"""
return []
def expand_toolchain_load(self, ec=None):
"""
Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.
This is useful when toolchains are not exposed to users.
"""
# by default: just include a load statement for the toolchain
return False
def is_short_modname_for(self, short_modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
Default implementation checks via a strict regex pattern, and assumes short module names are of the form:
<name>/<version>[-<toolchain>]
"""
modname_regex = re.compile('^%s(/\S+)?$' % re.escape(name))
res = bool(modname_regex.match(short_modname))
self.log.debug("Checking whether '%s' is a module name for software with name '%s' via regex %s: %s",
short_modname, name, modname_regex.pattern, res)
return res
def det_make_devel_module(self):
"""
Determine if a devel module should be generated.
Can be used to create a separate set of modules with a different naming scheme.
Software is already installed beforehand with one naming scheme, including development module.
"""
return True
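# A minimal custom-scheme sketch built on the API above (illustrative only;
# a real scheme would live in its own module so that EasyBuild can discover it):
class ExampleFlatNamingScheme(ModuleNamingScheme):
    """Toy scheme producing flat '<name>/<version><versionsuffix>' module names."""
    REQUIRED_KEYS = ['name', 'version', 'versionsuffix']
    def det_full_module_name(self, ec):
        # e.g. {'name': 'zlib', 'version': '1.2.11', 'versionsuffix': ''} -> 'zlib/1.2.11'
        return '%s/%s%s' % (ec['name'], ec['version'], ec['versionsuffix'])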
|
gppezzi/easybuild-framework
|
easybuild/tools/module_naming_scheme/mns.py
|
Python
|
gpl-2.0
| 7,666 | 0.004696 |
#!/usr/bin/env python
# Copyright 2018, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_json_output module."""
import json
import os
from googletest.test import gtest_json_test_utils
from googletest.test import gtest_test_utils
GTEST_OUTPUT_SUBDIR = 'json_outfiles'
GTEST_OUTPUT_1_TEST = 'gtest_xml_outfile1_test_'
GTEST_OUTPUT_2_TEST = 'gtest_xml_outfile2_test_'
EXPECTED_1 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyOne',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'time': u'*',
u'timestamp': u'*',
u'classname': u'PropertyOne',
u'SetUpProp': u'1',
u'TestSomeProperty': u'1',
u'TearDownProp': u'1',
}],
}],
}
EXPECTED_2 = {
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'name':
u'AllTests',
u'testsuites': [{
u'name':
u'PropertyTwo',
u'tests':
1,
u'failures':
0,
u'disabled':
0,
u'errors':
0,
u'time':
u'*',
u'timestamp':
u'*',
u'testsuite': [{
u'name': u'TestSomeProperties',
u'status': u'RUN',
u'result': u'COMPLETED',
u'timestamp': u'*',
u'time': u'*',
u'classname': u'PropertyTwo',
u'SetUpProp': u'2',
u'TestSomeProperty': u'2',
u'TearDownProp': u'2',
}],
}],
}
class GTestJsonOutFilesTest(gtest_test_utils.TestCase):
"""Unit test for Google Test's JSON output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, '')
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + '.json'))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + '.json'))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_2)
def _TestOutFile(self, test_name, expected):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, '--gtest_output=json:%s' % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + '.json'
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
if os.path.isfile(output_file1):
with open(output_file1) as f:
actual = json.load(f)
else:
with open(output_file2) as f:
actual = json.load(f)
self.assertEqual(expected, gtest_json_test_utils.normalize(actual))
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '0'
gtest_test_utils.Main()
|
grpc/grpc-ios
|
native_src/third_party/googletest/googletest/test/googletest-json-outfiles-test.py
|
Python
|
apache-2.0
| 5,705 | 0.003155 |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
ProfileNumericComparator = Enum(
'equal',
'greaterThan',
'greaterThanEqual',
'lessThan',
'lessThanEqual',
'notEqual',
)
|
xuru/pyvisdk
|
pyvisdk/enums/profile_numeric_comparator.py
|
Python
|
mit
| 307 | 0 |
__author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
marketliquidity_examples
Shows how to calculate market liquidity using bid/ask data and tick counts.
"""
# TODO
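# A minimal, self-contained sketch of the idea using plain pandas objects; the
# column names and the two liquidity proxies below (quoted spread and tick
# counts per hour) are illustrative assumptions, not the pythalesians API.
def example_liquidity_proxies(ticks):
    """ticks: DataFrame with 'bid' and 'ask' columns, indexed by timestamp."""
    mid = (ticks['bid'] + ticks['ask']) / 2.0
    spread_bp = 1e4 * (ticks['ask'] - ticks['bid']) / mid    # quoted spread in basis points
    tick_count = ticks['bid'].resample('1H').count()         # ticks per hour as an activity proxy
    return spread_bp, tick_count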
|
poeticcapybara/pythalesians
|
pythalesians-examples/marketliquidity_examples.py
|
Python
|
apache-2.0
| 758 | 0.006596 |
# NamoInstaller ActiveX Control 1.x - 3.x
# CVE-NOMATCH
import logging
log = logging.getLogger("Thug")
def Install(self, arg):
if len(arg) > 1024:
log.ThugLogging.log_exploit_event(self._window.url,
"NamoInstaller ActiveX",
"Overflow in Install method")
log.DFT.check_shellcode(arg)
if str([arg]).find('http') > -1:
log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Insecure download from URL %s' % (arg, ))
log.ThugLogging.log_exploit_event(self._window.url,
"NamoInstaller ActiveX",
"Insecure download from URL",
forward = False,
data = {
"url": arg
}
)
try:
self._window._navigator.fetch(arg, redirect_type = "NamoInstaller Exploit")
except Exception:
log.ThugLogging.add_behavior_warn('[NamoInstaller ActiveX] Fetch failed')
|
pdelsante/thug
|
thug/ActiveX/modules/NamoInstaller.py
|
Python
|
gpl-2.0
| 1,217 | 0.008217 |
#!/usr/bin/env python
#Alberto
from __future__ import print_function, division
import os
import glob
import argparse
from subprocess import Popen, PIPE
from argparse import RawTextHelpFormatter
import sys
import re
from textwrap import dedent
from subprocess import call
def warn(*objs):
print(*objs, file=sys.stderr)
class FileLineWrapper(object):
"""File reader with line numbers and readRequiredLine"""
def __init__(self, f):
self.f = f
self.line = 0
def close(self):
return self.f.close()
def readline(self):
self.line += 1
return self.f.readline()
def readRequiredLine(self):
line = self.readline()
if not line:
raise IOError("Unexpected end of file found: %d" % self.line)
return line
def __enter__(self):
self.f.__enter__()
return self
def __exit__(self, tp, value, traceback):
self.f.__exit__(tp, value, traceback)
class GJob:
"""Class representing a gaussian job"""
COMMANDPat = re.compile("\s*#[PTN]* ")
SKIPGeomPat = re.compile("geom=\S*check", re.IGNORECASE)
LINKPat = re.compile("\s*--Link1--")
OPTPat = re.compile("\sopt([=\(]+([^\s\)]+)?\)?)", re.IGNORECASE)
FREQPat = re.compile("\sfreq[\s=\(]", re.IGNORECASE)
MAXCylPat = re.compile("MaxCycles=\d+", re.IGNORECASE)
CALCFCPat = re.compile("readFC|calcfc|calchffc|rcfc", re.IGNORECASE)
GEOMPat = re.compile("\s*geom(=\S+)", re.IGNORECASE)
GUESSPat = re.compile("\s*guess(=\S+)", re.IGNORECASE)
def __init__(self, start, command, middle, coords, end):
self.start = start # % directives
self.command = command # gaussian command line
self.middle = middle # comment, charge and multiplicity
self.coords = coords
self.end = end # anything after the coordinates
def __str__(self):
return ''.join(
[self.start, self.command, self.middle, self.coords, self.end])
def isOpt(self):
return GJob.OPTPat.search(self.command)
def isFreq(self):
return GJob.FREQPat.search(self.command)
def execute(self, outName):
com = dedent("""
date>>%s;gaussian.csh >>%s<<'gJOBComs'
%s'gJOBComs'""") % (outName,outName, str(self))
#warn(com)
status = call(["/bin/csh", "-fc", com])
if status > 0:
raise IOError("Gaussian returned error code=%d" % status)
p = Popen("tail -n 10 "+outName, shell=True, bufsize=2048,
stdin=PIPE, stdout=PIPE, close_fds=True)
stdin,stdout= p.stdin, p.stdout
#stdin,stdout = os.popen2("tail -n 10 "+outName)
stdin.close()
lines = stdout.read()
stdout.close()
return b" Normal termination of Gaussian" in lines
def copy(self, chkGeom=False, optSteps='', optCalcFC=False, optReadFC=False):
newCom = self.command
newMiddle = self.middle
newCoords = self.coords
ma = GJob.OPTPat.search(newCom)
if (optSteps or optCalcFC or optReadFC) and not ma:
raise Exception("Not an optimization:" + str(self))
elif optSteps or optCalcFC or optReadFC:
optArgs= ma.group(2)
if optSteps:
optArgs= GJob.MAXCylPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "MaxCycles="+str(optSteps)
if optCalcFC:
optArgs = GJob.CALCFCPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "CalcFC"
if optReadFC:
optArgs = GJob.CALCFCPat.sub("",optArgs)
if optArgs: optArgs += ","
optArgs += "ReadFC"
optArgs = optArgs.replace(",,",",")
if optArgs.startswith(",") : optArgs = optArgs[1:]
newCom = GJob.OPTPat.sub(" opt=(%s)"%optArgs,newCom)
if chkGeom:
newCom = GJob.GEOMPat.sub("",newCom)
newCom = GJob.GUESSPat.sub("",newCom)
newCom = newCom.rstrip() + " Geom=AllCheck Guess=TCheck\n"
newMiddle = ""
newCoords = ""
return GJob(self.start, newCom, newMiddle, newCoords, self.end)
@staticmethod
def readNext(inFile):
start = ""
command = ""
middle = ""
coords = ""
end = ""
line = inFile.readline()
if not line: return None
while not GJob.COMMANDPat.match(line):
start += line
line = inFile.readRequiredLine()
while line.strip():
command += line
line = inFile.readRequiredLine()
if not GJob.SKIPGeomPat.search(command):
middle = "\n"
line = inFile.readRequiredLine()
# read comment lines
while line.strip():
middle += line
line = inFile.readRequiredLine()
middle += line
# read charge and multiplicity
middle += inFile.readRequiredLine()
line = inFile.readRequiredLine()
while line.strip():
coords += line
line = inFile.readRequiredLine()
while line and not GJob.LINKPat.match(line):
end += line
line = inFile.readline()
return GJob(start, command, middle, coords, end)
desc = """Run guassian optimization run.
Your gInFile may contain multiple jobs.
Whenever an optimization job is found it will be executed in multiple
   subjobs with MaxCycles=optSteps. If the optimization does not
   complete, a frequency calculation is done with the final geometry.
   If the (n-1)'th step was a freq job, its parameters will be retained;
   if not, the "CalcFC" option will be added to the opt keyword.
Note that gOpt will modify your gaussian options somewhat.
Example: set n=myName.g ; set nCPU=4 ; mysub.py -q medium -jobName $n:r -nCPU $nCPU -totalMem 10 -- gOpt.py -in $n"""
parser = argparse.ArgumentParser(description=desc, formatter_class=RawTextHelpFormatter)
parser.add_argument('-in', dest='gInFileName', required=True,
help='gaussian command file, out will be name.out')
parser.add_argument('-optSteps', dest='optSteps', required=False, default=8,
                    help='Number of optimization steps to execute before recalculating freq (def=%d)'%8)
parser.add_argument('-restartJob', metavar="<n>", type=int, required=False, default=0,
help='restart this computation with job number <n>. Only for opt jobs.')
args = parser.parse_args()
gInFileName = args.gInFileName
gOutFileName, dummy = os.path.splitext(gInFileName)
gOutFileName += ".out"
restartJob = args.restartJob
optSteps=args.__dict__.get('optSteps',8)
gJobs = []
with FileLineWrapper(open(gInFileName)) as gInFile:
gJob = GJob.readNext(gInFile)
while gJob:
gJobs.append(gJob)
gJob = GJob.readNext(gInFile)
lastGJob = None
for gJob in gJobs:
restartJob -= 1
if restartJob > 0:
continue
if gJob.isOpt(): # and lastGJob != None:
newGJob = gJob.copy(optSteps=optSteps)
success = newGJob.execute(gOutFileName)
while not success:
if lastGJob and lastGJob.isFreq():
newGJob = lastGJob.copy(chkGeom=True)
if not newGJob.execute(gOutFileName) :
raise IOError("Freq calculation did not complete!")
newGJob = gJob.copy(optSteps=optSteps,optReadFC=True)
success = newGJob.execute(gOutFileName)
else:
newGJob = gJob.copy(chkGeom=True,optSteps=optSteps,optCalcFC=True)
success = newGJob.execute(gOutFileName)
else:
gJob.execute(gOutFileName)
lastGJob = gJob
|
chemalot/chemalot
|
bin/gOpt.py
|
Python
|
apache-2.0
| 7,487 | 0.024709 |
# !!!!!!!!!!! Add a header to the documentation, that starts with something
# like "uncertainties.UFloat-compatible version of...", for all functions.
"""
Implementation of umath.py, with internals.
"""
# This module exists so as to define __all__, which in turn defines
# which functions are visible to the user in umath.py through from
# umath import * and Python shell completion.
# Many analytical derivatives depend on this
# Standard modules
import math
import sys
import itertools
# Local modules
import uncertainties.core as uncert_core
from uncertainties.core import (to_affine_scalar, AffineScalarFunc,
LinearCombination)
###############################################################################
# We wrap the functions from the math module so that they keep track of
# uncertainties by returning a AffineScalarFunc object.
# Some functions from the math module cannot be adapted in a standard
# way so to work with AffineScalarFunc objects (either as their result
# or as their arguments):
# (1) Some functions return a result of a type whose value and
# variations (uncertainties) cannot be represented by AffineScalarFunc
# (e.g., math.frexp, which returns a tuple). The exception raised
# when not wrapping them with wrap() is more obvious than the
# one obtained when wrapping them (in fact, the wrapped functions
# attempts operations that are not supported, such as calculation a
# subtraction on a result of type tuple).
# (2) Some functions don't take continuous scalar arguments (which can
# be varied during differentiation): math.fsum, math.factorial...
# Such functions can either be:
# - wrapped in a special way.
# - excluded from standard wrapping by adding their name to
# no_std_wrapping
# Math functions that have a standard interface: they take
# one or more float arguments, and return a scalar:
many_scalars_to_scalar_funcs = []
# Some functions require a specific treatment and must therefore be
# excluded from standard wrapping. Functions
# no_std_wrapping = ['modf', 'frexp', 'ldexp', 'fsum', 'factorial']
# Functions with numerical derivatives:
#
# !! Python2.7+: {..., ...}
num_deriv_funcs = set(['fmod', 'gamma', 'lgamma'])
# Functions are by definition locally constant (on real
# numbers): their value does not depend on the uncertainty (because
# this uncertainty is supposed to lead to a good linear approximation
# of the function in the uncertainty region). The type of their output
# for floats is preserved, as users should not care about deviations
# in their value: their value is locally constant due to the nature of
# the function (0 derivative). This situation is similar to that of
# comparisons (==, >, etc.).
#
# !! Python 2.7+: {..., ...}
locally_cst_funcs = set(['ceil', 'floor', 'isinf', 'isnan', 'trunc'])
# Functions that do not belong in many_scalars_to_scalar_funcs, but
# that have a version that handles uncertainties. These functions are
# also not in numpy (see unumpy/core.py).
non_std_wrapped_funcs = []
# Function that copies the relevant attributes from generalized
# functions from the math module:
# This is a copy&paste job from the functools module, changing
# the default argument for assigned
def wraps(wrapper,
wrapped,
assigned=('__doc__',),
updated=('__dict__',)):
"""Update a wrapper function to look like the wrapped function.
wrapper -- function to be updated
wrapped -- original function
assigned -- tuple naming the attributes assigned directly
from the wrapped function to the wrapper function
updated -- tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function.
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
########################################
# Wrapping of math functions:
# Fixed formulas for the derivatives of some functions from the math
# module (some functions might not be present in all version of
# Python). Singular points are not taken into account. The user
# should never give "large" uncertainties: problems could only appear
# if this assumption does not hold.
# Functions not mentioned in _fixed_derivatives have their derivatives
# calculated numerically.
# Functions that have singularities (possibly at infinity) benefit
# from analytical calculations (instead of the default numerical
# calculation) because their derivatives generally change very fast.
# Even slowly varying functions (e.g., abs()) yield more precise
# results when differentiated analytically, because of the loss of
# precision in numerical calculations.
#def log_1arg_der(x):
# """
# Derivative of log(x) (1-argument form).
# """
# return 1/x
def log_der0(*args):
"""
Derivative of math.log() with respect to its first argument.
Works whether 1 or 2 arguments are given.
"""
if len(args) == 1:
return 1/args[0]
else:
return 1/args[0]/math.log(args[1]) # 2-argument form
# The following version goes about as fast:
## A 'try' is used for the most common case because it is fast when no
## exception is raised:
#try:
# return log_1arg_der(*args) # Argument number check
#except TypeError:
# return 1/args[0]/math.log(args[1]) # 2-argument form
def _deriv_copysign(x,y):
if x >= 0:
return math.copysign(1, y)
else:
return -math.copysign(1, y)
def _deriv_fabs(x):
if x >= 0:
return 1
else:
return -1
def _deriv_pow_0(x, y):
if y == 0:
return 0.
elif x != 0 or y % 1 == 0:
return y*math.pow(x, y-1)
else:
return float('nan')
def _deriv_pow_1(x, y):
if x == 0 and y > 0:
return 0.
else:
return math.log(x) * math.pow(x, y)
erf_coef = 2/math.sqrt(math.pi) # Optimization for erf()
fixed_derivatives = {
# In alphabetical order, here:
'acos': [lambda x: -1/math.sqrt(1-x**2)],
'acosh': [lambda x: 1/math.sqrt(x**2-1)],
'asin': [lambda x: 1/math.sqrt(1-x**2)],
'asinh': [lambda x: 1/math.sqrt(1+x**2)],
'atan': [lambda x: 1/(1+x**2)],
'atan2': [lambda y, x: x/(x**2+y**2), # Correct for x == 0
lambda y, x: -y/(x**2+y**2)], # Correct for x == 0
'atanh': [lambda x: 1/(1-x**2)],
'copysign': [_deriv_copysign,
lambda x, y: 0],
'cos': [lambda x: -math.sin(x)],
'cosh': [math.sinh],
'degrees': [lambda x: math.degrees(1)],
'erf': [lambda x: math.exp(-x**2)*erf_coef],
'erfc': [lambda x: -math.exp(-x**2)*erf_coef],
'exp': [math.exp],
'expm1': [math.exp],
'fabs': [_deriv_fabs],
'hypot': [lambda x, y: x/math.hypot(x, y),
lambda x, y: y/math.hypot(x, y)],
'log': [log_der0,
lambda x, y: -math.log(x, y)/y/math.log(y)],
'log10': [lambda x: 1/x/math.log(10)],
'log1p': [lambda x: 1/(1+x)],
'pow': [_deriv_pow_0, _deriv_pow_1],
'radians': [lambda x: math.radians(1)],
'sin': [math.cos],
'sinh': [math.cosh],
'sqrt': [lambda x: 0.5/math.sqrt(x)],
'tan': [lambda x: 1+math.tan(x)**2],
'tanh': [lambda x: 1-math.tanh(x)**2]
}
# Many built-in functions in the math module are wrapped with a
# version which is uncertainty aware:
this_module = sys.modules[__name__]
def wrap_locally_cst_func(func):
'''
Return a function that returns the same arguments as func, but
after converting any AffineScalarFunc object to its nominal value.
This function is useful for wrapping functions that are locally
constant: the uncertainties should have no role in the result
(since they are supposed to keep the function linear and hence,
here, constant).
'''
def wrapped_func(*args, **kwargs):
args_float = list(map(uncert_core.nominal_value, args))
# !! In Python 2.7+, dictionary comprehension: {argname:...}
kwargs_float = dict(
(arg_name, uncert_core.nominal_value(value))
for (arg_name, value) in kwargs.items())
return func(*args_float, **kwargs_float)
return wrapped_func
# for (name, attr) in vars(math).items():
for name in dir(math):
if name in fixed_derivatives: # Priority to functions in fixed_derivatives
derivatives = fixed_derivatives[name]
elif name in num_deriv_funcs:
# Functions whose derivatives are calculated numerically by
# this module fall here (isinf, fmod,...):
derivatives = [] # Means: numerical calculation required
elif name not in locally_cst_funcs:
continue # 'name' not wrapped by this module (__doc__, e, etc.)
func = getattr(math, name)
if name in locally_cst_funcs:
wrapped_func = wrap_locally_cst_func(func)
else: # Function with analytical or numerical derivatives:
# Errors during the calculation of the derivatives are converted
# to a NaN result: it is assumed that a mathematical calculation
# that cannot be calculated indicates a non-defined derivative
# (the derivatives in fixed_derivatives must be written this way):
wrapped_func = uncert_core.wrap(
func, list(map(uncert_core.nan_if_exception, derivatives)))
# !! The same effect could be achieved with globals()[...] = ...
setattr(this_module, name, wraps(wrapped_func, func))
many_scalars_to_scalar_funcs.append(name)
###############################################################################
########################################
# Special cases: some of the functions from no_std_wrapping:
##########
# The math.factorial function is not converted to an uncertainty-aware
# function, because it does not handle non-integer arguments: it does
# not make sense to give it an argument with a numerical error
# (whereas this would be relevant for the gamma function).
##########
# fsum takes a single argument, which cannot be differentiated.
# However, each of the arguments inside this single list can
# be a variable. We handle this in a specific way:
# Only for Python 2.6+:
# For drop-in compatibility with the math module:
factorial = math.factorial
non_std_wrapped_funcs.append('factorial')
# We wrap math.fsum
original_func = math.fsum # For optimization purposes
# The function below exists so that temporary variables do not
# pollute the module namespace:
def wrapped_fsum():
"""
Return an uncertainty-aware version of math.fsum, which must
be contained in _original_func.
"""
# The fsum function is flattened, in order to use the
# wrap() wrapper:
flat_fsum = lambda *args: original_func(args)
flat_fsum_wrap = uncert_core.wrap(
flat_fsum, itertools.repeat(lambda *args: 1))
return wraps(lambda arg_list: flat_fsum_wrap(*arg_list),
original_func)
# !!!!!!!! Documented?
fsum = wrapped_fsum()
non_std_wrapped_funcs.append('fsum')
##########
# Some functions that either return multiple arguments (modf, frexp)
# or take some non-float arguments (which should not be converted to
# numbers with uncertainty).
# ! The arguments have the same names as in the math module
# documentation, so that the docstrings are consistent with them.
@uncert_core.set_doc(math.modf.__doc__)
def modf(x):
"""
Version of modf that works for numbers with uncertainty, and also
for regular numbers.
"""
# The code below is inspired by uncert_core.wrap(). It is
# simpler because only 1 argument is given, and there is no
# delegation to other functions involved (as for __mul__, etc.).
aff_func = to_affine_scalar(x) # Uniform treatment of all numbers
(frac_part, int_part) = math.modf(aff_func.nominal_value)
if aff_func._linear_part: # If not a constant
# The derivative of the fractional part is simply 1: the
# linear part of modf(x)[0] is the linear part of x:
return (AffineScalarFunc(frac_part, aff_func._linear_part), int_part)
else:
# This function was not called with an AffineScalarFunc
# argument: there is no need to return numbers with uncertainties:
return (frac_part, int_part)
many_scalars_to_scalar_funcs.append('modf')
@uncert_core.set_doc(math.ldexp.__doc__)
def ldexp(x, i):
# Another approach would be to add an additional argument to
# uncert_core.wrap() so that some arguments are automatically
# considered as constants.
aff_func = to_affine_scalar(x) # y must be an integer, for math.ldexp
if aff_func._linear_part:
return AffineScalarFunc(
math.ldexp(aff_func.nominal_value, i),
LinearCombination([(2**i, aff_func._linear_part)]))
else:
# This function was not called with an AffineScalarFunc
# argument: there is no need to return numbers with uncertainties:
# aff_func.nominal_value is not passed instead of x, because
# we do not have to care about the type of the return value of
# math.ldexp, this way (aff_func.nominal_value might be the
# value of x coerced to a difference type [int->float, for
# instance]):
return math.ldexp(x, i)
many_scalars_to_scalar_funcs.append('ldexp')
@uncert_core.set_doc(math.frexp.__doc__)
def frexp(x):
"""
Version of frexp that works for numbers with uncertainty, and also
for regular numbers.
"""
# The code below is inspired by uncert_core.wrap(). It is
# simpler because only 1 argument is given, and there is no
# delegation to other functions involved (as for __mul__, etc.).
aff_func = to_affine_scalar(x)
if aff_func._linear_part:
(mantissa, exponent) = math.frexp(aff_func.nominal_value)
return (
AffineScalarFunc(
mantissa,
# With frexp(x) = (m, e), x = m*2**e, so m = x*2**-e
                # and therefore dm/dx = 2**-e (as e is an integer that
# does not vary when x changes):
                LinearCombination([(2**-exponent, aff_func._linear_part)])),
# The exponent is an integer and is supposed to be
# continuous (errors must be small):
exponent)
else:
# This function was not called with an AffineScalarFunc
# argument: there is no need to return numbers with uncertainties:
return math.frexp(x)
non_std_wrapped_funcs.append('frexp')
###############################################################################
# Exported functions:
__all__ = many_scalars_to_scalar_funcs + non_std_wrapped_funcs
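# A minimal usage sketch (illustrative only; assumes the public ufloat
# constructor from the parent uncertainties package). The wrapped functions
# above accept numbers with uncertainty and return AffineScalarFunc results
# whose error follows the local derivative, e.g. for sin the propagated
# standard deviation is approximately |cos(x0)| * sigma_x.
def _example_umath_usage():
    from uncertainties import ufloat
    x = ufloat(0.5, 0.01)   # 0.5 +/- 0.01
    return sin(x)           # `sin` here is the wrapped math.sin defined above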
|
EternityForest/KaithemAutomation
|
kaithem/src/thirdparty/uncertainties/umath_core.py
|
Python
|
gpl-3.0
| 14,767 | 0.001761 |
#forces : A fast customized optimization solver.
#
#Copyright (C) 2013-2016 EMBOTECH GMBH [info@embotech.com]. All rights reserved.
#
#
#This software is intended for simulation and testing purposes only.
#Use of this software for any commercial purpose is prohibited.
#
#This program is distributed in the hope that it will be useful.
#EMBOTECH makes NO WARRANTIES with respect to the use of the software
#without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
#PARTICULAR PURPOSE.
#
#EMBOTECH shall not have any liability for any damage arising from the use
#of the software.
#
#This Agreement shall exclusively be governed by and interpreted in
#accordance with the laws of Switzerland, excluding its principles
#of conflict of laws. The Courts of Zurich-City shall have exclusive
#jurisdiction in case of any dispute.
#
from distutils.ccompiler import new_compiler
c = new_compiler()
#from numpy.distutils.intelccompiler import IntelCCompiler
#c = IntelCCompiler()
import os
import sys
import distutils
# determine source file
sourcefile = os.path.join(os.getcwd(),"forces","src","forces"+".c")
# determine lib file
if sys.platform.startswith('win'):
libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".lib")
else:
libfile = os.path.join(os.getcwd(),"forces","lib","forces"+".so")
# create lib dir if it does not exist yet
if not os.path.exists(os.path.join(os.getcwd(),"forces","lib")):
os.makedirs(os.path.join(os.getcwd(),"forces","lib"))
# compile into object file
objdir = os.path.join(os.getcwd(),"forces","obj")
if isinstance(c,distutils.unixccompiler.UnixCCompiler):
objects = c.compile([sourcefile], output_dir=objdir, extra_preargs=['-O3','-fPIC','-fopenmp','-mavx'])
if sys.platform.startswith('linux'):
c.set_libraries(['rt','gomp'])
else:
objects = c.compile([sourcefile], output_dir=objdir)
# create libraries
libdir = os.path.join(os.getcwd(),"forces","lib")
exportsymbols = ["%s_solve" % "forces"]
c.create_static_lib(objects, "forces", output_dir=libdir)
c.link_shared_lib(objects, "forces", output_dir=libdir, export_symbols=exportsymbols)
|
embotech/forcesnlp-examples
|
robot/acado/export_MPC/forces/interface/forces_build.py
|
Python
|
mit
| 2,126 | 0.031044 |
# -*- coding: utf-8 -*-
""" Module for making calls to EPO-OPS REST-API.
This module contain classes and functions to get data from
[EPO-OPS API](http://www.epo.org/searching-for-patents/technical/espacenet/ops.html)
"""
import logging
import re
import time
from base64 import b64encode
from collections import namedtuple
from datetime import datetime, timedelta
import requests
from .constants import AUTH_URL, URL_PREFIX, VALID_ENDPOINTS, \
VALID_IDTYPES
from epo_utils.exceptions import FetchFailed, QuotaPerHourExceeded, \
QuotaPerWeekExceeded
from epo_utils.ops import Services, ReferenceType
try:
import requests_cache
except ImportError:
_HAS_CACHE = False
else:
_HAS_CACHE = True
from .documents import DocumentID
class APIInput:
"""
Encapsulation of API-input.
Provides ID-formatting and interface
to :class:`epo_utils.documents.DocumentID`.
Attributes
----------
id_type : str
ID-format (epodoc, docdb, original).
number : str
Document number.
kind : str
Document kind code.
country : str
Country code.
date : str
Date as YYYYMMDD-string.
"""
def __init__(self, id_type, number, kind=None, country=None, date=None):
if id_type not in VALID_IDTYPES:
raise ValueError('invalid id_type: {}'.format(id_type))
if date is not None:
date = str(date)
try:
datetime.strptime(date, '%Y%m%d')
except ValueError:
raise ValueError('date must be in YYYYMMDD-format')
else:
if len(date) != 8:
raise ValueError('date must be in YYYYMMDD-format')
if country is not None and not country.strip():
raise ValueError('country cant be empty if provided')
if kind is not None and not kind.strip():
raise ValueError('kind cant be empty if provided')
self.id_type = id_type
self.number = str(number)
self.kind = kind
self.country = country
self.date = date
@classmethod
def from_document_id(cls, document_id):
""" Convert instance of :class:`epo_utils.documents.DocumentID`
to `APIInput`.
Parameters
----------
document_id : epo_utils.documents.DocumentID
Document-ID to translate.
Returns
-------
APIInput
"""
if not isinstance(document_id, DocumentID):
raise ValueError('document_id must be DocumentID-instance')
return cls(document_id.id_type, document_id.doc_number,
document_id.kind, document_id.country, document_id.date)
def to_id(self):
""" Format as valid API-input ID.
Returns
-------
str
"""
if (',' in self.number or '.' in self.number or '/' in self.number) \
and self.id_type != 'classification':
number = '({})'.format(self.number)
else:
number = self.number
parts = [part for part in [self.country, number, self.kind, self.date]
if part is not None]
if self.id_type == 'original':
id_ = '.'.join(parts).replace(' ', '%20')
elif self.id_type == 'docdb':
id_ = '.'.join(parts)
elif self.id_type == 'epodoc':
if self.date is not None:
id_ = ''.join(parts[:-1])
id_ += '.' + self.date
else:
id_ = ''.join(parts)
elif self.id_type == 'classification':
return number
else:
raise ValueError('invalid id_type: {}'.format(self.id_type))
return id_
def __repr__(self):
module = self.__class__.__module__
class_name = self.__class__.__name__
return '<{0}.{1}: {2}>'.format(module, class_name, self.to_id())
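# A minimal formatting sketch (values are illustrative only), showing how
# three of the ID flavours above are rendered by to_id():
def _example_to_id_formatting():
    assert APIInput('epodoc', '1000000', kind='A1', country='EP').to_id() == 'EP1000000A1'
    assert APIInput('docdb', '1000000', kind='A1', country='EP').to_id() == 'EP.1000000.A1'
    assert APIInput('original', 'PCT/EP2007/004321').to_id() == '(PCT/EP2007/004321)'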
class Token(namedtuple('Token', ['token', 'expires'])):
""" Wrapper around access-token. """
class EPOClient:
""" Client to call EPO-OPS REST-API using `requests`.
Features auto-throttling based on OPS throttling headers and
automatic retries on server-side error codes.
Parameters
----------
accept_type : str
Http accept type.
key : str, optional
EPO OPS user key.
secret : str, optional
EPO OPS user secret.
cache : bool
If True, try to use `requests_cache` for caching. Default False.
cache_kwargs : dict, optional.
Passed to :py:func:`requests_cache.install_cache` as keyword
arguments if provided.
max_retries : int
Number of allowed retries at 500-responses.
retry_timeout : float, int
Timeout in seconds between calls when retrying at 500-responses.
Attributes
----------
secret : str
key : str
token : Token or None
quota_per_hour_used : int
quota_per_week_used : int
"""
HAS_FULLTEXT = {'EP'}
def __init__(self, accept_type='xml', key=None, secret=None, cache=False,
cache_kwargs=None, max_retries=1, retry_timeout=10):
try:
_check_epoclient_input(accept_type, key, secret, cache,
cache_kwargs, max_retries, retry_timeout)
except AssertionError as e:
raise ValueError(str(e))
if accept_type.startswith('application/'):
self.accept_type = accept_type
else:
self.accept_type = 'application/{}'.format(accept_type)
if cache and _HAS_CACHE:
logging.info('Installs cache.')
requests_cache.install_cache(**(cache_kwargs or dict()))
elif cache:
raise ValueError('cache is set to True but requests_cache '
'is not available.')
self.secret = secret
self.key = key
self.max_retries = max_retries
self.retry_timeout = retry_timeout
self.quota_per_hour_used = 0
self.quota_per_week_used = 0
if all([secret, key]):
logging.debug('Auth provided.')
self.token = self.authenticate()
else:
logging.debug('Auth not provided')
self.token = None
self._last_call = {
'search': None,
'retrieval': None,
'inpadoc': None,
'images': None,
'other': None
}
self._next_call = self._last_call.copy()
def fetch(self, service, ref_type, api_input, endpoint='',
options=None, extra_headers=None):
""" Generic function to fetch data from the EPO-OPS API.
Parameters
----------
service : epo_utils.ops.Services
OPS-service to fetch from.
ref_type : epo_utils.ops.ReferenceType
OPS-reference type of data to fetch.
api_input : APIInput, list[APIInput]
Input to API-call.
endpoint : str
API-endpoint to call.
options : list, optional
            API-call constituents.
        extra_headers : dict, optional
            Additional or custom headers to be used.
Returns
-------
requests.Response
"""
if not isinstance(ref_type, ReferenceType):
raise ValueError('invalid ref_type: {}'.format(ref_type))
if not isinstance(service, Services):
raise ValueError('invalid service: {}'.format(service))
if endpoint not in VALID_ENDPOINTS:
raise ValueError('invalid endpoint: {}'.format(endpoint))
try:
input_text = ','.join(i.to_id() for i in api_input)
except TypeError:
input_text = api_input.to_id()
id_types = {api_input.id_type}
else:
id_types = {i.id_type for i in api_input}
if len(id_types) > 1:
raise ValueError('non-matching id-types')
options = options or list()
url = build_ops_url(service, ref_type, id_types.pop(),
endpoint, options)
headers = self._make_headers(extra_headers)
logging.debug('Makes request to: {}\nheaders: {}'.format(url, headers))
logging.info('fetches {}'.format(input_text))
try:
response = self.post('retrieval', url, input_text, headers=headers)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
logging.error('{} not found'.format(input_text))
raise FetchFailed(input_text)
else:
raise
logging.info('Fetch succeeded.')
return response
def search(self, query, fetch_range, service=Services.PublishedSearch,
endpoint='', extra_headers=None):
""" Post a GET-search query.
Parameters
----------
query : str
Query string.
fetch_range : tuple[int, int]
Get entries `fetch_range[0]` to `fetch_range[1]`.
service : Services
Which service to use for search.
endpoint : str, list[str]
Endpoint(s) to search.
extra_headers : dict, optional
Additional or custom headers to be used.
Returns
-------
requests.Response
"""
if not isinstance(service, Services):
raise ValueError('invalid service: {}'.format(service))
if not isinstance(endpoint, (list, tuple)):
endpoint = [endpoint]
if not all(e in VALID_ENDPOINTS for e in endpoint):
invalid = filter(lambda e: e not in VALID_ENDPOINTS,
endpoint)
raise ValueError('invalid endpoint: {}'.format(next(invalid)))
        if not (len(fetch_range) == 2
                and all(isinstance(i, int) for i in fetch_range)):
raise ValueError('invalid fetch_range: {}'.format(fetch_range))
headers = self._make_headers(
{'Accept': 'application/exchange+xml',
'X-OPS-Range': '{}-{}'.format(*fetch_range)}
)
headers.update(extra_headers or dict())
url = build_ops_url(service, options=endpoint)
logging.info('Sends query: {}'.format(query))
response = self.post('search', url, headers=headers, data={'q': query})
logging.info('Query successful.')
return response
def authenticate(self):
""" If EPO-OPS customer key and secret is available
get access-token.
Returns
-------
token : Token
Token and expiration time.
"""
if not all([self.secret, self.key]):
return None
logging.info('Attempts to authenticate.')
# Post base 64-encoded credentials to get access-token.
credentials = '{0}:{1}'.format(self.key, self.secret)
encoded_creds = b64encode(credentials.encode('ascii')).decode('ascii')
headers = {'Authorization': 'Basic {}'.format(encoded_creds)}
payload = {'grant_type': 'client_credentials'}
response = requests.post(AUTH_URL, headers=headers, data=payload)
response.raise_for_status()
logging.info('Authentication succeeded.')
# Parse response.
content = response.json()
token = content['access_token']
expires_in = int(content['expires_in'])
expires = datetime.now() + timedelta(seconds=expires_in)
token = Token(token, expires)
return token
def post(self, service, *args, **kwargs):
""" Makes an auto-throttled POST to the OPS-API.
Parameters
----------
service : str
OPS-system called.
*args
Positional arguments passed to :py:`requests.post`
**kwargs
Keyword arguments passed to :py:`requests.post`
Returns
-------
requests.Response
"""
logging.debug(
            '{} POST\nargs: {}\nkwargs: {}'.format(service, args, kwargs))
response = self._retry(self._throttled_call, service, requests.post,
*args, **kwargs)
return response
def get(self, service, *args, **kwargs):
""" Makes an auto-throttled GET-call to the OPS-API.
Parameters
----------
service : str
OPS-system called.
*args
Positional arguments passed to :py:`requests.get`
**kwargs
Keyword arguments passed to :py:`requests.get`
Returns
-------
requests.Response
"""
logging.debug(
'{} GET\nargs: {}\nkwargs: {}'.format(service, args, kwargs))
response = self._retry(self._throttled_call, service, requests.get,
*args, **kwargs)
return response
def _retry(self, request, *args, **kwargs):
""" Wrap `request` with retries at 500-responses.
Parameters
----------
request : Callable
Function which calls the OPS-API using `*args` and `**kwargs`.
*args
Positional arguments passed to `request`
**kwargs
Keyword arguments passed to :py:`request`
Returns
-------
Any
result from `request`
"""
for attempts_left in range(self.max_retries + 1, -1, -1):
try:
result = request(*args, **kwargs)
except requests.HTTPError as e:
if e.response.status_code >= 500 and attempts_left > 0:
logging.info(
                        'Server error ({} attempts left). Retrying in '
                        '{} seconds.'.format(attempts_left, self.retry_timeout))
time.sleep(self.retry_timeout)
else:
raise
else:
break
return result
def _throttled_call(self, service, request, *args, **kwargs):
""" Wrap `request` with auto-throttle.
Parameters
----------
service : str
OPS-service to call.
request : Callable
Function which calls the OPS-API using `*args` and `**kwargs`.
*args
Positional arguments passed to `request`
**kwargs
Keyword arguments passed to :py:`request`
Returns
-------
requests.Response
"""
logging.debug('Throttle with: {}'.format(service))
if service not in self._last_call:
raise ValueError('Invalid service: {}'.format(service))
next_call = self._next_call[service]
now = datetime.now()
if next_call is not None and now < next_call:
diff = next_call - now
time.sleep(diff.seconds + diff.microseconds / 1e6)
self._last_call[service] = datetime.now()
response = request(*args, **kwargs)
try:
response.raise_for_status()
except requests.HTTPError as error:
if error.response.status_code == requests.codes.forbidden:
raise_for_quota_rejection(error.response)
raise error # Non-quota related rejection.
# The OPS-API sets its request-limit by minute, which is updated
# for each call. Therefore, the throttling delay is set to
# 60 sec / calls per minute.
throttle_header = response.headers['X-Throttling-Control']
pattern = r'{}=([a-z]+):(\d+)'.format(service)
color, n_str = re.search(pattern, throttle_header).groups()
n_per_min = int(n_str)
delay = 60.0 / n_per_min # Delay in seconds.
seconds = int(delay)
milliseconds = int((delay - seconds) * 1e3)
next_delta = timedelta(seconds=seconds, milliseconds=milliseconds)
self._next_call[service] = self._last_call[service] + next_delta
# Update quota used.
q_per_h = int(response.headers['X-IndividualQuotaPerHour-Used'])
q_per_w = int(response.headers['X-RegisteredQuotaPerWeek-Used'])
self.quota_per_hour_used = q_per_h
self.quota_per_week_used = q_per_w
return response
def _make_headers(self, extras=None):
""" Prepare request headers.
Parameters
----------
extras : dict, optional
Extra headers which should be used.
Returns
-------
dict
"""
headers = {'Accept': self.accept_type}
if self.token is not None:
            if datetime.now() > self.token.expires:
                # Refresh the token if it has expired.
self.token = self.authenticate()
headers['Authorization'] = 'Bearer {}'.format(self.token.token)
headers.update(extras or dict())
return headers
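# Minimal usage sketch (illustrative only; the key/secret are placeholders and
# the CQL query is just an example -- search() defaults to
# Services.PublishedSearch):
#
#     >>> client = EPOClient(key='<consumer key>', secret='<consumer secret>')
#     >>> response = client.search('ti="graphene"', fetch_range=(1, 25))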
def build_ops_url(service, reference_type=None, id_type=None,
endpoint=None, options=None):
""" Prepare an url for calling the OPS-API.
    The URL is formatted as::
        :py:const:`URL_PREFIX`/service/reference-type/id-type/[endpoint]/[constituent(s)]
    where parts that are None are left out.
    Parameters
    ----------
    service : Services
        OPS-service.
    reference_type : ReferenceType, optional
        Reference type to call.
    id_type : str, optional
        ID-format of the API-input (epodoc, docdb, original).
endpoint : str, optional
Optional endpoint.
options : list, optional
Optional constituents.
Returns
-------
url : str
Formatted url.
"""
url_parts = [
URL_PREFIX,
service.value if service is not None else None,
reference_type.value if reference_type is not None else None,
        id_type if id_type is not None else None,
endpoint,
','.join(options) if options is not None else None
]
present_parts = filter(None, url_parts)
url = '/'.join(present_parts)
logging.debug('Built url: {}'.format(url))
return url
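# For example, assuming Services.PublishedSearch.value is 'published-data/search'
# (the actual value is defined in epo_utils.ops), a call like
# build_ops_url(Services.PublishedSearch, options=['biblio']) would yield
# '<URL_PREFIX>/published-data/search/biblio', with all None-parts dropped.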
def raise_for_quota_rejection(response):
""" Check the response for "X-Rejection-Reason"-header and
raise if quota exceeded.
Parameters
----------
response : requests.Response
Response-object to check.
Returns
-------
None
If quota isn't exceeded.
Raises
------
QuotaPerWeekExceeded
If rejection header is "RegisteredQuotaPerWeek".
QuotaPerHourExceeded
If rejection header is "IndividualQuotaPerHour"
"""
if response.status_code != requests.codes.forbidden:
return
rejection = response.headers.get('X-Rejection-Reason', None)
if rejection is None:
return
if rejection == 'RegisteredQuotaPerWeek':
logging.error('quota per week exceeded')
raise QuotaPerWeekExceeded(response.text)
elif rejection == 'IndividualQuotaPerHour':
logging.error('quota per hour exceeded')
raise QuotaPerHourExceeded(response.text)
else:
# Anonymous user-headers skipped since anonymous use will be
        # discontinued and this package does not support it anyway.
return
def _check_epoclient_input(accept_type, key, secret, cache,
cache_kwargs, max_retries, retry_timeout):
""" Check input for :class:`EPOClient`.
Parameters
----------
accept_type : str
Http accept type.
key : str, optional
EPO OPS user key.
secret : str, optional
EPO OPS user secret.
cache : bool
If True, try to use `requests_cache` for caching. Default False.
cache_kwargs : dict, optional.
Passed to :py:func:`requests_cache.install_cache` as keyword
arguments if provided.
max_retries : int
Number of allowed retries at 500-responses.
retry_timeout : float
Timeout in seconds between calls when retrying at 500-responses.
Raises
-------
AssertionError
If input is bad.
"""
assert isinstance(accept_type, str), 'accept_type must be str'
assert isinstance(key, str), 'key must be str'
assert isinstance(secret, str), 'secret must be str'
assert isinstance(cache, bool), 'cache must be boolean'
assert isinstance(cache_kwargs, dict) or cache_kwargs is None, \
'cache_kwargs must be dict or None'
assert isinstance(max_retries, int) and max_retries >= 0, \
'max_retries must be non-negative integer'
    assert isinstance(retry_timeout, (float, int)) and retry_timeout >= 0,\
'retry_timeout must be non-negative number'
|
clicumu/epo_utils
|
epo_utils/api.py
|
Python
|
mit
| 20,708 | 0.000145 |
import os
import re
import sys
import json
import shlex
import logging
import inspect
import functools
import importlib
from pprint import pformat
from collections import namedtuple
from traceback import format_tb
from requests.exceptions import RequestException
import strutil
from cachely.loader import Loader
from .lib import library, interpreter_library, DataProxy
from . import utils
from . import core
from . import exceptions
logger = logging.getLogger(__name__)
BASE_LIBS = ['snagit.lib.text', 'snagit.lib.lines', 'snagit.lib.soup']
ReType = type(re.compile(''))
class Instruction(namedtuple('Instruction', 'cmd args kws line lineno')):
'''
``Instruction``'s take the form::
command [arg [arg ...]] [key=arg [key=arg ...]]
Where ``arg`` can be one of: single quoted string, double quoted string,
digit, True, False, None, or a simple, unquoted string.
'''
values_pat = r'''
[rj]?'(?:(\'|[^'])*?)' |
[r]?"(?:(\"|[^"])*?)" |
(\d+) |
(True|False|None) |
([^\s,]+)
'''
args_re = re.compile(
r'''^(
(?P<kwd>\w[\w\d-]*)=(?P<val>{0}) |
(?P<arg>{0}|([\s,]+))
)\s*'''.format(values_pat),
re.VERBOSE
)
value_dict = {'True': True, 'False': False, 'None': None}
def __str__(self):
def _repr(w):
if isinstance(w, ReType):
return 'r"{}"'.format(str(w.pattern))
return repr(w)
return '{}{}{}'.format(
self.cmd.upper(),
' {}'.format(
' '.join([_repr(c) for c in self.args]) if self.args else ''
),
' {}'.format(' '.join(
'{}={}'.format(k, _repr(v)) for k, v in self.kws.items()
) if self.kws else '')
)
@classmethod
def get_value(cls, s):
if s.isdigit():
return int(s)
elif s in cls.value_dict:
return cls.value_dict[s]
elif s.startswith(('r"', "r'")):
return re.compile(utils.escaped(s[2:-1]))
elif s.startswith("j'"):
return json.loads(utils.escaped(s[2:-1]))
elif s.startswith(('"', "'")):
return utils.escaped(s[1:-1])
else:
return s.strip()
@classmethod
def parse(cls, line, lineno):
args = []
kws = {}
cmd, text = strutil.splitter(line, expected=2, strip=True)
cmd = cmd.lower()
while text:
m = cls.args_re.search(text)
if not m:
break
gdict = m.groupdict()
kwd = gdict.get('kwd')
if kwd:
kws[kwd] = cls.get_value(gdict.get('val', ''))
else:
arg = gdict.get('arg', '').strip()
if arg != ',':
args.append(cls.get_value(arg))
text = text[len(m.group()):]
if text:
raise SyntaxError(
'Syntax error: "{}" (line {})'.format(text, lineno)
)
return cls(cmd, args, kws, line, lineno)
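# Illustrative parse (the command and URL are made up; assumes strutil.splitter
# splits off the first word as the command):
#
#     >>> instr = Instruction.parse('load "http://example.com" cache=True', 1)
#     >>> instr.cmd, instr.args, instr.kws
#     ('load', ['http://example.com'], {'cache': True})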
def lexer(code, lineno=0):
'''
Takes the script source code, scans it, and lexes it into
``Instructions``
'''
for chars in code.splitlines():
lineno += 1
line = chars.rstrip()
if not line or line.lstrip().startswith('#'):
continue
        logger.debug('Lexed {} byte(s), line: {}'.format(len(line), chars))
yield Instruction.parse(line, lineno)
def load_libraries(extensions=None):
if isinstance(extensions, str):
extensions = [extensions]
libs = BASE_LIBS + (extensions or [])
for lib in libs:
importlib.import_module(lib)
class Interpreter:
def __init__(
self,
contents=None,
loader=None,
use_cache=False,
do_pm=False,
extensions=None
):
self.use_cache = use_cache
self.loader = loader if loader else Loader(use_cache=use_cache)
self.contents = Contents(contents)
self.do_debug = False
self.do_pm = do_pm
self.instructions = []
load_libraries(extensions)
def load_sources(self, sources, use_cache=None):
use_cache = self.use_cache if use_cache is None else bool(use_cache)
contents = self.loader.load_sources(sources)
self.contents.update([
ct.decode() if isinstance(ct, bytes) else ct for ct in contents
])
def listing(self, linenos=False):
items = []
for instr in self.instructions:
items.append('{}{}'.format(
'{} '.format(instr.lineno) if linenos else '',
instr.line
))
return items
def lex(self, code):
lineno = self.instructions[-1].lineno if self.instructions else 0
instructions = list(lexer(code, lineno))
self.instructions.extend(instructions)
return instructions
def execute(self, code):
for instr in self.lex(code):
try:
self._execute_instruction(instr)
except exceptions.ProgramWarning as why:
print(why)
return self.contents
def _load_handler(self, instr):
if instr.cmd in library.registry:
func = library.registry[instr.cmd]
return self.contents, (func, instr.args, instr.kws)
elif instr.cmd in interpreter_library.registry:
func = interpreter_library.registry[instr.cmd]
return func, (self, instr.args, instr.kws)
raise exceptions.ProgramWarning(
'Unknown instruction (line {}): {}'.format(instr.lineno, instr.cmd)
)
def _execute_instruction(self, instr):
logger.debug('Executing {}'.format(instr.cmd))
handler, args = self._load_handler(instr)
do_debug, self.do_debug = self.do_debug, False
if do_debug:
utils.pdb.set_trace()
try:
handler(*args)
except Exception:
exc, value, tb = sys.exc_info()
if self.do_pm:
logger.error(
'Script exception, line {}: {} (Entering post_mortem)'.format( # noqa
instr.lineno,
value
)
)
utils.pdb.post_mortem(tb)
else:
raise
def execute_script(filename, contents=''):
code = utils.read_file(filename)
return execute_code(code, contents)
def execute_code(code, contents=''):
intrep = Interpreter(contents)
return str(intrep.execute(code))
class Contents:
def __init__(self, contents=None):
self.stack = []
self.set_contents(contents)
def __iter__(self):
return iter(self.contents)
def __len__(self):
return len(self.contents)
def __str__(self):
return '\n'.join(str(c) for c in self)
# def __getitem__(self, index):
# return self.contents[index]
def pop(self):
if self.stack:
self.contents = self.stack.pop()
def __call__(self, func, args, kws):
contents = []
for data in self:
result = func(data, args, kws)
contents.append(result)
self.update(contents)
def merge(self):
if self.contents:
first = self.contents[0]
data = first.merge(self.contents)
self.update([data])
def update(self, contents):
if self.contents:
self.stack.append(self.contents)
self.set_contents(contents)
def set_contents(self, contents):
self.contents = []
if isinstance(contents, (str, bytes)):
contents = [contents]
contents = contents or []
for ct in contents:
if isinstance(ct, (str, bytes)):
ct = DataProxy(ct)
self.contents.append(ct)
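# Contents keeps a history stack: update() pushes the current contents before
# replacing them, and pop() restores the previous snapshot (illustrative):
#
#     >>> contents = Contents('first')
#     >>> contents.update(['second'])   # ['first'] is pushed onto the stack
#     >>> contents.pop()                # the previous contents are restored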
|
dakrauth/snarf
|
snagit/core.py
|
Python
|
mit
| 7,965 | 0.000126 |
import unittest
import os
from sqltxt.table import Table
from sqltxt.column import Column, ColumnName, AmbiguousColumnNameError
from sqltxt.expression import Expression
class TableTest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.join(os.path.dirname(__file__), '../data')
table_header = ["col_a", "col_b"]
table_contents = """1,1
2,3
3,2"""
self.table_a = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
table_header = ["col_a", "col_b"]
table_contents = """1,w
2,x
2,y
5,z"""
self.table_b = Table.from_cmd(
name = 'table_b',
cmd = 'echo -e "{0}"'.format(table_contents),
columns = table_header
)
def test_subset_rows(self):
conditions = [
[Expression('col_b', '==', '1'), 'or', Expression('col_a', '==', '2')]
]
self.table_a.subset_rows(conditions)
cmds_actual = self.table_a.cmds
cmds_expected = [
'echo -e "1,1\n2,3\n3,2"',
"awk -F',' 'OFS=\",\" { if (($2 == 1 || $1 == 2)) { print $1,$2 } }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_order_columns(self):
col_name_order = [ColumnName('col_b'), ColumnName('col_a')]
self.table_a.order_columns(col_name_order)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "awk -F',' 'OFS=\",\" { print $2,$1 }'"]
self.assertEqual(cmds_actual, cmds_expected)
def test_sort(self):
sort_by_col_names = [ColumnName('col_a'), ColumnName('col_b')]
self.table_a.sort(sort_by_col_names)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"', "sort -t, -k 1,1 -k 2,2"]
self.assertEqual(cmds_actual, cmds_expected)
sort_by_cols = [self.table_a.get_column_for_name(cn) for cn in sort_by_col_names]
self.assertEqual(self.table_a.sorted_by, sort_by_cols)
def test_is_sorted_by(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_b'])
table_from_cmd.sorted_by = [Column('table_a.col_a'), Column('table_a.col_b')]
self.assertTrue(table_from_cmd.is_sorted_by([0]))
self.assertFalse(table_from_cmd.is_sorted_by([1]))
self.assertTrue(table_from_cmd.is_sorted_by([0,1]))
def test_get_column_for_name_raises_on_ambiguity(self):
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['col_a', 'col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = ['ta.col_a', 'tb.col_a'])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
first_column = Column('ta.col_a')
first_column.add_name('col_alpha')
second_column = Column('tb.col_a')
table_from_cmd = Table.from_cmd(
name = 'table_a',
cmd = 'echo -e ""',
columns = [first_column, second_column])
with self.assertRaisesRegexp(AmbiguousColumnNameError, 'Ambiguous column reference'):
table_from_cmd.get_column_for_name(ColumnName('col_a'))
def test_sample_rows(self):
self.table_a.sample_rows(1)
cmds_actual = self.table_a.cmds
cmds_expected = ['echo -e "1,1\n2,3\n3,2"',
"""awk -v seed=$RANDOM -v n={0} '
BEGIN {{ srand(seed) }}
NR <= n {{ reservoir[NR] = $0 }}
NR > n {{ M = int(rand() * NR) + 1; if (M <= n) {{ reservoir[M] = $0 }}}}
END {{ for (key in reservoir) {{ print reservoir[key] }}}}'""".format(1)
]
self.assertEqual(cmds_actual, cmds_expected)
def test_get_cmd_str(self):
table_from_file = Table.from_file_path(os.path.join(self.data_path, 'table_a.txt'))
# output from a file-backed Table to STDOUT
cmd_actual = table_from_file.get_cmd_str()
cmd_expected = 'tail -n+2 {}/table_a.txt'.format(self.data_path)
self.assertEqual(cmd_actual, cmd_expected)
table_from_cmd = Table.from_cmd(
'table_a',
cmd = 'echo -e "1,2,3,4"',
columns = ['col_a', 'col_b', 'col_c', 'col_d'])
# output from a command-backed Table to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4"'
self.assertEqual(cmd_actual, cmd_expected)
# add a command, then output
table_from_cmd.cmds += ['sort']
# to STDOUT
cmd_actual = table_from_cmd.get_cmd_str()
cmd_expected = 'echo -e "1,2,3,4" | sort'
self.assertEqual(cmd_actual, cmd_expected)
|
shahin/sqltxt
|
tests/unit/table_test.py
|
Python
|
mit
| 5,179 | 0.013516 |
__author__ = 'mramire8'
from collections import defaultdict
import copy
import numpy as np
from sklearn.linear_model import LogisticRegression
from baselearner import BaseLearner
from sklearn.feature_extraction.text import CountVectorizer
from datautil.textutils import StemTokenizer
show_utilitly = False
class RandomSamplingLearner(BaseLearner):
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None):
super(RandomSamplingLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget,
seed=seed)
def pick_next(self, pool=None, k=1):
try:
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
return [list_pool[index] for index in indices[:k]]
except Exception, err:
print err
raise Exception("Invalid pool object")
#return random.sample(pool, k)
def __str__(self):
string = self.__class__.__name__ #% "(model=" % self.current_model % ", accuracy_model="% self.accuracy_model #\
#% ", budget=" % \
#str(self.budget) % \
#", seed=)"
##", seed=" % str(self.seed) % ")"
return string
def __repr__(self):
        string = "{0}(model={1}, accuracy_model={2}, budget={3}, seed={4})".format(
            self.__class__.__name__, self.current_model, self.accuracy_model,
            self.budget, self.seed)
return string
class BootstrapRandom(BaseLearner):
def __init__(self, random_state=None):
#model=None, cost_model=None, accuracy_model=None, budget=None, seed=1234567
super(BootstrapRandom, self).__init__(seed=random_state)
self.bootstrap = self.pick_next
def pick_next(self, pool=None, k=1):
try:
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
return [list_pool[index] for index in indices[:k]]
except Exception, err:
#raise Exception("Invalid pool object")
print "*" * 10
print "Invalid pool object"
print err
class BootstrapFromEach(BaseLearner):
def __init__(self, seed):
super(BootstrapFromEach, self).__init__(seed=seed)
def bootstrap(self, pool, k=2):
k = int(k / 2)
data = defaultdict(lambda: [])
for i in pool.remaining:
data[pool.target[i]].append(i)
chosen = []
for label in data.keys():
candidates = data[label]
indices = self.randgen.permutation(len(candidates))
chosen.extend([candidates[index] for index in indices[:k]])
return chosen
class UncertaintyLearner(BaseLearner):
# def __init__(self, seed=0, subpool=None):
# super(UncertaintyLearner, self).__init__(seed=seed)
# self.subpool = subpool
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, cost_model=None, subpool=None):
super(UncertaintyLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model, subpool=subpool)
self.model = model
def pick_next(self, pool=None, k=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
candidates = [c for c in pool.data]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
data_point = candidates[i]
prob = self.model.predict_proba(data_point)[0]
maxprob = max(prob)
uncertainty.append(1 - maxprob)
sorted_ind = np.argsort(uncertainty)[::-1]
chosen = [remaining[x] for x in sorted_ind][:k]
return chosen
def train(self, train_data=None, train_labels=None):
'''
        Return a new copy of a retrained classifier based on parameters. If no data, return an un-trained classifier.
:param train_data: training data
:param train_labels: target values
:return: trained classifier
'''
clf = super(UncertaintyLearner, self).train(train_data=train_data, train_labels=train_labels)
self.model = clf
return clf
def __str__(self):
string = "{0}(seed={seed})".format(self.__class__.__name__, seed=self.seed)
return string
def __repr__(self):
        string = "{0}(model={1}, accuracy_model={2}, budget={3}, seed={4})".format(
            self.__class__.__name__, self.current_model, self.accuracy_model,
            self.budget, self.seed)
return string
class AnytimeLearner(BaseLearner):
# def __init__(self, seed=0, subpool=None):
# super(UncertaintyLearner, self).__init__(seed=seed)
# self.subpool = subpool
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None,
cost_model=None):
super(AnytimeLearner, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model)
self.model = model
self.neutral_model = LogisticRegression(penalty='l1')
self.base_neutral = LogisticRegression(penalty='l1')
self.neutral_train = None
self.vct_neutral = None
self.neutral_labels = np.array([])
self.vcn = vcn
self._kvalues = [10, 25, 50, 75, 100]
self.subpool = subpool
def pick_next(self, pool=None, step_size=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
# data_point = candidates[i]
utility, k, unc = self.x_utility(pool.data[i], pool.text[i])
if show_utilitly:
print "%s\t %s \t %.3f" % (i, k, utility)
uncertainty.append([utility, k, unc])
uncertainty = np.array(uncertainty)
unc_copy = uncertainty[:, 0]
sorted_ind = np.argsort(unc_copy, axis=0)[::-1]
chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]] #index and k of chosen
util = [uncertainty[x] for x in sorted_ind[:int(step_size)]]
# print util
## chosen returns the chosen and the k value associated with it
return chosen, util
def x_utility(self, instance, instance_text):
prob = self.model.predict_proba(instance)
unc = 1 - prob.max()
utility = np.array([[self.obj_fn_p2(xik, k) * unc, k] for k, xik in self.getk(instance_text)])
order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order
utility_sorted = utility[order, :]
# print format_list(utility_sorted)
if show_utilitly:
print "\t{0:.5f}".format(unc),
return utility_sorted[0, 0], utility_sorted[0, 1], unc ## return the max
def obj_fn_p2(self, instance_k, k):
## change the text to use the vectorizer
# xik = self.vct_neutral.transform([instance_k])
xik = self.vcn.transform([instance_k])
# neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1]
neu = 1
if self.neutral_model is not None:
neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral
costk = self.predict_cost(k)
utility = neu / costk ## u(x) * N(xk) / C(xk)
# print utility
if show_utilitly:
print "\t{0:.3f}".format(neu),
print "\t{0:.3f}".format(costk),
return utility
def getk(self, doc_text):
'''
        Return a set of subinstances of k words in classifier format
:param doc_text:
:return: set of subinstances of doc_text of fixk size
'''
qk = []
analize = self.vcn.build_tokenizer()
for k in self._kvalues:
qk.append(" ".join(analize(doc_text)[0:k]))
return zip(self._kvalues, qk)
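    # getk yields the document truncated to each fixed word count, e.g. with
    # _kvalues = [10, 25, 50, 75, 100] it returns pairs like
    # (10, first 10 words), (25, first 25 words), ..., (100, first 100 words).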
def update_neutral(self, train_data, train_labels):
## add the neutral instance to the neutral set
## TODO: create the neutral dataset
## recompute neutral data
## eliminate features using the student model
# try:
# coef = self.model.coef_[0]
# names = self.vcn.get_feature_names()
#
# vocab = [names[j] for j in np.argsort(coef)[::-1] if coef[j] != 0]
#
# self.vct_neutral = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 3),
# token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer(), vocabulary=vocab)
#
# train_x = self.vct_neutral.fit_transform(train_data)
# if not isinstance(train_data, list):
try:
clf = copy.copy(self.base_neutral)
clf.fit(train_data, train_labels)
# else:
# clf = None
except ValueError:
clf = None
# print clf
return clf
def train_all(self, train_data=None, train_labels=None, neu_train=None, neu_labels=None):
'''
        Return a new copy of a retrained classifier based on parameters. If no data, return an un-trained classifier.
:param train_data: training data
:param train_labels: target values
:return: trained classifier
@param neu_train:
@param neu_labels:
'''
clf = super(AnytimeLearner, self).train(train_data=train_data, train_labels=train_labels)
self.model = clf
self.neutral_model = self.update_neutral(neu_train, neu_labels)
return clf
def __str__(self):
string = "{0}(seed={seed})".format(self.__class__.__name__, seed=self.seed)
return string
def __repr__(self):
        string = "{0}(model={1}, accuracy_model={2}, budget={3}, seed={4})".format(
            self.__class__.__name__, self.current_model, self.accuracy_model,
            self.budget, self.seed)
return string
class AnytimeLearnerZeroUtility(AnytimeLearner):
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None,
cost_model=None):
super(AnytimeLearnerZeroUtility, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model, vcn=vcn, subpool=subpool)
def x_utility(self, instance, instance_text):
utility = np.array([[self.obj_fn_p2(xik, k), k] for k, xik in self.getk(instance_text)])
order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order
utility_sorted = utility[order, :]
if show_utilitly:
print "\t{0:.5f}".format(1),
return utility_sorted[0, 0], utility_sorted[0, 1], 1 ## return the max
def obj_fn_p2(self, instance_k, k):
## change the text to use the vectorizer
xik = self.vcn.transform([instance_k])
# neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1]
neu = 1
if self.neutral_model is not None:
neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral
costk = self.predict_cost(k)
utility = neu / costk ## N(xk) / C(xk)
# print utility
if show_utilitly:
print "\t{0:.3f}".format(neu),
print "\t{0:.3f}".format(costk),
return utility
class AnytimeLearnerDiff(BaseLearner):
# def __init__(self, seed=0, subpool=None):
# super(UncertaintyLearner, self).__init__(seed=seed)
# self.subpool = subpool
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None,
cost_model=None, lambda_value=None):
super(AnytimeLearnerDiff, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model)
self.model = model
self.neutral_model = LogisticRegression(penalty='l1')
self.base_neutral = LogisticRegression(penalty='l1')
self.neutral_train = None
self.vct_neutral = None
self.neutral_labels = np.array([])
self.vcn = vcn
self._kvalues = [10, 25, 50, 75, 100]
self.subpool = subpool
self.lambda_value = lambda_value
def pick_next(self, pool=None, step_size=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
# data_point = candidates[i]
utility, k, unc = self.x_utility(pool.data[i], pool.text[i])
if show_utilitly:
print "%s\t %s \t %.3f" % (i, k, utility)
uncertainty.append([utility, k, unc])
uncertainty = np.array(uncertainty)
unc_copy = uncertainty[:, 0]
sorted_ind = np.argsort(unc_copy, axis=0)[::-1]
chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]]
util = [uncertainty[x] for x in sorted_ind[:int(step_size)]]
# print util
## chosen returns the chosen and the k value associated with it
return chosen, util
def x_utility(self, instance, instance_text):
prob = self.model.predict_proba(instance)
unc = 1 - prob.max()
utility = np.array([[self.obj_fn_p2(unc, xik, k), k] for k, xik in self.getk(instance_text)])
order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order
utility_sorted = utility[order, :]
# print format_list(utility_sorted)
if show_utilitly:
print "\t{0:.5f}".format(unc),
return utility_sorted[0, 0], utility_sorted[0, 1], unc ## return the max
def obj_fn_p2(self, uncertainty, instance_k, k):
## change the text to use the vectorizer
# xik = self.vct_neutral.transform([instance_k])
xik = self.vcn.transform([instance_k])
# neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1]
neu = 1
if self.neutral_model is not None:
neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral
costk = self.predict_cost(k)
utility = (uncertainty * neu) - (self.lambda_value *costk) ## u(x) * N(xk) / C(xk)
# print utility
if show_utilitly:
print "\t{0:.3f}".format(neu),
print "\t{0:.3f}".format(costk),
return utility
def getk(self, doc_text):
'''
        Return a set of subinstances of k words in classifier format
:param doc_text:
:return: set of subinstances of doc_text of fixk size
'''
qk = []
analize = self.vcn.build_tokenizer()
for k in self._kvalues:
qk.append(" ".join(analize(doc_text)[0:k]))
return zip(self._kvalues, qk)
def update_neutral(self, train_data, train_labels):
## add the neutral instance to the neutral set
## TODO: create the neutral dataset
## recompute neutral data
## eliminate features using the student model
# try:
# coef = self.model.coef_[0]
# names = self.vcn.get_feature_names()
#
# vocab = [names[j] for j in np.argsort(coef)[::-1] if coef[j] != 0]
#
# self.vct_neutral = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 3),
# token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer(), vocabulary=vocab)
#
# train_x = self.vct_neutral.fit_transform(train_data)
if not isinstance(train_data, list):
clf = copy.copy(self.base_neutral)
clf.fit(train_data, train_labels)
else:
clf = None
# except ValueError:
# clf = None
return clf
def train_all(self, train_data=None, train_labels=None, neu_train=None, neu_labels=None):
'''
        Return a new copy of a retrained classifier based on parameters. If no data, return an un-trained classifier.
:param train_data: training data
:param train_labels: target values
:return: trained classifier
@param neu_train:
@param neu_labels:
'''
clf = super(AnytimeLearnerDiff, self).train(train_data=train_data, train_labels=train_labels)
self.model = clf
self.neutral_model = self.update_neutral(neu_train, neu_labels)
return clf
def __str__(self):
string = "{0}(model={1}, neutral-model={2}, subpool={3}, lambda={4})".format(self.__class__.__name__,self.model,self.neutral_model, self.subpool, self.lambda_value)
return string
def __repr__(self):
string = "{0}(model={1}, neutral-model={2}, subpool={3}, lambda={4})".format(self.__class__.__name__,self.model,self.neutral_model, self.subpool, self.lambda_value)
return string
class AnytimeLearnerV2(BaseLearner):
def __init__(self, model=None, accuracy_model=None, budget=None, seed=None, vcn=None, subpool=None,
cost_model=None):
super(AnytimeLearnerV2, self).__init__(model=model, accuracy_model=accuracy_model, budget=budget, seed=seed,
cost_model=cost_model)
self.model = model
self.neutral_model = LogisticRegression(penalty='l1')
self.base_neutral = LogisticRegression(penalty='l1')
self.neutral_train = None
self._kvalues = [10, 25, 50, 75, 100]
self.subpool = subpool
def pick_next(self, pool=None, alldata=None, step_size=1):
list_pool = list(pool.remaining)
indices = self.randgen.permutation(len(pool.remaining))
remaining = [list_pool[index] for index in indices]
uncertainty = []
if self.subpool is None:
self.subpool = len(pool.remaining)
for i in remaining[:self.subpool]:
utility, k = self.x_utility(pool.data[i], i, alldata)
uncertainty.append([utility, k])
uncertainty = np.array(uncertainty)
unc_copy = uncertainty[:, 0]
sorted_ind = np.argsort(unc_copy, axis=0)[::-1]
chosen = [[remaining[x], uncertainty[x, 1]] for x in sorted_ind[:int(step_size)]]
## chosen returns the chosen and the k value associated with it
return chosen
def x_utility(self, instance, instance_index, alldata):
prob = self.model.predict_proba(instance)
unc = 1 - prob.max()
utility = np.array([[self.obj_fn_p2(xik, k) * unc, k] for k, xik in self.getk(instance_index, alldata)])
order = np.argsort(utility[:, 0], axis=0)[::-1] ## descending order
utility_sorted = utility[order, :]
# print utility_sorted
return utility_sorted[0, 0], utility_sorted[0, 1] ## return the max
def obj_fn_p2(self, instance_k, k):
## change the text to use the vectorizer
xik = self.vct_neutral.transform([instance_k])
# neu = self.neutral_model.predict_proba(xik) if self.neutral_model is not None else [1]
neu = 1
if self.neutral_model is not None:
neu = self.neutral_model.predict_proba(xik)[0, 1] # probability of being not-neutral
costk = self.predict_cost(k)
utility = neu / costk ## u(x) * N(xk) / C(xk)
# print utility
return utility
def getk(self, doc_index, alldata):
'''
        Return a set of subinstances of k words in classifier format
:param doc_text:
:return: set of subinstances of doc_text of fixk size
'''
qk = []
for k in self._kvalues:
qk.append(alldata[k].bow.tocsr()[doc_index])
return zip(self._kvalues, qk)
def update_neutral(self, train_data, train_labels):
## add the neutral instance to the neutral set
## TODO: create the neutral dataset
## recompute neutral data
## eliminate features using the student model
try:
coef = self.model.coef_[0]
vocab = [j for j in np.argsort(coef)[::-1] if coef[j] != 0]
clf = copy.copy(self.base_neutral)
clf.fit(train_data.tocsc()[:, vocab], train_labels)
except Exception:
clf = None
return clf
def train_all(self, train_data=None, train_labels=None, neu_train=None, neu_labels=None):
'''
        Return a new copy of a retrained classifier based on parameters. If no data, return an un-trained classifier.
:param train_data: training data
:param train_labels: target values
:return: trained classifier
@param neu_train:
@param neu_labels:
'''
clf = super(AnytimeLearnerV2, self).train(train_data=train_data, train_labels=train_labels)
self.model = clf
self.neutral_model = self.update_neutral(neu_train, neu_labels)
return clf
def __str__(self):
string = "{0}(seed={seed})".format(self.__class__.__name__, seed=self.seed)
return string
def __repr__(self):
        string = "{0}(model={1}, accuracy_model={2}, budget={3}, seed={4})".format(
            self.__class__.__name__, self.current_model, self.accuracy_model,
            self.budget, self.seed)
return string
|
mramire8/active
|
strategy/randomsampling.py
|
Python
|
apache-2.0
| 21,847 | 0.006454 |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import random
import string
import time
import unittest
from google.appengine.ext import db
from mapreduce import control
from mapreduce import hooks
from mapreduce import model
from mapreduce import test_support
from testlib import testutil
from mapreduce.api import map_job
def random_string(length):
"""Generate a random string of given length."""
return "".join(
random.choice(string.letters + string.digits) for _ in range(length))
class TestEntity(db.Model):
"""Test entity class."""
class TestHooks(hooks.Hooks):
"""Test hooks class."""
enqueue_kickoff_task_calls = []
def enqueue_kickoff_task(self, task, queue_name):
TestHooks.enqueue_kickoff_task_calls.append((task, queue_name))
def test_handler(entity):
"""Test handler function."""
pass
class ControlTest(testutil.HandlerTestBase):
"""Tests for control module."""
QUEUE_NAME = "crazy-queue"
def setUp(self):
testutil.HandlerTestBase.setUp(self)
TestHooks.enqueue_kickoff_task_calls = []
def get_mapreduce_spec(self, task):
"""Get mapreduce spec form kickoff task payload."""
payload = test_support.decode_task_payload(task)
return model.MapreduceSpec.from_json_str(payload["mapreduce_spec"])
def validate_map_started(self, mapreduce_id, queue_name=None):
"""Tests that the map has been started."""
queue_name = queue_name or self.QUEUE_NAME
self.assertTrue(mapreduce_id)
# Note: only a kickoff job is pending at this stage, shards come later.
tasks = self.taskqueue.GetTasks(queue_name)
self.assertEquals(1, len(tasks))
# Checks that tasks are scheduled into the future.
task = tasks[0]
self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id,
task["url"])
handler = test_support.execute_task(task)
self.assertEqual(mapreduce_id, handler.request.get("mapreduce_id"))
state = model.MapreduceState.get_by_job_id(mapreduce_id)
params = map_job.JobConfig._get_default_mr_params()
params.update({"foo": "bar",
"base_path": "/mapreduce_base_path",
"queue_name": queue_name})
self.assertEqual(state.mapreduce_spec.params, params)
return task["eta"]
def testStartMap(self):
"""Test start_map function.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME)
self.validate_map_started(mapreduce_id)
def testStartMap_Countdown(self):
"""Test that MR can be scheduled into the future.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
# MR should be scheduled into the future.
now_sec = long(time.time())
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
countdown=1000)
task_eta = self.validate_map_started(mapreduce_id)
eta_sec = time.mktime(time.strptime(task_eta, "%Y/%m/%d %H:%M:%S"))
self.assertTrue(now_sec + 1000 <= eta_sec)
def testStartMap_Eta(self):
"""Test that MR can be scheduled into the future.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
# MR should be scheduled into the future.
eta = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
eta=eta)
task_eta = self.validate_map_started(mapreduce_id)
self.assertEquals(eta.strftime("%Y/%m/%d %H:%M:%S"), task_eta)
def testStartMap_QueueEnvironment(self):
"""Test that the start_map inherits its queue from the enviornment."""
TestEntity().put()
shard_count = 4
os.environ["HTTP_X_APPENGINE_QUEUENAME"] = self.QUEUE_NAME
try:
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path")
finally:
del os.environ["HTTP_X_APPENGINE_QUEUENAME"]
self.validate_map_started(mapreduce_id)
def testStartMap_Hooks(self):
"""Tests that MR can be scheduled with a hook class installed.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name="crazy-queue",
hooks_class_name=__name__+"."+TestHooks.__name__)
self.assertTrue(mapreduce_id)
task, queue_name = TestHooks.enqueue_kickoff_task_calls[0]
self.assertEqual("/mapreduce_base_path/kickoffjob_callback/" + mapreduce_id,
task.url)
self.assertEqual("crazy-queue", queue_name)
def testStartMap_RaisingHooks(self):
"""Tests that MR can be scheduled with a dummy hook class installed.
The dummy hook class raises NotImplementedError for all method calls so the
default scheduling logic should be used.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name="crazy-queue",
hooks_class_name=hooks.__name__+"."+hooks.Hooks.__name__)
self.validate_map_started(mapreduce_id)
def testStartMap_HugePayload(self):
"""Test start_map function.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = ""
mapreduce_id = control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"huge_parameter": random_string(900000)
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME)
self.validate_map_started(mapreduce_id)
def testStartMapTransactional(self):
"""Test start_map function.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = ""
@db.transactional(xg=True)
def tx():
some_entity = TestEntity()
some_entity.put()
return control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
in_xg_transaction=True)
mapreduce_id = tx()
self.validate_map_started(mapreduce_id)
def testStartMapTransactional_HugePayload(self):
"""Test start_map function.
Most of start_map functionality is already tested by handlers_test.
Just a smoke test is enough.
"""
TestEntity().put()
shard_count = 4
mapreduce_id = ""
@db.transactional(xg=True)
def tx():
some_entity = TestEntity()
some_entity.put()
return control.start_map(
"test_map",
__name__ + ".test_handler",
"mapreduce.input_readers.DatastoreInputReader",
{
"entity_kind": __name__ + "." + TestEntity.__name__,
"huge_parameter": random_string(900000)
},
shard_count,
mapreduce_parameters={"foo": "bar"},
base_path="/mapreduce_base_path",
queue_name=self.QUEUE_NAME,
in_xg_transaction=True)
mapreduce_id = tx()
self.validate_map_started(mapreduce_id)
if __name__ == "__main__":
unittest.main()
|
rolepoint/appengine-mapreduce
|
python/test/mapreduce/control_test.py
|
Python
|
apache-2.0
| 10,366 | 0.00328 |
# -*- coding: utf-8 -*-
#
# This file is part of Linux Show Player
#
# Copyright 2012-2016 Francesco Ceruti <ceppofrancy@gmail.com>
#
# Linux Show Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Linux Show Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Linux Show Player. If not, see <http://www.gnu.org/licenses/>.
import mido
from lisp.modules.midi.midi_common import MIDICommon
from lisp.modules.midi.midi_utils import mido_backend, mido_port_name
class MIDIOutput(MIDICommon):
def __init__(self, port_name='AppDefault'):
super().__init__(port_name=port_name)
def send_from_str(self, str_message):
self.send(mido.parse_string(str_message))
def send(self, message):
self._port.send(message)
def open(self):
port_name = mido_port_name(self._port_name, 'O')
self._port = mido_backend().open_output(port_name)
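# Illustrative sketch (the port name is an example and must exist in the
# configured mido backend):
#
#     >>> out = MIDIOutput('AppDefault')
#     >>> out.open()
#     >>> out.send_from_str('note_on channel=0 note=60 velocity=64 time=0')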
|
FrancescoCeruti/linux-show-player
|
lisp/modules/midi/midi_output.py
|
Python
|
gpl-3.0
| 1,339 | 0 |
# ! /usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author = lucas.wang
@create_time = 2018-01-12
"""
from xml.etree import ElementTree as ET
import fnmatch
class change_xml(object):
"""
    Update the url attributes in an AutoupdateService xml file and drop excluded entries.
"""
names = ['iConn.CreateXmlTools.vshost.exe', 'AutoUpdater.dll', 'NewTonsoft.Json.dll',
'Oracle.ManagedDataAccess.dll', 'Renci.SshNet.dll', 'Renci.SshNet.xml', 'zxing.dll', 'Images/ARX_HK.png']
old_path_name = r"http://172.16.1.81:8081/UpdateClient/"
def __init__(self, path_name, file_path, file_name="AutoupdateService.xml"):
"""
        Init file path and name
        :param path_name: new base url to substitute for the old one
        :param file_path: directory containing the xml file
        :param file_name: xml file name
"""
self.file_path = file_path
self.file_name = file_name
self.tree = ET.parse(file_path + file_name)
self.path_name = path_name
def read_xml(self):
"""
Read xml file
:return:
"""
root = self.tree.getroot()
# print(root)
for item in root.getchildren():
# root.iter("file"):
# print(item.get("url"))
item.set("url", item.get('url').replace(self.old_path_name, self.path_name))
if fnmatch.filter(self.names, item.get('path')):
root.remove(item)
self.write_xml()
def write_xml(self):
self.tree.write(self.file_path + self.file_name)
print("Update xml file success. file: " + self.file_path + self.file_name)
if __name__ == '__main__':
"""
Test use
"""
read = change_xml(r'http://172.16.1.81:8081/UpdateClient/',
r'D:\\CodeWorkspace\\iConnAll\\Client-dev\\iConn.CreateXmlTools\\bin\\Release\\')
read.read_xml()
|
Lucas-Wong/ToolsProject
|
CodeReview/Change_Xml.py
|
Python
|
gpl-3.0
| 1,695 | 0.00767 |
from builtins import str
from builtins import object
from django.db import models
from django.db.utils import OperationalError, ProgrammingError
from django.core import checks
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# FW_DOCUMENT_VERSION:
# Also defined in frontend
# document/static/js/modules/schema/index.js
FW_DOCUMENT_VERSION = 3.4
class DocumentTemplate(models.Model):
title = models.CharField(max_length=255, default="", blank=True)
import_id = models.CharField(max_length=255, default="", blank=True)
content = models.JSONField(default=dict)
doc_version = models.DecimalField(
max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
on_delete=models.deletion.CASCADE,
)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
auto_delete = True
def __str__(self):
return self.title
def is_deletable(self):
reverse_relations = [
f
for f in self._meta.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created
and not f.concrete
and f.name not in ["documentstyle", "exporttemplate"]
]
for r in reverse_relations:
if r.remote_field.model.objects.filter(
**{r.field.name: self}
).exists():
return False
return True
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_doc_versions(**kwargs))
return errors
@classmethod
def _check_doc_versions(cls, **kwargs):
try:
if len(
cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION))
):
return [
checks.Warning(
"Document templates need to be upgraded. Please "
"navigate to /admin/document/document/maintenance/ "
"with a browser as a superuser and upgrade all "
"document templates on this server.",
obj=cls,
)
]
else:
return []
except (ProgrammingError, OperationalError):
# Database has not yet been initialized, so don't throw any error.
return []
class Document(models.Model):
title = models.CharField(max_length=255, default="", blank=True)
path = models.TextField(default="", blank=True)
content = models.JSONField(default=dict)
doc_version = models.DecimalField(
max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
)
# The doc_version is the version of the data format in the content field.
# We upgrade the content field in JavaScript and not migrations so that
# the same code can be used for migrations and for importing old fidus
# files that are being uploaded. This field is only used for upgrading data
# and is therefore not handed to the editor or document overview page.
version = models.PositiveIntegerField(default=0)
diffs = models.JSONField(default=list, blank=True)
# The last few diffs that were received and approved. The number of stored
# diffs should always be equivalent to or more than all the diffs since the
# last full save of the document.
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="owner",
on_delete=models.deletion.CASCADE,
)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
comments = models.JSONField(default=dict, blank=True)
bibliography = models.JSONField(default=dict, blank=True)
# Whether or not document is listed on document overview list page.
# True by default and for all normal documents. Can be set to False when
# documents are added in plugins that list these documents somewhere else.
listed = models.BooleanField(default=True)
template = models.ForeignKey(
DocumentTemplate, on_delete=models.deletion.CASCADE
)
def __str__(self):
if len(self.title) > 0:
return "%(title)s (%(id)s)" % {"title": self.title, "id": self.id}
else:
return str(self.id)
class Meta(object):
ordering = ["-id"]
def clean(self, *args, **kwargs):
if self.comments is None:
self.comments = "{}"
if self.bibliography is None:
self.bibliography = "{}"
def save(self, *args, **kwargs):
self.clean()
super().save(*args, **kwargs)
def get_absolute_url(self):
return "/document/%i/" % self.id
def is_deletable(self):
reverse_relations = [
f
for f in self._meta.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created
and not f.concrete
and f.name
not in [
"accessright",
"accessrightinvite",
"documentrevision",
"documentimage",
]
]
for r in reverse_relations:
if r.remote_field.model.objects.filter(
**{r.field.name: self}
).exists():
return False
return True
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_doc_versions(**kwargs))
return errors
@classmethod
def _check_doc_versions(cls, **kwargs):
try:
if len(
cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION))
):
return [
checks.Warning(
"Documents need to be upgraded. Please navigate to "
"/admin/document/document/maintenance/ with a browser "
"as a superuser and upgrade all documents on this "
"server.",
obj=cls,
)
]
else:
return []
except (ProgrammingError, OperationalError):
# Database has not yet been initialized, so don't throw any error.
return []
RIGHTS_CHOICES = (
("write", "Writer"),
# Can write content and can read+write comments.
# Can chat with collaborators.
# Has read access to revisions.
("write-tracked", "Write with tracked changes"),
# Can write tracked content and can read/write comments.
# Cannot turn off tracked changes.
# Can chat with collaborators.
# Has read access to revisions.
("comment", "Commentator"),
# Can read content and can read+write comments.
# Can chat with collaborators.
# Has read access to revisions.
("review-tracked", "Reviewer who can write with tracked changes"),
# Can write tracked content and can read/write his own comments.
# Cannot turn off tracked changes.
# Cannot chat with collaborators.
# Has no access to revisions.
("review", "Reviewer"),
# Can read the content and can read/write his own comments.
# Comments by users with this access right only show the user's
# numeric ID, not their username.
# Cannot chat with collaborators nor see that they are connected.
# Has no access to revisions.
("read", "Reader"),
# Can read content, including comments
# Can chat with collaborators.
# Has read access to revisions.
("read-without-comments", "Reader without comment access"),
# Can read content, but not the comments.
# Cannot chat with collaborators.
# Has no access to revisions.
)
# Reviewers and commentators can only comment on, not edit, the document
COMMENT_ONLY = ("review", "comment")
CAN_UPDATE_DOCUMENT = [
"write",
"write-tracked",
"review",
"review-tracked",
"comment",
]
# Whether the collaborator is allowed to know about other collaborators
# and communicate with them.
CAN_COMMUNICATE = ["read", "write", "comment", "write-tracked"]
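# A minimal illustration of how these constants are typically consulted when
# handling a connected collaborator (hypothetical variable names, not part of
# the original module):
#
#   right = access_right.rights
#   if right in CAN_UPDATE_DOCUMENT:
#       ...  # accept document diffs from this participant
#   if right in CAN_COMMUNICATE:
#       ...  # expose the participant list and allow chat
#   if right in COMMENT_ONLY:
#       ...  # reject content changes but accept comment updates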
class AccessRight(models.Model):
document = models.ForeignKey(Document, on_delete=models.deletion.CASCADE)
path = models.TextField(default="", blank=True)
holder_choices = models.Q(app_label="user", model="user") | models.Q(
app_label="user", model="userinvite"
)
holder_type = models.ForeignKey(
ContentType, on_delete=models.CASCADE, limit_choices_to=holder_choices
)
holder_id = models.PositiveIntegerField()
holder_obj = GenericForeignKey("holder_type", "holder_id")
rights = models.CharField(
max_length=21, choices=RIGHTS_CHOICES, blank=False
)
class Meta(object):
unique_together = (("document", "holder_type", "holder_id"),)
def __str__(self):
return "%(name)s %(rights)s on %(doc_id)d" % {
"name": self.holder_obj.readable_name,
"rights": self.rights,
"doc_id": self.document.id,
}
def revision_filename(instance, filename):
return "document-revisions/{id}.fidus".format(id=instance.pk)
class DocumentRevision(models.Model):
document = models.ForeignKey(Document, on_delete=models.deletion.CASCADE)
doc_version = models.DecimalField(
max_digits=3, decimal_places=1, default=FW_DOCUMENT_VERSION
)
note = models.CharField(max_length=255, default="", blank=True)
date = models.DateTimeField(auto_now=True)
file_object = models.FileField(upload_to=revision_filename)
file_name = models.CharField(max_length=255, default="", blank=True)
def save(self, *args, **kwargs):
if self.pk is None:
# We remove the file_object the first time so that we can use the
# pk as the name of the saved revision file.
file_object = self.file_object
self.file_object = None
super().save(*args, **kwargs)
self.file_object = file_object
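            # The row already exists after the first save, so drop any
            # force_insert flag and let the second save run as an UPDATE that
            # stores the file under the now-known pk.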
kwargs.pop("force_insert", None)
super(DocumentRevision, self).save(*args, **kwargs)
def __str__(self):
if len(self.note) > 0:
return "%(note)s (%(id)s) of %(doc_id)s" % {
"note": self.note,
"id": self.id,
"doc_id": self.document.id,
}
else:
return "%(id)s of %(doc_id)s" % {
"id": self.id,
"doc_id": self.document.id,
}
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_doc_versions(**kwargs))
return errors
@classmethod
def _check_doc_versions(cls, **kwargs):
try:
if len(
cls.objects.filter(doc_version__lt=str(FW_DOCUMENT_VERSION))
):
return [
checks.Warning(
"Document revisions need to be upgraded. Please "
"navigate to /admin/document/document/maintenance/ "
"with a browser as a superuser and upgrade all "
"document revisions on this server.",
obj=cls,
)
]
else:
return []
except (ProgrammingError, OperationalError):
# Database has not yet been initialized, so don't throw any error.
return []
|
fiduswriter/fiduswriter
|
fiduswriter/document/models.py
|
Python
|
agpl-3.0
| 11,689 | 0 |
"""===========================
Geneset analysis
===========================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Overview
========
This pipeline performs gene set analysis of one or more
genesets.
Input data are two collections of files, genelists and pathways.
Genelists are tabular data with a gene for each row and associated
attributes in additional columns such as expression level, probability
of being called differentially expressed, etc.
Pathways are tabular data linking genes to pathways that they exist
in.
Generally, it performs the following tasks:
1. The pipeline merges separately prepared gene lists into
a single gene list matrix. There is a continuous scale
version (P-Values, expression values, ...) and a thresholded
version (0 and 1 for genelist membership).
2. The pipeline builds a matrix of gene list annotations to
test against. To this end, it collects:
ENSEMBL GO annotations
KEGG Pathways
User supplied pathways
GSEA database signatures
3. The pipeline performs various gene set enrichment analyses.
These are:
1. Hypergeometric GO analysis
2. Gene set enrichment analysis
4. The pipeline creates various QC metrics. To this end it looks
for biases in any of the gene lists supplied. Biases the pipeline
looks at are:
1. Gene length
2. Nucleotide composition
3. Gene intron/exon structure
4. User supplied table with biases.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
The sphinxreport report requires a :file:`conf.py` and
:file:`sphinxreport.ini` file (see :ref:`PipelineReporting`). To start
with, use the files supplied with the Example_ data.
Input
-----
Optional inputs
+++++++++++++++
Requirements
------------
The pipeline requires the results from
:doc:`pipeline_annotations`. Set the configuration variables
:py:data:`annotations_database` and :py:data:`annotations_dir`.
On top of the default CGAT setup, the pipeline requires the following
software to be in the path:
+----------+-----------+---------------------------+
|*Program* |*Version* |*Purpose* |
+----------+-----------+---------------------------+
| | | |
+----------+-----------+---------------------------+
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Glossary
========
.. glossary::
Code
====
"""
from ruffus import *
import sys
import os
import glob
import sqlite3
import pandas
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Database as Database
import CGAT.SetTools as SetTools
import CGATPipelines.PipelineGO as PipelineGO
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
PARAMS.update(P.peekParameters(
PARAMS["annotations_dir"],
"pipeline_annotations.py",
prefix="annotations_",
update_interface=True))
# Update the PARAMS dictionary in any PipelineModules
# e.g.:
# import CGATPipelines.PipelineGeneset as PipelineGeneset
# PipelineGeneset.PARAMS = PARAMS
def connect():
'''connect to database.
Use this method to connect to additional databases.
Returns a database connection.
'''
dbh = sqlite3.connect(PARAMS["database_name"])
statement = '''ATTACH DATABASE '%s' as annotations''' % (
PARAMS["annotations_database"])
cc = dbh.cursor()
cc.execute(statement)
cc.close()
return dbh
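# Example usage (a minimal sketch; "annotations.go_assignments" is a
# hypothetical table name): after ATTACH, tables in the annotations database
# are addressed with an "annotations." prefix, e.g.
#
#   dbh = connect()
#   cc = dbh.execute("SELECT COUNT(*) FROM annotations.go_assignments")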
@transform('genelists.dir/*.tsv.gz',
suffix(".tsv.gz"),
".load")
def loadGeneLists(infile, outfile):
'''load gene list data into database.'''
P.load(infile, outfile,
tablename="genelist_%s" % P.toTable(outfile))
@merge('genelists.dir/*.tsv.gz', 'genelists.tsv.gz')
def buildGeneListMatrix(infiles, outfile):
'''build a gene list matrix for simple pathway analysis
based on hypergeometric test.
A gene list is derived from a gene set by
applying thresholds to the input data set. The
thresholds are defined in the configuration file.
'''
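    # The thresholds come from pipeline.ini; an illustrative snippet for a
    # track "mygenelist" (i.e. a file genelists.dir/mygenelist.tsv.gz), with
    # hypothetical field names and cut-offs:
    #
    #   mygenelist_foreground_field=qvalue
    #   mygenelist_foreground_min_threshold=0
    #   mygenelist_foreground_max_threshold=0.05
    #   mygenelist_background_field=qvalue
    #   mygenelist_background_min_threshold=0
    #   mygenelist_background_max_threshold=1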
genesets = []
backgrounds = []
headers = []
for infile in infiles:
genelist = pandas.read_csv(
IOTools.openFile(infile),
index_col=0,
sep='\t')
track = P.snip(os.path.basename(infile), ".tsv.gz")
headers.append(track)
field = PARAMS[P.matchParameter("%s_foreground_field" % track)]
min_threshold = PARAMS[P.matchParameter(
"%s_foreground_min_threshold" % track)]
max_threshold = PARAMS[P.matchParameter(
"%s_foreground_max_threshold" % track)]
genesets.append(set(genelist[
(genelist[field] >= min_threshold) &
(genelist[field] <= max_threshold)].index))
E.info('%s: foreground: %f <= %s <= %f' % (track,
min_threshold,
field,
max_threshold))
field = PARAMS[P.matchParameter("%s_background_field" % track)]
min_threshold = PARAMS[P.matchParameter(
"%s_background_min_threshold" % track)]
max_threshold = PARAMS[P.matchParameter(
"%s_background_max_threshold" % track)]
E.info('%s: background: %f <= %s <= %f' % (track,
min_threshold,
field,
max_threshold))
backgrounds.append(set(genelist[
(genelist[field] >= min_threshold) &
(genelist[field] <= max_threshold)].index))
E.info("%s: fg=%i, bg=%i" % (track,
len(genesets[-1]),
len(backgrounds[-1])))
E.info("writing gene list matrix")
with IOTools.openFile(outfile, "w") as outf:
SetTools.writeSets(outf, genesets, labels=headers)
with IOTools.openFile(outfile + ".bg.tsv.gz", "w") as outf:
SetTools.writeSets(outf, backgrounds, labels=headers)
E.info("writing intersection/union matrix")
# build set intersection matrix
matrix = SetTools.unionIntersectionMatrix(genesets)
with IOTools.openFile(outfile + ".matrix.gz", "w") as outf:
IOTools.writeMatrix(outf, matrix, headers, headers)
matrix = SetTools.unionIntersectionMatrix(backgrounds)
with IOTools.openFile(outfile + ".bg.matrix.gz", "w") as outf:
IOTools.writeMatrix(outf, matrix, headers, headers)
@transform(buildGeneListMatrix,
suffix(".tsv.gz"),
".load")
def loadGeneListMatrix(infile, outfile):
    '''load gene list matrix into table.'''
track = P.snip(infile, ".tsv.gz")
P.load(infile, outfile, tablename="%s_foreground" % track)
P.load(infile + ".bg.tsv.gz", outfile, tablename="%s_background" % track)
@transform('pathways.dir/*.tsv.gz',
regex('.*/(.*).tsv.gz'),
r"pathways_\1.load")
def loadPathways(infile, outfile):
'''load pathway information into database.'''
P.load(infile, outfile, "--add-index=gene_id --add-index=go_id")
@follows(mkdir('hypergeometric.dir'))
@transform('pathways.dir/*.tsv.gz',
regex('.*/(.*).tsv.gz'),
add_inputs(buildGeneListMatrix),
r'hypergeometric.dir/\1.tsv')
def runHypergeometricAnalysis(infiles, outfile):
'''run pathway analysis on pathway files in
the directory pathways.dir.
'''
infile_pathways, infile_genelist = infiles
infile_background = infile_genelist + ".bg.tsv.gz"
# TODO:
# gene annotations
# category annotations
#
# os.path.join(
# PARAMS["annotations_dir"],
# PARAMS_ANNOTATIONS["interface_go_obo"]),
PipelineGO.runGOFromFiles(
outfile=outfile,
outdir=outfile + ".dir",
fg_file=infile_genelist,
bg_file=infile_background,
go_file=infile_pathways,
ontology_file=None,
minimum_counts=PARAMS["hypergeometric_minimum_counts"],
pairs=False,
gene2name=None)
def computePathwayBiases(infile, outfile):
pass
@transform(runHypergeometricAnalysis,
suffix(".tsv"),
r"\1.load")
def loadHypergeometricAnalysis(infile, outfile):
'''load GO results.'''
track = P.toTable(outfile)
tablename = 'hypergeometric_%s_summary' % track
P.load(infile, outfile, tablename=tablename)
dbh = connect()
ontologies = [x[0] for x in Database.executewait(
dbh,
'''SELECT DISTINCT ontology FROM %s''' % tablename).fetchall()]
genelists = [x[0] for x in Database.executewait(
dbh,
'''SELECT DISTINCT genelist FROM %s''' % tablename).fetchall()]
# output files from runGO.py
sections = ('results', 'parameters', 'withgenes')
for section in sections:
tablename = 'hypergeometric_%s_%s' % (track, section)
load_statement = P.build_load_statement(
tablename=tablename)
statement = '''
cgat combine_tables
--cat=track
--regex-filename="hypergeometric.dir/%(track)s.tsv.dir/(\S+).%(section)s"
hypergeometric.dir/%(track)s.tsv.dir/*.%(section)s
| %(load_statement)s
>> %(outfile)s'''
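        # P.run() picks up the local ``statement`` from the calling frame and
        # fills the %(...)s placeholders from PARAMS and the local variables
        # (track, section, load_statement, outfile) before executing it.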
P.run()
for ontology in ontologies:
fn = os.path.join(infile + ".dir", "all_alldesc.%s.l2fold" % ontology)
if not os.path.exists(fn):
E.warn("file %s does not exist" % fn)
continue
P.load(fn,
outfile,
tablename='hypergeometric_%s_%s_l2fold' % (track, ontology),
options='--allow-empty-file')
fn = os.path.join(
infile + ".dir", "all_alldesc.%s.l10pvalue" % ontology)
P.load(fn,
outfile,
tablename='hypergeometric_%s_%s_l10pvalue' % (track, ontology),
options='--allow-empty-file')
fn = os.path.join(
infile + ".dir", "all_alldesc.%s.l10qvalue" % ontology)
P.load(fn,
outfile,
tablename='hypergeometric_%s_%s_l10qvalue' % (track, ontology),
options='--allow-empty-file')
@merge(runHypergeometricAnalysis,
"hypergeometric_summary.load")
def loadHypergeometricResultsSummary(infiles, outfile):
'''load GO summary results.'''
infiles = glob.glob("hypergeometric.dir/*/*.parameters")
P.mergeAndLoad(infiles, outfile)
@collate("hypergeometric.dir/go.tsv.dir/*.results",
regex(r"hypergeometric.dir/go.tsv.dir/(.*)\.(.*).results"),
r"hypergeometric.go.dir/go.tsv.dir/\1.revigo")
def plotGOResults(infiles, outfile):
    '''plot GO results using revigo.'''
infiles = " ".join(infiles)
track = P.snip(outfile, ".revigo")
statement = '''
cat %(infiles)s
| cgat revigo
--go-tsv-file=%(annotations_filename_go)s
--output-filename-pattern=%(track)s.%%s
--ontology=all
--max-similarity=0.5
--reverse-palette
--force-output
-v 2
> %(outfile)s
'''
P.run()
@follows(loadPathways,
loadGeneLists,
loadGeneListMatrix,
loadHypergeometricAnalysis)
def full():
pass
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report.'''
E.info("publishing report")
P.publish_report()
if __name__ == "__main__":
# P.checkFiles( ("genome.fasta", "genome.idx" ) )
sys.exit(P.main(sys.argv))
|
CGATOxford/CGATPipelines
|
obsolete/pipeline_genesets.py
|
Python
|
mit
| 12,292 | 0.000163 |
import dns
import requests
import socket
from recursortests import RecursorTest
class RootNXTrustRecursorTest(RecursorTest):
def getOutgoingQueriesCount(self):
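        # Query the recursor web API for its statistics and return the current
        # value of the 'all-outqueries' counter.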
headers = {'x-api-key': self._apiKey}
url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics'
r = requests.get(url, headers=headers, timeout=self._wsTimeout)
self.assertTrue(r)
self.assertEquals(r.status_code, 200)
self.assertTrue(r.json())
content = r.json()
for entry in content:
if entry['name'] == 'all-outqueries':
return int(entry['value'])
return 0
class testRootNXTrustDisabled(RootNXTrustRecursorTest):
_confdir = 'RootNXTrustDisabled'
_wsPort = 8042
_wsTimeout = 2
_wsPassword = 'secretpassword'
_apiKey = 'secretapikey'
_config_template = """
root-nx-trust=no
qname-minimization=no
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)
def testRootNXTrust(self):
"""
Check that, with root-nx-trust disabled, we still query the root for www2.nx-example.
        after receiving an NXD from "." for nx-example. as an answer for www.nx-example.
"""
        # first query www.nx-example.
before = self.getOutgoingQueriesCount()
query = dns.message.make_query('www.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
print(res)
self.assertAuthorityHasSOA(res)
# check that we sent one query to the root
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
        # then query www2.nx-example.
before = after
query = dns.message.make_query('www2.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
self.assertAuthorityHasSOA(res)
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
class testRootNXTrustEnabled(RootNXTrustRecursorTest):
_confdir = 'RootNXTrustEnabled'
_wsPort = 8042
_wsTimeout = 2
_wsPassword = 'secretpassword'
_apiKey = 'secretapikey'
_config_template = """
root-nx-trust=yes
webserver=yes
webserver-port=%d
webserver-address=127.0.0.1
webserver-password=%s
api-key=%s
""" % (_wsPort, _wsPassword, _apiKey)
def testRootNXTrust(self):
"""
Check that, with root-nx-trust enabled, we don't query the root for www2.nx-example.
        after receiving an NXD from "." for nx-example. as an answer for www.nx-example.
"""
        # first query www.nx-example.
before = self.getOutgoingQueriesCount()
query = dns.message.make_query('www.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
print(res)
self.assertAuthorityHasSOA(res)
# check that we sent one query to the root
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before + 1)
        # then query www2.nx-example.
before = after
query = dns.message.make_query('www2.nx-example.', 'A')
res = self.sendUDPQuery(query)
self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
self.assertAuthorityHasSOA(res)
after = self.getOutgoingQueriesCount()
self.assertEqual(after, before)
|
cyclops1982/pdns
|
regression-tests.recursor-dnssec/test_RootNXTrust.py
|
Python
|
gpl-2.0
| 3,474 | 0.002303 |
# -*- coding: utf-8 -*-
import json
def json_pre_process_hook(action, request, *args, **kwargs):
json_data = request.body
if not json_data:
action.ret('002').msg('json_params_required')
return False
try:
param_dict = json.loads(json_data)
except ValueError:
action.ret('003').msg('json_params_invalid')
return False
for key, value in param_dict.items():
setattr(action, key, value)
return True
def query_pre_process_hook(action, request, *args, **kwargs):
params_dict = request.GET
if not params_dict:
return True
for key, value in params_dict.items():
setattr(action, key, value)
return True
def form_pre_process_hook(action, request, *args, **kwargs):
param_dict = request.POST
if not param_dict:
action.ret('004').msg('form_params_required')
return False
for key, value in param_dict.items():
setattr(action, key, value)
return True
def jsonp_post_render_hook(action):
if action.jsonp_callback:
action.resp_data_json(
action.jsonp_callback + '('
+ action.resp_data_json + ')',
)
else:
action.ret('005').msg('jsonp_callback_required')
if action._data:
del action._data
action.render()
return False
return True
|
Alexoner/health-care-demo
|
careHealth/earth/action/hooks.py
|
Python
|
gpl-2.0
| 1,374 | 0 |
#!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['user_keyname'] = os.environ['edge_user_name']
notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'],
notebook_config['exploratory_name'], args.uuid)
notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
os.environ['application'])
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'])
notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge'
edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get('Public')
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
print('[CONFIGURE PROXY ON JUPYTER INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
    # installing and configuring jupyter and all dependencies
try:
logging.info('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
print('[CONFIGURE JUPYTER NOTEBOOK INSTANCE]')
params = "--hostname {} " \
"--keyfile {} " \
"--region {} " \
"--spark_version {} " \
"--hadoop_version {} " \
"--os_user {} " \
"--scala_version {} " \
"--r_mirror {} " \
"--exploratory_name {}".\
format(instance_hostname,
keyfile_name,
os.environ['aws_region'],
os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'],
notebook_config['dlab_ssh_user'],
os.environ['notebook_scala_version'],
os.environ['notebook_r_mirror'],
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_jupyter_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure jupyter.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": notebook_config['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
append_result("Failed installing users key")
raise Exception
except Exception as err:
append_result("Failed installing users key.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP USER GIT CREDENTIALS]')
logging.info('[SETUP USER GIT CREDENTIALS]')
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
try:
local("~/scripts/{}.py {}".format('common_download_git_certfile', params))
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
append_result("Failed setup git credentials")
raise Exception
except Exception as err:
append_result("Failed to setup git credentials.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[POST CONFIGURING PROCESS]')
        print('[POST CONFIGURING PROCESS]')
if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
params = "--hostname {} --keyfile {} --os_user {} --nb_tag_name {} --nb_tag_value {}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
notebook_config['tag_name'], notebook_config['instance_name'])
try:
local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to post configuring instance.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
additional_info = {
'instance_hostname': instance_hostname,
'tensor': False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
notebook_config['dlab_ssh_user'],
'jupyter',
notebook_config['exploratory_name'],
json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
append_result("Failed to set edge reverse proxy template.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING AMI]')
ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '':
print("Looks like it's first time we configure notebook server. Creating image.")
image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
instance_name=notebook_config['instance_name'],
image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
append_result("Failed creating image.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_dns_url = "http://" + dns_name + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['instance_name']))
print("Private DNS: {}".format(dns_name))
print("Private IP: {}".format(ip_address))
print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
print("Instance type: {}".format(notebook_config['instance_type']))
print("Key name: {}".format(notebook_config['key_name']))
print("User key name: {}".format(notebook_config['user_keyname']))
print("Image name: {}".format(notebook_config['notebook_image_name']))
print("Profile name: {}".format(notebook_config['role_profile_name']))
print("SG name: {}".format(notebook_config['security_group_name']))
print("Jupyter URL: {}".format(jupyter_ip_url))
print("Jupyter URL: {}".format(jupyter_dns_url))
print("Ungit URL: {}".format(ungit_ip_url))
print("ReverseProxyNotebook".format(jupyter_notebook_acces_url))
print("ReverseProxyUngit".format(jupyter_ungit_acces_url))
print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.
format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
with open("/root/result.json", 'w') as result:
res = {"hostname": dns_name,
"ip": ip_address,
"instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
"master_keyname": os.environ['conf_key_name'],
"notebook_name": notebook_config['instance_name'],
"notebook_image_name": notebook_config['notebook_image_name'],
"Action": "Create new notebook server",
"exploratory_url": [
{"description": "Jupyter",
"url": jupyter_notebook_acces_url},
{"description": "Ungit",
"url": jupyter_ungit_acces_url},
{"description": "Jupyter (via tunnel)",
"url": jupyter_ip_url},
{"description": "Ungit (via tunnel)",
"url": ungit_ip_url}
]}
result.write(json.dumps(res))
|
epam/DLab
|
infrastructure-provisioning/src/general/scripts/aws/jupyter_configure.py
|
Python
|
apache-2.0
| 15,517 | 0.004705 |
# -*- coding:utf8 -*-
# Author: shizhenyu96@gamil.com
# github: https://github.com/imndszy
from flask import Blueprint
admin = Blueprint('admin', __name__)
from . import views
|
imndszy/voluntary
|
app/admin/__init__.py
|
Python
|
mit
| 178 | 0.005618 |
#!/usr/bin/env python
import sys
from setuptools import setup
if sys.hexversion < 0x030200a1:
print ("LightSweeper requires python 3.2 or higher.")
print("Exiting...")
sys.exit(1)
setup(name='LightSweeper',
version='0.6b',
description='The LightSweeper API',
author='The LightSweeper Team',
author_email='codewizards@lightsweeper.org',
url='http://www.lightsweeper.org',
packages=['lightsweeper'],
package_data={"lightsweeper" : ["sounds/*.wav"]},
include_package_data=True
)
|
lightsweeper/lightsweeper-api
|
setup.py
|
Python
|
mit
| 528 | 0.020833 |
from megaera import local, json
from oauth import signed_url
from google.appengine.api import urlfetch
__TWITTER_API__ = "http://api.twitter.com/1"
def tweet(status, **credentials):
if not credentials:
# shortcut for no-credentials case
credentials = local.config_get('twitter')
if not credentials:
return
update_url = "%s/statuses/update.json" % __TWITTER_API__
fetch_url = signed_url(url=update_url, method='POST', status=status, **credentials)
response = urlfetch.fetch(fetch_url, method=urlfetch.POST)
try:
content = json.read(response.content)
return content.get('id')
except json.ReadException:
pass
def untweet(status_id, **credentials):
if not credentials:
# shortcut for no-credentials case
credentials = local.config_get('twitter')
if not credentials:
return
destroy_url = "%s/statuses/destroy.json" % __TWITTER_API__
fetch_url = signed_url(url=destroy_url, method='POST', id=status_id, **credentials)
response = urlfetch.fetch(fetch_url, method=urlfetch.POST)
try:
content = json.read(response.content)
return content.get('id')
except json.ReadException:
pass
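# Example usage (a minimal sketch; the keyword credentials are whatever
# oauth.signed_url expects, and default to the 'twitter' section of the local
# config when omitted):
#
#   status_id = tweet("Hello from Emend")
#   if status_id:
#       untweet(status_id)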
|
tantalor/emend
|
app/emend/twitter.py
|
Python
|
mit
| 1,161 | 0.014643 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-06 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NewsItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('content', models.TextField()),
],
),
]
|
nmunro/azathoth
|
news/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 603 | 0.001658 |
import unittest
import logging
logging.getLogger().setLevel(logging.DEBUG)
from ...auction.worker import Worker
from ...database import Database
from ...rc import sql
class TestCase(unittest.TestCase):
def setUp(self):
self.db = Database.pymysql(**sql)
self.ob = Worker(self.db, fail=True)
def test_init(self):
pass
|
LegionXI/pydarkstar
|
pydarkstar/tests/auction/test_worker.py
|
Python
|
mit
| 350 | 0.014286 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.db.models import Q
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase, override_settings
from django.utils import timezone
from zerver.lib import bugdown
from zerver.decorator import JsonableError
from zerver.lib.test_runner import slow
from zilencer.models import Deployment
from zerver.lib.message import (
MessageDict,
message_to_dict,
)
from zerver.lib.test_helpers import (
get_user_messages,
make_client,
message_ids, message_stream_count,
most_recent_message,
most_recent_usermessage,
queries_captured,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
MAX_MESSAGE_LENGTH, MAX_SUBJECT_LENGTH,
Message, Realm, Recipient, Stream, UserMessage, UserProfile, Attachment, RealmAlias,
get_realm_by_string_id, get_stream, get_user_profile_by_email,
Reaction, sew_messages_and_reactions
)
from zerver.lib.actions import (
check_message, check_send_message,
do_create_user,
get_client,
get_recipient,
)
from zerver.lib.upload import create_attachment
from zerver.views.messages import create_mirrored_message_users
import datetime
import DNS
import mock
import time
import ujson
from six.moves import range
from typing import Any, Optional, Text
class TopicHistoryTest(ZulipTestCase):
def test_topics_history(self):
# type: () -> None
# verified: int(UserMessage.flags.read) == 1
email = 'iago@zulip.com'
stream_name = 'Verona'
self.login(email)
user_profile = get_user_profile_by_email(email)
stream = Stream.objects.get(name=stream_name)
recipient = get_recipient(Recipient.STREAM, stream.id)
def create_test_message(topic, read, starred=False):
# type: (str, bool, bool) -> None
hamlet = get_user_profile_by_email('hamlet@zulip.com')
message = Message.objects.create(
sender=hamlet,
recipient=recipient,
subject=topic,
content='whatever',
pub_date=timezone.now(),
sending_client=get_client('whatever'),
)
flags = 0
if read:
flags |= UserMessage.flags.read
# use this to make sure our query isn't confused
# by other flags
if starred:
flags |= UserMessage.flags.starred
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=flags,
)
create_test_message('topic2', read=False)
create_test_message('toPIc1', read=False, starred=True)
create_test_message('topic2', read=False)
create_test_message('topic2', read=True)
create_test_message('topic2', read=False, starred=True)
create_test_message('Topic2', read=False)
create_test_message('already_read', read=True)
endpoint = '/json/users/me/%d/topics' % (stream.id,)
result = self.client_get(endpoint, dict())
self.assert_json_success(result)
history = ujson.loads(result.content)['topics']
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
self.assertEqual(history[:3], [
[u'already_read', 0],
[u'Topic2', 4],
[u'toPIc1', 1],
])
def test_bad_stream_id(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
# non-sensible stream id
endpoint = '/json/users/me/9999999999/topics'
result = self.client_get(endpoint, dict())
self.assert_json_error(result, 'Invalid stream id')
# out of realm
bad_stream = self.make_stream(
'mit_stream',
realm=get_realm_by_string_id('mit')
)
endpoint = '/json/users/me/%s/topics' % (bad_stream.id,)
result = self.client_get(endpoint, dict())
self.assert_json_error(result, 'Invalid stream id')
# private stream to which I am not subscribed
private_stream = self.make_stream(
'private_stream',
invite_only=True
)
endpoint = '/json/users/me/%s/topics' % (private_stream.id,)
result = self.client_get(endpoint, dict())
self.assert_json_error(result, 'Invalid stream id')
class TestCrossRealmPMs(ZulipTestCase):
def make_realm(self, domain):
# type: (Text) -> Realm
realm = Realm.objects.create(string_id=domain, domain=domain, invite_required=False)
RealmAlias.objects.create(realm=realm, domain=domain)
return realm
def setUp(self):
# type: () -> None
dep = Deployment()
dep.base_api_url = "https://zulip.com/api/"
dep.base_site_url = "https://zulip.com/"
# We need to save the object before we can access
# the many-to-many relationship 'realms'
dep.save()
dep.realms = [get_realm_by_string_id("zulip")]
dep.save()
def create_user(self, email):
# type: (Text) -> UserProfile
username, domain = email.split('@')
self.register(username, 'test', domain=domain)
return get_user_profile_by_email(email)
@override_settings(CROSS_REALM_BOT_EMAILS=['feedback@zulip.com',
'support@3.example.com'])
def test_realm_scenarios(self):
# type: () -> None
r1 = self.make_realm('1.example.com')
r2 = self.make_realm('2.example.com')
r3 = self.make_realm('3.example.com')
deployment = Deployment.objects.filter()[0]
deployment.realms.add(r1)
deployment.realms.add(r2)
deployment.realms.add(r3)
def assert_message_received(to_user, from_user):
# type: (UserProfile, UserProfile) -> None
messages = get_user_messages(to_user)
self.assertEqual(messages[-1].sender.pk, from_user.pk)
def assert_disallowed():
# type: () -> Any
return self.assertRaisesRegex(
JsonableError,
'You can\'t send private messages outside of your organization.')
random_zulip_email = 'random@zulip.com'
user1_email = 'user1@1.example.com'
user1a_email = 'user1a@1.example.com'
user2_email = 'user2@2.example.com'
user3_email = 'user3@3.example.com'
feedback_email = 'feedback@zulip.com'
support_email = 'support@3.example.com' # note: not zulip.com
self.create_user(random_zulip_email)
user1 = self.create_user(user1_email)
user1a = self.create_user(user1a_email)
user2 = self.create_user(user2_email)
self.create_user(user3_email)
feedback_bot = get_user_profile_by_email(feedback_email)
support_bot = self.create_user(support_email)
# Users can PM themselves
self.send_message(user1_email, user1_email, Recipient.PERSONAL)
assert_message_received(user1, user1)
# Users on the same realm can PM each other
self.send_message(user1_email, user1a_email, Recipient.PERSONAL)
assert_message_received(user1a, user1)
# Cross-realm bots in the zulip.com realm can PM any realm
self.send_message(feedback_email, user2_email, Recipient.PERSONAL)
assert_message_received(user2, feedback_bot)
# All users can PM cross-realm bots in the zulip.com realm
self.send_message(user1_email, feedback_email, Recipient.PERSONAL)
assert_message_received(feedback_bot, user1)
# Users can PM cross-realm bots on non-zulip realms.
# (The support bot represents some theoretical bot that we may
# create in the future that does not have zulip.com as its realm.)
self.send_message(user1_email, [support_email], Recipient.PERSONAL)
assert_message_received(support_bot, user1)
# Allow sending PMs to two different cross-realm bots simultaneously.
# (We don't particularly need this feature, but since users can
# already individually send PMs to cross-realm bots, we shouldn't
# prevent them from sending multiple bots at once. We may revisit
# this if it's a nuisance for huddles.)
self.send_message(user1_email, [feedback_email, support_email],
Recipient.PERSONAL)
assert_message_received(feedback_bot, user1)
assert_message_received(support_bot, user1)
# Prevent old loophole where I could send PMs to other users as long
# as I copied a cross-realm bot from the same realm.
with assert_disallowed():
self.send_message(user1_email, [user3_email, support_email], Recipient.PERSONAL)
# Users on three different realms can't PM each other,
# even if one of the users is a cross-realm bot.
with assert_disallowed():
self.send_message(user1_email, [user2_email, feedback_email],
Recipient.PERSONAL)
with assert_disallowed():
self.send_message(feedback_email, [user1_email, user2_email],
Recipient.PERSONAL)
# Users on the different realms can not PM each other
with assert_disallowed():
self.send_message(user1_email, user2_email, Recipient.PERSONAL)
# Users on non-zulip realms can't PM "ordinary" Zulip users
with assert_disallowed():
self.send_message(user1_email, random_zulip_email, Recipient.PERSONAL)
# Users on three different realms can not PM each other
with assert_disallowed():
self.send_message(user1_email, [user2_email, user3_email], Recipient.PERSONAL)
class PersonalMessagesTest(ZulipTestCase):
def test_auto_subbed_to_personals(self):
# type: () -> None
"""
Newly created users are auto-subbed to the ability to receive
personals.
"""
self.register("test", "test")
user_profile = get_user_profile_by_email('test@zulip.com')
old_messages_count = message_stream_count(user_profile)
self.send_message("test@zulip.com", "test@zulip.com", Recipient.PERSONAL)
new_messages_count = message_stream_count(user_profile)
self.assertEqual(new_messages_count, old_messages_count + 1)
recipient = Recipient.objects.get(type_id=user_profile.id,
type=Recipient.PERSONAL)
message = most_recent_message(user_profile)
self.assertEqual(message.recipient, recipient)
with mock.patch('zerver.models.get_display_recipient', return_value='recip'):
self.assertEqual(str(message),
u'<Message: recip / / '
'<UserProfile: test@zulip.com <Realm: zulip.com 1>>>')
user_message = most_recent_usermessage(user_profile)
self.assertEqual(str(user_message),
u'<UserMessage: recip / test@zulip.com ([])>'
)
@slow("checks several profiles")
def test_personal_to_self(self):
# type: () -> None
"""
If you send a personal to yourself, only you see it.
"""
old_user_profiles = list(UserProfile.objects.all())
self.register("test1", "test1")
old_messages = []
for user_profile in old_user_profiles:
old_messages.append(message_stream_count(user_profile))
self.send_message("test1@zulip.com", "test1@zulip.com", Recipient.PERSONAL)
new_messages = []
for user_profile in old_user_profiles:
new_messages.append(message_stream_count(user_profile))
self.assertEqual(old_messages, new_messages)
user_profile = get_user_profile_by_email("test1@zulip.com")
recipient = Recipient.objects.get(type_id=user_profile.id, type=Recipient.PERSONAL)
self.assertEqual(most_recent_message(user_profile).recipient, recipient)
def assert_personal(self, sender_email, receiver_email, content="test content"):
# type: (Text, Text, Text) -> None
"""
Send a private message from `sender_email` to `receiver_email` and check
that only those two parties actually received the message.
"""
sender = get_user_profile_by_email(sender_email)
receiver = get_user_profile_by_email(receiver_email)
sender_messages = message_stream_count(sender)
receiver_messages = message_stream_count(receiver)
other_user_profiles = UserProfile.objects.filter(~Q(email=sender_email) &
~Q(email=receiver_email))
old_other_messages = []
for user_profile in other_user_profiles:
old_other_messages.append(message_stream_count(user_profile))
self.send_message(sender_email, receiver_email, Recipient.PERSONAL, content)
# Users outside the conversation don't get the message.
new_other_messages = []
for user_profile in other_user_profiles:
new_other_messages.append(message_stream_count(user_profile))
self.assertEqual(old_other_messages, new_other_messages)
# The personal message is in the streams of both the sender and receiver.
self.assertEqual(message_stream_count(sender),
sender_messages + 1)
self.assertEqual(message_stream_count(receiver),
receiver_messages + 1)
recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
self.assertEqual(most_recent_message(sender).recipient, recipient)
self.assertEqual(most_recent_message(receiver).recipient, recipient)
@slow("assert_personal checks several profiles")
def test_personal(self):
# type: () -> None
"""
If you send a personal, only you and the recipient see it.
"""
self.login("hamlet@zulip.com")
self.assert_personal("hamlet@zulip.com", "othello@zulip.com")
@slow("assert_personal checks several profiles")
def test_non_ascii_personal(self):
# type: () -> None
"""
Sending a PM containing non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
self.assert_personal("hamlet@zulip.com", "othello@zulip.com", u"hümbüǵ")
class StreamMessagesTest(ZulipTestCase):
def assert_stream_message(self, stream_name, subject="test subject",
content="test content"):
# type: (Text, Text, Text) -> None
"""
Check that messages sent to a stream reach all subscribers to that stream.
"""
realm = get_realm_by_string_id('zulip')
subscribers = self.users_subscribed_to_stream(stream_name, realm)
old_subscriber_messages = []
for subscriber in subscribers:
old_subscriber_messages.append(message_stream_count(subscriber))
non_subscribers = [user_profile for user_profile in UserProfile.objects.all()
if user_profile not in subscribers]
old_non_subscriber_messages = []
for non_subscriber in non_subscribers:
old_non_subscriber_messages.append(message_stream_count(non_subscriber))
non_bot_subscribers = [user_profile for user_profile in subscribers
if not user_profile.is_bot]
a_subscriber_email = non_bot_subscribers[0].email
self.login(a_subscriber_email)
self.send_message(a_subscriber_email, stream_name, Recipient.STREAM,
subject, content)
# Did all of the subscribers get the message?
new_subscriber_messages = []
for subscriber in subscribers:
new_subscriber_messages.append(message_stream_count(subscriber))
# Did non-subscribers not get the message?
new_non_subscriber_messages = []
for non_subscriber in non_subscribers:
new_non_subscriber_messages.append(message_stream_count(non_subscriber))
self.assertEqual(old_non_subscriber_messages, new_non_subscriber_messages)
self.assertEqual(new_subscriber_messages, [elt + 1 for elt in old_subscriber_messages])
def test_not_too_many_queries(self):
# type: () -> None
recipient_list = ['hamlet@zulip.com', 'iago@zulip.com', 'cordelia@zulip.com', 'othello@zulip.com']
for email in recipient_list:
self.subscribe_to_stream(email, "Denmark")
sender_email = 'hamlet@zulip.com'
sender = get_user_profile_by_email(sender_email)
message_type_name = "stream"
sending_client = make_client(name="test suite")
stream = 'Denmark'
subject = 'foo'
content = 'whatever'
realm = sender.realm
def send_message():
# type: () -> None
check_send_message(sender, sending_client, message_type_name, [stream],
subject, content, forwarder_user_profile=sender, realm=realm)
send_message() # prime the caches
with queries_captured() as queries:
send_message()
self.assert_max_length(queries, 8)
def test_stream_message_unicode(self):
# type: () -> None
user_profile = get_user_profile_by_email("iago@zulip.com")
self.subscribe_to_stream(user_profile.email, "Denmark")
self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
content="whatever", subject="my topic")
message = most_recent_message(user_profile)
self.assertEqual(str(message),
u'<Message: Denmark / my topic / '
'<UserProfile: hamlet@zulip.com <Realm: zulip.com 1>>>')
def test_message_mentions(self):
# type: () -> None
user_profile = get_user_profile_by_email("iago@zulip.com")
self.subscribe_to_stream(user_profile.email, "Denmark")
self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
content="test @**Iago** rules")
message = most_recent_message(user_profile)
assert(UserMessage.objects.get(user_profile=user_profile, message=message).flags.mentioned.is_set)
def test_stream_message_mirroring(self):
# type: () -> None
from zerver.lib.actions import do_change_is_admin
email = "iago@zulip.com"
user_profile = get_user_profile_by_email(email)
do_change_is_admin(user_profile, True, 'api_super_user')
result = self.client_post("/api/v1/messages", {"type": "stream",
"to": "Verona",
"sender": "cordelia@zulip.com",
"client": "test suite",
"subject": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true"},
**self.api_auth(email))
self.assert_json_success(result)
do_change_is_admin(user_profile, False, 'api_super_user')
result = self.client_post("/api/v1/messages", {"type": "stream",
"to": "Verona",
"sender": "cordelia@zulip.com",
"client": "test suite",
"subject": "announcement",
"content": "Everyone knows Iago rules",
"forged": "true"},
**self.api_auth(email))
self.assert_json_error(result, "User not authorized for this query")
@slow('checks all users')
def test_message_to_stream(self):
# type: () -> None
"""
If you send a message to a stream, everyone subscribed to the stream
receives the messages.
"""
self.assert_stream_message("Scotland")
@slow('checks all users')
def test_non_ascii_stream_message(self):
# type: () -> None
"""
Sending a stream message containing non-ASCII characters in the stream
name, subject, or message body succeeds.
"""
self.login("hamlet@zulip.com")
# Subscribe everyone to a stream with non-ASCII characters.
non_ascii_stream_name = u"hümbüǵ"
realm = get_realm_by_string_id("zulip")
stream = self.make_stream(non_ascii_stream_name)
for user_profile in UserProfile.objects.filter(realm=realm):
self.subscribe_to_stream(user_profile.email, stream.name)
self.assert_stream_message(non_ascii_stream_name, subject=u"hümbüǵ",
content=u"hümbüǵ")
class MessageDictTest(ZulipTestCase):
@slow('builds lots of messages')
def test_bulk_message_fetching(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
stream_name = u'Çiğdem'
stream = self.make_stream(stream_name)
stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
sending_client = make_client(name="test suite")
for i in range(300):
for recipient in [pm_recipient, stream_recipient]:
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='whatever %d' % i,
pub_date=timezone.now(),
sending_client=sending_client,
last_edit_time=timezone.now(),
edit_history='[]'
)
message.save()
Reaction.objects.create(user_profile=sender, message=message,
emoji_name='simple_smile')
ids = [row['id'] for row in Message.objects.all().values('id')]
num_ids = len(ids)
self.assertTrue(num_ids >= 600)
t = time.time()
with queries_captured() as queries:
rows = list(Message.get_raw_db_rows(ids))
for row in rows:
MessageDict.build_dict_from_raw_db_row(row, False)
delay = time.time() - t
# Make sure we don't take longer than 1ms per message to extract messages.
self.assertTrue(delay < 0.001 * num_ids)
self.assert_max_length(queries, 7)
self.assertEqual(len(rows), num_ids)
def test_applying_markdown(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
sending_client = make_client(name="test suite")
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='hello **world**',
pub_date=timezone.now(),
sending_client=sending_client,
last_edit_time=timezone.now(),
edit_history='[]'
)
message.save()
# An important part of this test is to get the message through this exact code path,
# because there is an ugly hack we need to cover. So don't just say "row = message".
row = Message.get_raw_db_rows([message.id])[0]
dct = MessageDict.build_dict_from_raw_db_row(row, apply_markdown=True)
expected_content = '<p>hello <strong>world</strong></p>'
self.assertEqual(dct['content'], expected_content)
message = Message.objects.get(id=message.id)
self.assertEqual(message.rendered_content, expected_content)
self.assertEqual(message.rendered_content_version, bugdown.version)
def test_reaction(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
sending_client = make_client(name="test suite")
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='hello **world**',
pub_date=timezone.now(),
sending_client=sending_client,
last_edit_time=timezone.now(),
edit_history='[]'
)
message.save()
reaction = Reaction.objects.create(
message=message, user_profile=sender,
emoji_name='simple_smile')
row = Message.get_raw_db_rows([message.id])[0]
msg_dict = MessageDict.build_dict_from_raw_db_row(
row, apply_markdown=True)
self.assertEqual(msg_dict['reactions'][0]['emoji_name'],
reaction.emoji_name)
self.assertEqual(msg_dict['reactions'][0]['user']['id'],
sender.id)
self.assertEqual(msg_dict['reactions'][0]['user']['email'],
sender.email)
self.assertEqual(msg_dict['reactions'][0]['user']['full_name'],
sender.full_name)
class SewMessageAndReactionTest(ZulipTestCase):
def test_sew_messages_and_reaction(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
receiver = get_user_profile_by_email('hamlet@zulip.com')
pm_recipient = Recipient.objects.get(type_id=receiver.id, type=Recipient.PERSONAL)
stream_name = u'Çiğdem'
stream = self.make_stream(stream_name)
stream_recipient = Recipient.objects.get(type_id=stream.id, type=Recipient.STREAM)
sending_client = make_client(name="test suite")
needed_ids = []
for i in range(5):
for recipient in [pm_recipient, stream_recipient]:
message = Message(
sender=sender,
recipient=recipient,
subject='whatever',
content='whatever %d' % i,
pub_date=timezone.now(),
sending_client=sending_client,
last_edit_time=timezone.now(),
edit_history='[]'
)
message.save()
needed_ids.append(message.id)
reaction = Reaction(user_profile=sender, message=message,
emoji_name='simple_smile')
reaction.save()
messages = Message.objects.filter(id__in=needed_ids).values(
*['id', 'content'])
reactions = Reaction.get_raw_db_rows(needed_ids)
tied_data = sew_messages_and_reactions(messages, reactions)
for data in tied_data:
self.assertEqual(len(data['reactions']), 1)
self.assertEqual(data['reactions'][0]['emoji_name'],
'simple_smile')
self.assertTrue(data['id'])
self.assertTrue(data['content'])
class MessagePOSTTest(ZulipTestCase):
def test_message_to_self(self):
# type: () -> None
"""
Sending a message to a stream to which you are subscribed is
successful.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_success(result)
def test_api_message_to_self(self):
# type: () -> None
"""
Same as above, but for the API view
"""
email = "hamlet@zulip.com"
result = self.client_post("/api/v1/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"},
**self.api_auth(email))
self.assert_json_success(result)
def test_api_message_with_default_to(self):
# type: () -> None
"""
Sending messages without a to field should be sent to the default
stream for the user_profile.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(email)
user_profile.default_sending_stream = get_stream('Verona', user_profile.realm)
user_profile.save()
result = self.client_post("/api/v1/messages", {"type": "stream",
"client": "test suite",
"content": "Test message no to",
"subject": "Test subject"},
**self.api_auth(email))
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content, "Test message no to")
def test_message_to_nonexistent_stream(self):
# type: () -> None
"""
Sending a message to a nonexistent stream fails.
"""
self.login("hamlet@zulip.com")
self.assertFalse(Stream.objects.filter(name="nonexistent_stream"))
result = self.client_post("/json/messages", {"type": "stream",
"to": "nonexistent_stream",
"client": "test suite",
"content": "Test message",
"subject": "Test subject"})
self.assert_json_error(result, "Stream 'nonexistent_stream' does not exist")
def test_message_to_nonexistent_stream_with_bad_characters(self):
# type: () -> None
"""
Nonexistent stream name with bad characters should be escaped properly.
"""
self.login("hamlet@zulip.com")
        self.assertFalse(Stream.objects.filter(name="""&<"'><non-existent>"""))
        result = self.client_post("/json/messages", {"type": "stream",
                                                     "to": """&<"'><non-existent>""",
                                                     "client": "test suite",
                                                     "content": "Test message",
                                                     "subject": "Test subject"})
        self.assert_json_error(result, """Stream '&<"'><non-existent>' does not exist""")
def test_personal_message(self):
# type: () -> None
"""
Sending a personal message to a valid username is successful.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_success(result)
def test_personal_message_to_nonexistent_user(self):
# type: () -> None
"""
Sending a personal message to an invalid email returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "test suite",
"to": "nonexistent"})
self.assert_json_error(result, "Invalid email 'nonexistent'")
def test_invalid_type(self):
# type: () -> None
"""
Sending a message of unknown type returns error JSON.
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "invalid type",
"content": "Test message",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Invalid message type")
def test_empty_message(self):
# type: () -> None
"""
Sending a message that is empty or only whitespace should fail
"""
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "private",
"content": " ",
"client": "test suite",
"to": "othello@zulip.com"})
self.assert_json_error(result, "Message must not be empty")
def test_mirrored_huddle(self):
# type: () -> None
"""
Sending a mirrored huddle message works
"""
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["starnine@mit.edu",
"espuser@mit.edu"])})
self.assert_json_success(result)
def test_mirrored_personal(self):
# type: () -> None
"""
Sending a mirrored personal message works
"""
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_success(result)
def test_duplicated_mirrored_huddle(self):
# type: () -> None
"""
        Sending two mirrored huddles in a row returns the same ID
"""
msg = {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": ujson.dumps(["espuser@mit.edu",
"starnine@mit.edu"])}
with mock.patch('DNS.dnslookup', return_value=[['starnine:*:84233:101:Athena Consulting Exchange User,,,:/mit/starnine:/bin/bash']]):
self.login("starnine@mit.edu")
result1 = self.client_post("/json/messages", msg)
with mock.patch('DNS.dnslookup', return_value=[['espuser:*:95494:101:Esp Classroom,,,:/mit/espuser:/bin/athena/bash']]):
self.login("espuser@mit.edu")
result2 = self.client_post("/json/messages", msg)
self.assertEqual(ujson.loads(result1.content)['id'],
ujson.loads(result2.content)['id'])
def test_long_message(self):
# type: () -> None
"""
Sending a message longer than the maximum message length succeeds but is
truncated.
"""
self.login("hamlet@zulip.com")
long_message = "A" * (MAX_MESSAGE_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": long_message, "subject": "Test subject"}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.content,
"A" * (MAX_MESSAGE_LENGTH - 3) + "...")
def test_long_topic(self):
# type: () -> None
"""
Sending a message with a topic longer than the maximum topic length
succeeds, but the topic is truncated.
"""
self.login("hamlet@zulip.com")
long_topic = "A" * (MAX_SUBJECT_LENGTH + 1)
post_data = {"type": "stream", "to": "Verona", "client": "test suite",
"content": "test content", "subject": long_topic}
result = self.client_post("/json/messages", post_data)
self.assert_json_success(result)
sent_message = self.get_last_message()
self.assertEqual(sent_message.topic_name(),
"A" * (MAX_SUBJECT_LENGTH - 3) + "...")
def test_send_forged_message_as_not_superuser(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"forged": True})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_not_superuser_to_different_domain(self):
# type: () -> None
self.login("hamlet@zulip.com")
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"domain": "mit.edu"})
self.assert_json_error(result, "User not authorized for this query")
def test_send_message_as_superuser_to_domain_that_dont_exist(self):
# type: () -> None
email = "emailgateway@zulip.com"
user = get_user_profile_by_email(email)
password = "test_password"
user.set_password(password)
user.is_api_super_user = True
user.save()
self.login(email, password)
result = self.client_post("/json/messages", {"type": "stream",
"to": "Verona",
"client": "test suite",
"content": "Test message",
"subject": "Test subject",
"domain": "non-existing"})
user.is_api_super_user = False
user.save()
self.assert_json_error(result, "Unknown domain non-existing")
def test_send_message_when_sender_is_not_set(self):
# type: () -> None
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "Missing sender")
def test_send_message_as_not_superuser_when_type_is_not_private(self):
# type: () -> None
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "not-private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "User not authorized for this query")
@mock.patch("zerver.views.messages.create_mirrored_message_users")
def test_send_message_create_mirrored_message_user_returns_invalid_input(self, create_mirrored_message_users_mock):
# type: (Any) -> None
create_mirrored_message_users_mock.return_value = (False, True)
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"})
self.assert_json_error(result, "Invalid mirrored message")
@mock.patch("zerver.views.messages.create_mirrored_message_users")
def test_send_message_when_client_is_zephyr_mirror_but_domain_is_not_mit_edu(self, create_mirrored_message_users_mock):
# type: (Any) -> None
create_mirrored_message_users_mock.return_value = (True, True)
email = "starnine@mit.edu"
user = get_user_profile_by_email(email)
domain = user.realm.domain
user.realm.domain = 'not_mit.edu'
user.realm.save()
self.login("starnine@mit.edu")
result = self.client_post("/json/messages", {"type": "private",
"sender": "sipbtest@mit.edu",
"content": "Test message",
"client": "zephyr_mirror",
"to": "starnine@mit.edu"}, name='gownooo')
self.assert_json_error(result, "Invalid mirrored realm")
user.realm.domain = domain
user.realm.save()
class EditMessageTest(ZulipTestCase):
def check_message(self, msg_id, subject=None, content=None):
# type: (int, Optional[Text], Optional[Text]) -> Message
msg = Message.objects.get(id=msg_id)
cached = message_to_dict(msg, False)
uncached = MessageDict.to_dict_uncached_helper(msg, False)
self.assertEqual(cached, uncached)
if subject:
self.assertEqual(msg.topic_name(), subject)
if content:
self.assertEqual(msg.content, content)
return msg
def test_save_message(self):
# type: () -> None
"""This is also tested by a client test, but here we can verify
the cache against the database"""
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'content': 'after edit'
})
self.assert_json_success(result)
self.check_message(msg_id, content="after edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'subject': 'edited'
})
self.assert_json_success(result)
self.check_message(msg_id, subject="edited")
def test_fetch_raw_message(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "cordelia@zulip.com", Recipient.PERSONAL,
subject="editing", content="**before** edit")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
data = ujson.loads(result.content)
self.assertEqual(data['raw_content'], '**before** edit')
# Test error cases
result = self.client_get('/json/messages/999999')
self.assert_json_error(result, 'Invalid message(s)')
self.login("cordelia@zulip.com")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.login("othello@zulip.com")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_error(result, 'Invalid message(s)')
def test_fetch_raw_message_stream_wrong_realm(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
stream = self.make_stream('public_stream')
self.subscribe_to_stream(email, stream.name)
msg_id = self.send_message(email, stream.name, Recipient.STREAM,
subject="test", content="test")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.login("sipbtest@mit.edu")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_error(result, 'Invalid message(s)')
def test_fetch_raw_message_private_stream(self):
# type: () -> None
email = "hamlet@zulip.com"
self.login(email)
stream = self.make_stream('private_stream', invite_only=True)
self.subscribe_to_stream(email, stream.name)
msg_id = self.send_message(email, stream.name, Recipient.STREAM,
subject="test", content="test")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_success(result)
self.login("othello@zulip.com")
result = self.client_get('/json/messages/' + str(msg_id))
self.assert_json_error(result, 'Invalid message(s)')
def test_edit_message_no_changes(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
})
self.assert_json_error(result, "Nothing to change")
def test_edit_message_no_topic(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'subject': ' '
})
self.assert_json_error(result, "Topic can't be empty")
def test_edit_message_no_content(self):
# type: () -> None
self.login("hamlet@zulip.com")
msg_id = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="editing", content="before edit")
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id,
'content': ' '
})
self.assert_json_success(result)
content = Message.objects.filter(id=msg_id).values_list('content', flat = True)[0]
self.assertEqual(content, "(deleted)")
def test_edit_message_content_limit(self):
# type: () -> None
def set_message_editing_params(allow_message_editing,
message_content_edit_limit_seconds):
# type: (bool, int) -> None
result = self.client_patch("/json/realm", {
'allow_message_editing': ujson.dumps(allow_message_editing),
'message_content_edit_limit_seconds': message_content_edit_limit_seconds
})
self.assert_json_success(result)
def do_edit_message_assert_success(id_, unique_str, topic_only = False):
# type: (int, Text, bool) -> None
new_subject = 'subject' + unique_str
new_content = 'content' + unique_str
params_dict = {'message_id': id_, 'subject': new_subject}
if not topic_only:
params_dict['content'] = new_content
result = self.client_patch("/json/messages/" + str(id_), params_dict)
self.assert_json_success(result)
if topic_only:
self.check_message(id_, subject=new_subject)
else:
self.check_message(id_, subject=new_subject, content=new_content)
def do_edit_message_assert_error(id_, unique_str, error, topic_only = False):
# type: (int, Text, Text, bool) -> None
message = Message.objects.get(id=id_)
old_subject = message.topic_name()
old_content = message.content
new_subject = 'subject' + unique_str
new_content = 'content' + unique_str
params_dict = {'message_id': id_, 'subject': new_subject}
if not topic_only:
params_dict['content'] = new_content
result = self.client_patch("/json/messages/" + str(id_), params_dict)
message = Message.objects.get(id=id_)
self.assert_json_error(result, error)
self.check_message(id_, subject=old_subject, content=old_content)
self.login("iago@zulip.com")
# send a message in the past
id_ = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
content="content", subject="subject")
message = Message.objects.get(id=id_)
message.pub_date = message.pub_date - datetime.timedelta(seconds=180)
message.save()
# test the various possible message editing settings
# high enough time limit, all edits allowed
set_message_editing_params(True, 240)
do_edit_message_assert_success(id_, 'A')
# out of time, only topic editing allowed
set_message_editing_params(True, 120)
do_edit_message_assert_success(id_, 'B', True)
do_edit_message_assert_error(id_, 'C', "The time limit for editing this message has past")
# infinite time, all edits allowed
set_message_editing_params(True, 0)
do_edit_message_assert_success(id_, 'D')
# without allow_message_editing, nothing is allowed
set_message_editing_params(False, 240)
do_edit_message_assert_error(id_, 'E', "Your organization has turned off message editing.", True)
set_message_editing_params(False, 120)
do_edit_message_assert_error(id_, 'F', "Your organization has turned off message editing.", True)
set_message_editing_params(False, 0)
do_edit_message_assert_error(id_, 'G', "Your organization has turned off message editing.", True)
def test_propagate_topic_forward(self):
# type: () -> None
self.login("hamlet@zulip.com")
id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id2 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
subject="topic1")
id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic2")
id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
result = self.client_patch("/json/messages/" + str(id1), {
'message_id': id1,
'subject': 'edited',
'propagate_mode': 'change_later'
})
self.assert_json_success(result)
self.check_message(id1, subject="edited")
self.check_message(id2, subject="edited")
self.check_message(id3, subject="topic1")
self.check_message(id4, subject="topic2")
self.check_message(id5, subject="edited")
def test_propagate_all_topics(self):
# type: () -> None
self.login("hamlet@zulip.com")
id1 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id2 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id3 = self.send_message("iago@zulip.com", "Rome", Recipient.STREAM,
subject="topic1")
id4 = self.send_message("hamlet@zulip.com", "Scotland", Recipient.STREAM,
subject="topic2")
id5 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic1")
id6 = self.send_message("iago@zulip.com", "Scotland", Recipient.STREAM,
subject="topic3")
result = self.client_patch("/json/messages/" + str(id2), {
'message_id': id2,
'subject': 'edited',
'propagate_mode': 'change_all'
})
self.assert_json_success(result)
self.check_message(id1, subject="edited")
self.check_message(id2, subject="edited")
self.check_message(id3, subject="topic1")
self.check_message(id4, subject="topic2")
self.check_message(id5, subject="edited")
self.check_message(id6, subject="topic3")
class MirroredMessageUsersTest(TestCase):
class Request(object):
pass
def test_invalid_sender(self):
# type: () -> None
user = get_user_profile_by_email('hamlet@zulip.com')
recipients = [] # type: List[Text]
request = self.Request()
request.POST = dict() # no sender
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
def test_invalid_client(self):
# type: () -> None
client = get_client(name='banned_mirror') # Invalid!!!
user = get_user_profile_by_email('hamlet@zulip.com')
sender = user
recipients = [] # type: List[Text]
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
def test_invalid_email(self):
# type: () -> None
invalid_email = 'alice AT example.com'
recipients = [invalid_email]
# We use an MIT user here to maximize code coverage
user = get_user_profile_by_email('starnine@mit.edu')
sender = user
for client_name in ['zephyr_mirror', 'irc_mirror', 'jabber_mirror']:
client = get_client(name=client_name)
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, False)
self.assertEqual(mirror_sender, None)
@mock.patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_zephyr_mirror_new_recipient(self, ignored):
# type: (Any) -> None
"""Test mirror dummy user creation for PM recipients"""
client = get_client(name='zephyr_mirror')
user = get_user_profile_by_email('starnine@mit.edu')
sender = get_user_profile_by_email('sipbtest@mit.edu')
new_user_email = 'bob_the_new_user@mit.edu'
recipients = [user.email, new_user_email]
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertTrue(valid_input)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn(user.email, realm_emails)
self.assertIn(new_user_email, realm_emails)
bob = get_user_profile_by_email(new_user_email)
self.assertTrue(bob.is_mirror_dummy)
@mock.patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_zephyr_mirror_new_sender(self, ignored):
# type: (Any) -> None
"""Test mirror dummy user creation for sender when sending to stream"""
client = get_client(name='zephyr_mirror')
user = get_user_profile_by_email('starnine@mit.edu')
sender_email = 'new_sender@mit.edu'
recipients = ['stream_name']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender_email,
type='stream')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertTrue(valid_input)
self.assertEqual(mirror_sender.email, sender_email)
self.assertTrue(mirror_sender.is_mirror_dummy)
def test_irc_mirror(self):
# type: () -> None
client = get_client(name='irc_mirror')
sender = get_user_profile_by_email('hamlet@zulip.com')
user = sender
recipients = ['alice@zulip.com', 'bob@irc.zulip.com', 'cordelia@zulip.com']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, True)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn('alice@zulip.com', realm_emails)
self.assertIn('bob@irc.zulip.com', realm_emails)
bob = get_user_profile_by_email('bob@irc.zulip.com')
self.assertTrue(bob.is_mirror_dummy)
def test_jabber_mirror(self):
# type: () -> None
client = get_client(name='jabber_mirror')
sender = get_user_profile_by_email('hamlet@zulip.com')
user = sender
recipients = ['alice@zulip.com', 'bob@zulip.com', 'cordelia@zulip.com']
# Now make the request.
request = self.Request()
request.POST = dict(
sender=sender.email,
type='private')
request.client = client
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user, recipients)
self.assertEqual(valid_input, True)
self.assertEqual(mirror_sender, sender)
realm_users = UserProfile.objects.filter(realm=sender.realm)
realm_emails = {user.email for user in realm_users}
self.assertIn('alice@zulip.com', realm_emails)
self.assertIn('bob@zulip.com', realm_emails)
bob = get_user_profile_by_email('bob@zulip.com')
self.assertTrue(bob.is_mirror_dummy)
class StarTests(ZulipTestCase):
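    # Shared helper for the tests below: flips the "starred" flag on the given
    # message ids via POST /json/messages/flags and returns the HttpResponse
    # so each test can make its own assertions.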
def change_star(self, messages, add=True):
# type: (List[int], bool) -> HttpResponse
return self.client_post("/json/messages/flags",
{"messages": ujson.dumps(messages),
"op": "add" if add else "remove",
"flag": "starred"})
def test_change_star(self):
# type: () -> None
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
self.login("hamlet@zulip.com")
message_ids = [self.send_message("hamlet@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL, "test")]
# Star a message.
result = self.change_star(message_ids)
self.assert_json_success(result)
for msg in self.get_old_messages():
if msg['id'] in message_ids:
self.assertEqual(msg['flags'], ['starred'])
else:
self.assertEqual(msg['flags'], ['read'])
result = self.change_star(message_ids, False)
self.assert_json_success(result)
# Remove the stars.
for msg in self.get_old_messages():
if msg['id'] in message_ids:
self.assertEqual(msg['flags'], [])
def test_change_star_public_stream_historical(self):
# type: () -> None
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
stream_name = "new_stream"
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.login("hamlet@zulip.com")
message_ids = [self.send_message("hamlet@zulip.com", stream_name,
Recipient.STREAM, "test")]
# Now login as another user who wasn't on that stream
self.login("cordelia@zulip.com")
# We can't change flags other than "starred" on historical messages:
result = self.client_post("/json/messages/flags",
{"messages": ujson.dumps(message_ids),
"op": "add",
"flag": "read"})
self.assert_json_error(result, 'Invalid message(s)')
# Trying to change a list of more than one historical message fails
result = self.change_star(message_ids * 2)
self.assert_json_error(result, 'Invalid message(s)')
# Confirm that one can change the historical flag now
result = self.change_star(message_ids)
self.assert_json_success(result)
for msg in self.get_old_messages():
if msg['id'] in message_ids:
self.assertEqual(set(msg['flags']), {'starred', 'historical', 'read'})
else:
self.assertEqual(msg['flags'], ['read'])
result = self.change_star(message_ids, False)
self.assert_json_success(result)
# But it still doesn't work if you're in another realm
self.login("sipbtest@mit.edu")
result = self.change_star(message_ids)
self.assert_json_error(result, 'Invalid message(s)')
def test_change_star_private_message_security(self):
# type: () -> None
"""
You can set a message as starred/un-starred through
POST /json/messages/flags.
"""
self.login("hamlet@zulip.com")
message_ids = [self.send_message("hamlet@zulip.com", "hamlet@zulip.com",
Recipient.PERSONAL, "test")]
# Starring private messages you didn't receive fails.
self.login("cordelia@zulip.com")
result = self.change_star(message_ids)
self.assert_json_error(result, 'Invalid message(s)')
def test_change_star_private_stream_security(self):
# type: () -> None
stream_name = "private_stream"
self.make_stream(stream_name, invite_only=True)
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.login("hamlet@zulip.com")
message_ids = [self.send_message("hamlet@zulip.com", stream_name,
Recipient.STREAM, "test")]
# Starring private stream messages you received works
result = self.change_star(message_ids)
self.assert_json_success(result)
# Starring private stream messages you didn't receive fails.
self.login("cordelia@zulip.com")
result = self.change_star(message_ids)
self.assert_json_error(result, 'Invalid message(s)')
def test_new_message(self):
# type: () -> None
"""
New messages aren't starred.
"""
test_email = "hamlet@zulip.com"
self.login(test_email)
content = "Test message for star"
self.send_message(test_email, "Verona", Recipient.STREAM,
content=content)
sent_message = UserMessage.objects.filter(
user_profile=get_user_profile_by_email(test_email)
).order_by("id").reverse()[0]
self.assertEqual(sent_message.message.content, content)
self.assertFalse(sent_message.flags.starred)
class AttachmentTest(ZulipTestCase):
def test_basics(self):
# type: () -> None
self.assertFalse(Message.content_has_attachment('whatever'))
self.assertFalse(Message.content_has_attachment('yo http://foo.com'))
self.assertTrue(Message.content_has_attachment('yo\n https://staging.zulip.com/user_uploads/'))
self.assertTrue(Message.content_has_attachment('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.jpg foo'))
self.assertFalse(Message.content_has_image('whatever'))
self.assertFalse(Message.content_has_image('yo http://foo.com'))
self.assertFalse(Message.content_has_image('yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp", ".JPG"]:
content = 'yo\n /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.%s foo' % (ext,)
self.assertTrue(Message.content_has_image(content))
self.assertFalse(Message.content_has_link('whatever'))
self.assertTrue(Message.content_has_link('yo\n http://foo.com'))
self.assertTrue(Message.content_has_link('yo\n https://example.com?spam=1&eggs=2'))
self.assertTrue(Message.content_has_link('yo /user_uploads/1/wEAnI-PEmVmCjo15xxNaQbnj/photo-10.pdf foo'))
def test_claim_attachment(self):
# type: () -> None
# Create dummy DB entry
sender_email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email(sender_email)
dummy_files = [
('zulip.txt', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt'),
('temp_file.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py'),
('abc.py', '1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py')
]
for file_name, path_id in dummy_files:
create_attachment(file_name, path_id, user_profile)
# Send message referring the attachment
self.subscribe_to_stream(sender_email, "Denmark")
body = "Some files here ...[zulip.txt](http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)" + \
"http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py.... Some more...." + \
"http://localhost:9991/user_uploads/1/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py"
self.send_message(sender_email, "Denmark", Recipient.STREAM, body, "test")
for file_name, path_id in dummy_files:
attachment = Attachment.objects.get(path_id=path_id)
self.assertTrue(attachment.is_claimed())
class LogDictTest(ZulipTestCase):
def test_to_log_dict(self):
# type: () -> None
email = 'hamlet@zulip.com'
stream_name = 'Denmark'
topic_name = 'Copenhagen'
content = 'find me some good coffee shops'
# self.login("hamlet@zulip.com")
message_id = self.send_message(email, stream_name,
message_type=Recipient.STREAM,
subject=topic_name,
content=content)
message = Message.objects.get(id=message_id)
dct = message.to_log_dict()
self.assertTrue('timestamp' in dct)
self.assertEqual(dct['content'], 'find me some good coffee shops')
self.assertEqual(dct['id'], message.id)
self.assertEqual(dct['recipient'], 'Denmark')
self.assertEqual(dct['sender_domain'], 'zulip.com')
self.assertEqual(dct['sender_email'], 'hamlet@zulip.com')
self.assertEqual(dct['sender_full_name'], 'King Hamlet')
self.assertEqual(dct['sender_id'], get_user_profile_by_email(email).id)
self.assertEqual(dct['sender_short_name'], 'hamlet')
self.assertEqual(dct['sending_client'], 'test suite')
self.assertEqual(dct['subject'], 'Copenhagen')
self.assertEqual(dct['type'], 'stream')
class CheckMessageTest(ZulipTestCase):
def test_basic_check_message_call(self):
# type: () -> None
sender = get_user_profile_by_email('othello@zulip.com')
client = make_client(name="test suite")
stream_name = u'España y Francia'
self.make_stream(stream_name)
message_type_name = 'stream'
message_to = None
message_to = [stream_name]
subject_name = 'issue'
message_content = 'whatever'
ret = check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
self.assertEqual(ret['message'].sender.email, 'othello@zulip.com')
def test_bot_pm_feature(self):
# type: () -> None
"""We send a PM to a bot's owner if their bot sends a message to
an unsubscribed stream"""
parent = get_user_profile_by_email('othello@zulip.com')
bot = do_create_user(
email='othello-bot@zulip.com',
password='',
realm=parent.realm,
full_name='',
short_name='',
active=True,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=parent
)
bot.last_reminder = None
sender = bot
client = make_client(name="test suite")
stream_name = u'Россия'
message_type_name = 'stream'
message_to = None
message_to = [stream_name]
subject_name = 'issue'
message_content = 'whatever'
old_count = message_stream_count(parent)
        # Sending to a stream that doesn't exist sends a reminder to
        # the sender
with self.assertRaises(JsonableError):
check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
new_count = message_stream_count(parent)
self.assertEqual(new_count, old_count + 1)
self.assertIn("that stream does not yet exist.", most_recent_message(parent).content)
# Try sending to stream that exists with no subscribers soon
# after; due to rate-limiting, this should send nothing.
self.make_stream(stream_name)
ret = check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
new_count = message_stream_count(parent)
self.assertEqual(new_count, old_count + 1)
        # Try sending to the stream again, long enough after the last
        # reminder; this should send an error to the bot owner that the
        # stream has no subscribers
sender.last_reminder = sender.last_reminder - datetime.timedelta(hours=1)
sender.save(update_fields=["last_reminder"])
ret = check_message(sender, client, message_type_name, message_to,
subject_name, message_content)
new_count = message_stream_count(parent)
self.assertEqual(new_count, old_count + 2)
self.assertEqual(ret['message'].sender.email, 'othello-bot@zulip.com')
self.assertIn("there are no subscribers to that stream", most_recent_message(parent).content)
|
Juanvulcano/zulip
|
zerver/tests/test_messages.py
|
Python
|
apache-2.0
| 72,787 | 0.001924 |
#
# Copyright (c) 2009--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
from optparse import Option, OptionParser
from spacewalk.common import rhnTB
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.server import rhnSQL
import satCerts
DEFAULT_TRUSTED_CERT = 'RHN-ORG-TRUSTED-SSL-CERT'
def fetchTraceback(method=None, req=None, extra=None):
""" a cheat for snagging just the string value of a Traceback
NOTE: this tool may be needed for RHN Satellite 3.2 as well,
              which doesn't have a fetchTraceback. So... this is for
              compatibility.
"""
from cStringIO import StringIO
exc = StringIO()
rhnTB.Traceback(method=method, req=req, mail=0, ostream=exc,
extra=extra, severity=None)
return exc.getvalue()
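# Assumed usage sketch for fetchTraceback (the try/except and the function
# called inside it are illustrative only, not part of this module):
#   try:
#       do_something_risky()
#   except Exception:
#       sys.stderr.write(fetchTraceback())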
def processCommandline():
initCFG('server.satellite')
options = [
Option('--ca-cert', action='store', default=DEFAULT_TRUSTED_CERT, type="string", help='public CA certificate, default is %s' % DEFAULT_TRUSTED_CERT),
Option('--label', action='store', default='RHN-ORG-TRUSTED-SSL-CERT', type="string", help='FOR TESTING ONLY - alternative database label for this CA certificate, default is "RHN-ORG-TRUSTED-SSL-CERT"'),
Option('-v','--verbose', action='count', help='be verbose (accumulable: -vvv means "be *really* verbose").'),
]
values, args = OptionParser(option_list=options).parse_args()
# we take no extra commandline arguments that are not linked to an option
if args:
msg = ("ERROR: these arguments make no sense in this context (try "
"--help): %s\n" % repr(args))
raise ValueError(msg)
if not os.path.exists(values.ca_cert):
sys.stderr.write("ERROR: can't find CA certificate at this location: "
"%s\n" % values.ca_cert)
sys.exit(10)
try:
db_backend = CFG.DB_BACKEND
db_host = CFG.DB_HOST
db_port = CFG.DB_PORT
db_user = CFG.DB_user
db_password = CFG.DB_PASSWORD
db_name = CFG.DB_NAME
rhnSQL.initDB(backend=db_backend, host=db_host, port=db_port,
username=db_user, password=db_password, database=db_name)
except:
sys.stderr.write("""\
ERROR: there was a problem trying to initialize the database:
%s\n""" % fetchTraceback())
sys.exit(11)
if values.verbose:
print 'Public CA SSL certificate: %s' % values.ca_cert
return values
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def main():
""" main routine
10 CA certificate not found
11 DB initialization failure
12 no Organization ID. Something very bad is going on.
13 Couldn't insert the certificate for whatever reason.
"""
values = processCommandline()
def writeError(e):
sys.stderr.write('\nERROR: %s\n' % e)
try:
satCerts.store_rhnCryptoKey(values.label, values.ca_cert, verbosity=values.verbose)
except satCerts.NoOrgIdError, e:
writeError("no organization ID!?!\n\n%s\n" % fetchTraceback())
sys.exit(12)
except satCerts.CaCertInsertionError, e:
writeError("no organization ID!?!\n\n%s\n" % fetchTraceback())
sys.exit(13)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
sys.stderr.write('\nWARNING: intended to be wrapped by another executable\n'
' calling program.\n')
sys.exit(main() or 0)
#===============================================================================
|
colloquium/spacewalk
|
backend/satellite_tools/rhn_ssl_dbstore.py
|
Python
|
gpl-2.0
| 4,252 | 0.005174 |
# -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | 01001110 01100101 01110100 01111010 01101111 01100010 |
# | |
# | Netzob : Inferring communication protocols |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : http://www.netzob.org |
# | @contact : contact@netzob.org |
# | @sponsors : Amossys, http://www.amossys.fr |
# | Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | File contributors : |
# | - Georges Bossert <georges.bossert (a) supelec.fr> |
# | - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Standard library imports |
# +---------------------------------------------------------------------------+
import random
import os
from bitarray import bitarray
# +---------------------------------------------------------------------------+
# | Related third party imports |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Local application imports |
# +---------------------------------------------------------------------------+
from netzob.Common.Models.Types.AbstractType import AbstractType
class Raw(AbstractType):
"""Raw netzob data type expressed in bytes.
For instance, we can use this type to parse any raw field of 2 bytes:
>>> from netzob.all import *
>>> f = Field(Raw(nbBytes=2))
or with a specific value (default is little endianness)
>>> f = Field(Raw('\x01\x02\x03'))
>>> print f.domain.dataType
Raw='\\x01\\x02\\x03' ((0, 24))
>>> f.domain.dataType.endianness = AbstractType.ENDIAN_BIG
>>> print f.domain.dataType
Raw='\\x01\\x02\\x03' ((0, 24))
"""
def __init__(self, value=None, nbBytes=None, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
if value is not None and not isinstance(value, bitarray):
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
value = TypeConverter.convert(value, Raw, BitArray)
nbBits = self._convertNbBytesinNbBits(nbBytes)
super(Raw, self).__init__(self.__class__.__name__, value, nbBits, unitSize=unitSize, endianness=endianness, sign=sign)
def __str__(self):
if self.value is not None:
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
from netzob.Common.Models.Types.HexaString import HexaString
return "{0}={1} ({2})".format(self.typeName, repr(TypeConverter.convert(self.value, BitArray, Raw)), self.size)
else:
return "{0}={1} ({2})".format(self.typeName, self.value, self.size)
def __repr__(self):
if self.value is not None:
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
return str(TypeConverter.convert(self.value, BitArray, self.__class__))
else:
return str(self.value)
def _convertNbBytesinNbBits(self, nbBytes):
nbMinBit = None
nbMaxBit = None
if nbBytes is not None:
if isinstance(nbBytes, int):
nbMinBit = nbBytes * 8
nbMaxBit = nbMinBit
else:
if nbBytes[0] is not None:
nbMinBit = nbBytes[0] * 8
if nbBytes[1] is not None:
nbMaxBit = nbBytes[1] * 8
return (nbMinBit, nbMaxBit)
def generate(self, generationStrategy=None):
"""Generates a random Raw that respects the requested size.
>>> from netzob.all import *
>>> a = Raw(nbBytes=(10))
>>> gen = a.generate()
>>> print len(gen)
80
>>> from netzob.all import *
>>> a = Raw(nbBytes=(10, 20))
>>> gen = a.generate()
        >>> print 80<=len(gen) and len(gen)<=160
True
"""
from netzob.Common.Models.Types.TypeConverter import TypeConverter
from netzob.Common.Models.Types.BitArray import BitArray
minSize, maxSize = self.size
if maxSize is None:
maxSize = AbstractType.MAXIMUM_GENERATED_DATA_SIZE
if minSize is None:
minSize = 0
generatedSize = random.randint(minSize, maxSize)
return TypeConverter.convert(os.urandom(generatedSize / 8), Raw, BitArray)
@staticmethod
def decode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
return data
@staticmethod
def encode(data, unitSize=AbstractType.defaultUnitSize(), endianness=AbstractType.defaultEndianness(), sign=AbstractType.defaultSign()):
return data
@staticmethod
def canParse(data):
"""Computes if specified data can be parsed as raw which is always the case if the data is at least 1 length and aligned on a byte.
>>> from netzob.all import *
>>> Raw.canParse(TypeConverter.convert("hello netzob", ASCII, BitArray))
True
The ascii table is defined from 0 to 127:
>>> Raw.canParse(TypeConverter.convert(128, Decimal, BitArray, src_sign=AbstractType.SIGN_UNSIGNED))
True
:param data: the data to check
:type data: python raw
:return: True if data can be parsed as a Raw which is always the case (if len(data)>0)
:rtype: bool
:raise: TypeError if the data is None
"""
if data is None:
raise TypeError("data cannot be None")
if len(data) == 0:
return False
if len(data) % 8 != 0:
return False
return True
|
dasbruns/netzob
|
src/netzob/Common/Models/Types/Raw.py
|
Python
|
gpl-3.0
| 7,876 | 0.001398 |
import unittest
import maya.cmds as mc
import vrayformayaUtils as vfm
class TestMeshAttributes(unittest.TestCase):
"""
This is a generic TestCase for most v-ray mesh attributes.
    Note that it doesn't test every possible change, but it should catch overall regressions in the code.
"""
def setUp(self):
self.mesh = mc.polyCube()[0]
def test_subdivision(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape)))
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=False)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), False)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=True, vraySubdivEnable=True)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vraySubdivEnable".format(shape)), True)
vfm.attributes.vray_subdivision(self.mesh, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
# Apply to transform without smart convert (should not work)
vfm.attributes.vray_subdivision(self.mesh, state=True, smartConvert=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
for shape in shapes:
vfm.attributes.vray_subdivision(shape, state=True, smartConvert=False)
self.assertTrue(mc.objExists("{0}.vraySubdivEnable".format(shape)))
vfm.attributes.vray_subdivision(shape, state=False, smartConvert=False)
self.assertFalse(mc.objExists("{0}.vraySubdivEnable".format(shape)))
def test_vray_subquality(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
# should run without errors:
vfm.attributes.vray_subquality(transform, vrayEdgeLength=1, vrayMaxSubdivs=1, vrayOverrideGlobalSubQual=1, vrayViewDep=1)
vfm.attributes.vray_subquality(transform, vrayEdgeLength=0, vrayMaxSubdivs=0, vrayOverrideGlobalSubQual=0, vrayViewDep=0)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vrayEdgeLength".format(shape)))
self.assertTrue(mc.objExists("{0}.vrayMaxSubdivs".format(shape)))
self.assertTrue(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape)))
self.assertTrue(mc.objExists("{0}.vrayViewDep".format(shape)))
vfm.attributes.vray_subquality(shapes, smartConvert=False, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayEdgeLength".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayMaxSubdivs".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayOverrideGlobalSubQual".format(shape)))
self.assertFalse(mc.objExists("{0}.vrayViewDep".format(shape)))
def test_vray_user_attributes(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
value = "Testing this attribute"
vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value)
value2 = "Aaaaaaap"
for shape in shapes:
self.assertNotEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2)
vfm.attributes.vray_user_attributes(transform, vrayUserAttributes=value2)
for shape in shapes:
self.assertEqual(mc.getAttr("{0}.vrayUserAttributes".format(shape)), value2)
vfm.attributes.vray_user_attributes(shapes, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayUserAttributes".format(shape)))
def test_vray_displacement(self):
transform = self.mesh
shapes = mc.listRelatives(transform, children=True, shapes=True)
vfm.attributes.vray_displacement(transform, vrayDisplacementAmount=10)
vfm.attributes.vray_displacement(transform, vrayDisplacementShift=5)
vfm.attributes.vray_displacement(transform, vrayDisplacementType=2, vrayDisplacementUseBounds=True,
vrayEnableWaterLevel=True, vrayWaterLevel=2.0)
for shape in shapes:
self.assertTrue(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
vfm.attributes.vray_displacement(shapes, state=False)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
vfm.attributes.vray_displacement(shapes, state=0)
for shape in shapes:
self.assertFalse(mc.objExists("{0}.vrayDisplacementNone".format(shape)))
def tearDown(self):
mc.delete(self.mesh)
#import unittest
#import vrayformayaUtils_tests.attributes_tests as attrTest
#reload(attrTest)
#suite = unittest.TestLoader().loadTestsFromTestCase(attrTest.TestMeshAttributes)
#unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
unittest.main()
|
BigRoy/vrayformayaUtils
|
tests/attributes_tests.py
|
Python
|
gpl-2.0
| 5,376 | 0.006696 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Post-installation configuration helpers
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""Common code for scripting installation of a chart of accounts
into a company.
The function you probably want to use is setup_company_accounts()
"""
from datetime import date
import logging
from . import confutil
_logger = logging.getLogger(__name__)
def setup_company_accounts(cr, registry, uid, company, chart_template, code_digits=None, context=None):
"""This sets up accounts, fiscal year and periods for the given company.
company: A res.company object
chart_template: An account.chart.template object
code_digits: The number of digits (the default is usually 6)
context: e.g. {'lang': 'en_GB', 'tz': False, 'uid': openerp.SUPERUSER_ID}
A financial year is set up starting this year on 1st Jan and ending this year on 31st Dec.
"""
unconfigured_companies = unconfigured_company_ids(cr, registry, uid, context=context)
if company.id in unconfigured_companies:
setup_chart_of_accounts(cr, registry, uid,
company_id=company.id,
chart_template_id=chart_template.id,
code_digits=code_digits,
context=context,
)
today = date.today()
fy_name = today.strftime('%Y')
fy_code = 'FY' + fy_name
account_start = today.strftime('%Y-01-01')
account_end = today.strftime('%Y-12-31')
create_fiscal_year(cr, registry, uid,
company_id=company.id,
name=fy_name,
code=fy_code,
start_date=account_start,
end_date=account_end,
context=context,
)
confutil.set_account_settings(cr, registry, uid,
company=company,
changes={
'date_start': account_start,
'date_stop': account_end,
'period': 'month',
},
context=context,
)
def unconfigured_company_ids(cr, registry, uid, context=None):
"""Return list of ids of companies without a chart of accounts.
"""
account_installer = registry['account.installer']
return account_installer.get_unconfigured_cmp(cr, uid, context=context)
def setup_chart_of_accounts(cr, registry, uid, company_id, chart_template_id, code_digits=None, context=None):
chart_wizard = registry['wizard.multi.charts.accounts']
defaults = chart_wizard.default_get(cr, uid, ['bank_accounts_id', 'currency_id'], context=context)
bank_accounts_spec = defaults.pop('bank_accounts_id')
bank_accounts_id = [(0, False, i) for i in bank_accounts_spec]
data = defaults.copy()
data.update({
"chart_template_id": chart_template_id,
'company_id': company_id,
'bank_accounts_id': bank_accounts_id,
})
onchange = chart_wizard.onchange_chart_template_id(cr, uid, [], data['chart_template_id'], context=context)
data.update(onchange['value'])
if code_digits:
data.update({'code_digits': code_digits})
conf_id = chart_wizard.create(cr, uid, data, context=context)
chart_wizard.execute(cr, uid, [conf_id], context=context)
def create_fiscal_year(cr, registry, uid, company_id, name, code, start_date, end_date, context=None):
fy_model = registry['account.fiscalyear']
fy_data = fy_model.default_get(cr, uid, ['state', 'company_id'], context=context).copy()
fy_data.update({
'company_id': company_id,
'name': name,
'code': code,
'date_start': start_date,
'date_stop': end_date,
})
fy_id = fy_model.create(cr, uid, fy_data, context=context)
fy_model.create_period(cr, uid, [fy_id], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
OpusVL/odoo-confutil
|
confutil/account_setup.py
|
Python
|
agpl-3.0
| 4,594 | 0.00566 |
import cherrypy, json
from bottle import request, get
from ring_api.server.api import ring, user
class Root(object):
def __init__(self, dring):
self.dring = dring
self.user = user.User(dring)
@cherrypy.expose
def index(self):
return 'todo'
@cherrypy.expose
def routes(self):
return 'todo'
|
Bl4ckb0ne/ring-api
|
ring_api/extra/servers/cherrypy/api/root.py
|
Python
|
gpl-3.0
| 347 | 0.008646 |
import os
import shutil
usage = {}
usage['import'] = """catmap import <mkm-file>
Open a *.mkm project file and work with it interactively.
"""
def get_options(args=None, get_parser=False):
import optparse
import os
from glob import glob
import catmap
parser = optparse.OptionParser(
'Usage: %prog [help] ('
+ '|'.join(sorted(usage.keys()))
+ ') [options]',
version=catmap.__version__)
if args is not None:
options, args = parser.parse_args(args.split())
else:
options, args = parser.parse_args()
if len(args) < 1:
parser.error('Command expected')
if get_parser:
return options, args, parser
else:
return options, args
def match_keys(arg, usage, parser):
"""Try to match part of a command against
the set of commands from usage. Throws
an error if not successful.
"""
possible_args = [key for key in usage if key.startswith(arg)]
if len(possible_args) == 0:
parser.error('Command "%s" not understood.' % arg)
elif len(possible_args) > 1:
parser.error(('Command "%s" ambiguous.\n'
'Could be one of %s\n\n') % (arg, possible_args))
else:
return possible_args[0]
def main(args=None):
"""The CLI main entry point function.
    The optional argument args can be used to
    directly supply command line arguments, e.g.
    $ catmap <args>
    otherwise the arguments will be taken from STDIN.
"""
from glob import glob
options, args, parser = get_options(args, get_parser=True)
if not args[0] in usage.keys():
args[0] = match_keys(args[0], usage, parser)
elif args[0] == 'import':
if len(args) < 2:
parser.error('mkm filename expected.')
from catmap import ReactionModel
mkm_file = args[1]
global model
model = ReactionModel(setup_file=mkm_file)
sh(banner='Note: model = catmap.ReactionModel(setup_file=\'%s\')\n# do model.run()\nfor a fully initialized model.' %
args[1])
def sh(banner):
"""Wrapper around interactive ipython shell
    that factors out ipython version dependencies.
"""
from distutils.version import LooseVersion
import IPython
if hasattr(IPython, 'release'):
try:
from IPython.terminal.embed import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
try:
from IPython.frontend.terminal.embed \
import InteractiveShellEmbed
InteractiveShellEmbed(banner1=banner)()
except ImportError:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
else:
from IPython.Shell import IPShellEmbed
IPShellEmbed(banner=banner)()
|
charlietsai/catmap
|
catmap/cli.py
|
Python
|
gpl-3.0
| 2,874 | 0.000348 |
# Copyright (c) weykent <weykent@weasyl.com>
# See COPYING for details.
import pytest
import runit_sv as _runit_sv_module
SETTABLE_MASK = _runit_sv_module.SETTABLE_MASK
idempotent = pytest.mark.idempotent
def pytest_generate_tests(metafunc):
if 'idempotency_state' not in metafunc.fixturenames:
return
states = ['regular']
if getattr(metafunc.function, 'idempotent', False):
states.append('checked')
metafunc.parametrize(('idempotency_state'), states)
def assert_no_local_failure(contacted):
assert not contacted['local'].get('failed')
def assert_local_failure(contacted):
assert contacted['local'].get('failed')
class FakeAnsibleModuleBailout(BaseException):
def __init__(self, success, params):
super(FakeAnsibleModuleBailout, self).__init__(success, params)
self.success = success
self.params = params
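# FakeAnsibleModule mimics just enough of Ansible's AnsibleModule interface
# (params, check_mode, exit_json/fail_json) for runit_sv.main() to be driven
# in-process by the 'fake' fixture variant below.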
class FakeAnsibleModule(object):
def __init__(self, params, check_mode):
self.params = params
self.check_mode = check_mode
def __call__(self, argument_spec, supports_check_mode):
self.argument_spec = argument_spec
for name, spec in self.argument_spec.iteritems():
if name not in self.params:
self.params[name] = spec.get('default')
return self
def exit_json(self, **params):
raise FakeAnsibleModuleBailout(success=True, params=params)
def fail_json(self, **params):
raise FakeAnsibleModuleBailout(success=False, params=params)
def setup_change_checker(params):
must_change = params.pop('_must_change', False)
must_not_change = params.pop('_must_not_change', False)
if must_change and must_not_change:
raise ValueError('invalid request: must change and must not change')
if must_change:
def check(changed):
assert changed
elif must_not_change:
def check(changed):
assert not changed
else:
check = None
return check
@pytest.fixture(params=['real', 'fake'])
def runit_sv(request, idempotency_state):
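    # 'real' drives the module through an actual Ansible run; 'fake' calls main()
    # directly with FakeAnsibleModule so the same assertions run without a runner.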
if request.param == 'real':
ansible_module = request.getfuncargvalue('ansible_module')
def do(**params):
should_fail = params.pop('_should_fail', False)
params['_runner_kwargs'] = {
'check': params.pop('_check', False),
}
check_change = setup_change_checker(params)
contacted = ansible_module.runit_sv(**params)
if should_fail:
assert_local_failure(contacted)
else:
assert_no_local_failure(contacted)
if check_change is not None:
check_change(contacted['local']['changed'])
elif request.param == 'fake':
def do(**params):
should_fail = params.pop('_should_fail', False)
check = params.pop('_check', False)
check_change = setup_change_checker(params)
module = FakeAnsibleModule(params, check)
with pytest.raises(FakeAnsibleModuleBailout) as excinfo:
_runit_sv_module.main(module)
assert excinfo.value.success != should_fail
if check_change is not None:
check_change(excinfo.value.params['changed'])
else:
raise ValueError('unknown param', request.param)
if idempotency_state == 'checked':
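        # Idempotency check: run the module twice with identical parameters; the
        # first run must report a change and the second must not.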
_do = do
def do(**params):
_do(_must_change=True, **params)
_do(_must_not_change=True, **params)
return do
@pytest.fixture
def basedir(tmpdir):
tmpdir.join('sv').mkdir()
tmpdir.join('service').mkdir()
tmpdir.join('init.d').mkdir()
return tmpdir
def base_directories(basedir, **overrides):
ret = {'sv_directory': [basedir.join('sv').strpath],
'service_directory': [basedir.join('service').strpath],
'init_d_directory': [basedir.join('init.d').strpath]}
ret.update(overrides)
return ret
def settable_mode(path):
return path.stat().mode & SETTABLE_MASK
def assert_file(path, contents, mode):
assert path.read() == contents and settable_mode(path) == mode
@idempotent
def test_basic_runscript(runit_sv, basedir):
"""
A basic invocation with name and runscript creates the sv directory
containing just the runscript, links the service directory, and links an
LSB service.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 1
assert_file(sv.join('run'), contents='spam eggs', mode=0o755)
assert basedir.join('service', 'testsv').readlink() == sv.strpath
assert basedir.join('init.d', 'testsv').readlink() == '/usr/bin/sv'
@idempotent
def test_log_runscript(runit_sv, basedir):
"""
Adding a log_runscript creates a log/run script as well.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
log_runscript='eggs spam',
**base_directories(basedir))
sv_log = basedir.join('sv', 'testsv', 'log')
assert len(sv_log.listdir()) == 1
assert_file(sv_log.join('run'), contents='eggs spam', mode=0o755)
@idempotent
def test_supervise_link(runit_sv, basedir):
"""
The supervise_link option will create a link to some arbitrary location.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
supervise_link='/spam/eggs',
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 2
assert sv.join('supervise').readlink() == '/spam/eggs'
@idempotent
def test_log_supervise_link(runit_sv, basedir):
"""
The log_supervise_link option will also create a link to some arbitrary
location.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
log_runscript='eggs spam',
log_supervise_link='/eggs/spam',
**base_directories(basedir))
sv_log = basedir.join('sv', 'testsv', 'log')
assert len(sv_log.listdir()) == 2
assert sv_log.join('supervise').readlink() == '/eggs/spam'
@idempotent
def test_extra_files(runit_sv, basedir):
"""
Adding extra_files will copy additional files into the sv directory.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
'eggs': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 3
assert_file(sv.join('spam'), contents='eggs', mode=0o644)
assert_file(sv.join('eggs'), contents='spam', mode=0o644)
@idempotent
def test_extra_scripts(runit_sv, basedir):
"""
Adding extra_scripts will copy additional scripts into the sv directory.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_scripts={
'spam': 'eggs',
'eggs': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 3
assert_file(sv.join('spam'), contents='eggs', mode=0o755)
assert_file(sv.join('eggs'), contents='spam', mode=0o755)
@idempotent
def test_extra_files_and_scripts(runit_sv, basedir):
"""
    Adding both extra_files and extra_scripts will create both additional files
and additional scripts.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
'eggs': 'spam',
},
extra_scripts={
'spams': 'eggs',
'eggss': 'spam',
},
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 5
assert_file(sv.join('spam'), contents='eggs', mode=0o644)
assert_file(sv.join('eggs'), contents='spam', mode=0o644)
assert_file(sv.join('spams'), contents='eggs', mode=0o755)
assert_file(sv.join('eggss'), contents='spam', mode=0o755)
def test_no_overlapping_extra_files_and_scripts(runit_sv, basedir):
"""
If extra_files and extra_scripts both touch the same path, there's an
immediate failure.
"""
runit_sv(
_should_fail=True,
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
},
extra_scripts={
'spam': 'eggs',
},
**base_directories(basedir))
def test_no_overlapping_extra_scripts_with_runscripts(runit_sv, basedir):
"""
    Similarly, if extra_scripts specifies the name of a runscript, there's an
immediate failure.
"""
runit_sv(
_should_fail=True,
name='testsv',
runscript='spam eggs',
extra_scripts={
'run': 'eggs',
},
**base_directories(basedir))
@idempotent
def test_extra_files_and_scripts_with_umask(runit_sv, basedir):
"""
Setting a umask will mask the modes used on all files.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
extra_files={
'spam': 'eggs',
},
extra_scripts={
'eggs': 'spam',
},
umask=0o007,
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 3
assert_file(sv.join('spam'), contents='eggs', mode=0o660)
assert_file(sv.join('eggs'), contents='spam', mode=0o770)
assert_file(sv.join('run'), contents='spam eggs', mode=0o770)
@idempotent
def test_envdir(runit_sv, basedir):
"""
Adding an envdir option will create an env directory.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
envdir={
'spam': 'eggs',
'eggs': 'spam',
},
**base_directories(basedir))
envdir = basedir.join('sv', 'testsv', 'env')
assert len(envdir.listdir()) == 2
assert_file(envdir.join('spam'), contents='eggs', mode=0o644)
assert_file(envdir.join('eggs'), contents='spam', mode=0o644)
@idempotent
def test_no_lsb_service(runit_sv, basedir):
"""
Setting lsb_service=absent will prevent the creation of an LSB-style init.d
script.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
lsb_service='absent',
**base_directories(basedir))
assert not basedir.join('init.d', 'testsv').exists()
@idempotent
def test_no_lsb_service_or_service_directory(runit_sv, basedir):
"""
Setting state=absent will prevent the creation of both a service directory
and an LSB-style init.d script.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
state='absent',
**base_directories(basedir))
assert not basedir.join('service', 'testsv').exists()
assert not basedir.join('init.d', 'testsv').exists()
@idempotent
def test_down_state(runit_sv, basedir):
"""
Setting state=down creates everything as usual, but marks a service as down
by default.
"""
runit_sv(
name='testsv',
runscript='spam eggs',
state='down',
**base_directories(basedir))
sv = basedir.join('sv', 'testsv')
assert len(sv.listdir()) == 2
assert_file(sv.join('run'), contents='spam eggs', mode=0o755)
assert_file(sv.join('down'), contents='', mode=0o644)
assert basedir.join('service', 'testsv').readlink() == sv.strpath
assert basedir.join('init.d', 'testsv').readlink() == '/usr/bin/sv'
def test_check_skips_everything(runit_sv, basedir):
"""
If the module is run in check mode, nothing is done.
"""
runit_sv(
_check=True,
name='testsv',
runscript='spam eggs',
**base_directories(basedir))
assert len(basedir.join('sv').listdir()
+ basedir.join('service').listdir()
+ basedir.join('init.d').listdir()) == 0
def test_log_supervise_link_without_log_runscript(runit_sv, basedir):
"""
If log_supervise_link is specified without log_runscript also specified,
the module will fail.
"""
runit_sv(
_should_fail=True,
name='testsv',
runscript='spam eggs',
log_supervise_link='/eggs/spam',
**base_directories(basedir))
def test_lsb_service_present_with_state_absent(runit_sv, basedir):
"""
If lsb_service is set to present when state is set to absent, the module
will fail.
"""
runit_sv(
_should_fail=True,
name='testsv',
runscript='spam eggs',
lsb_service='present',
state='absent',
**base_directories(basedir))
def test_lsb_service_present_with_no_init_d(runit_sv, basedir):
"""
If lsb_service is set to present when there are no init.d directories, the
module will fail.
"""
runit_sv(
_should_fail=True,
name='testsv',
runscript='spam eggs',
lsb_service='present',
**base_directories(basedir, init_d_directory=[]))
def test_supervise_already_exists(runit_sv, basedir):
"""
If a supervise directory is in the service directory, it will continue to
exist there after runit_sv finishes running.
"""
supervise = basedir.join('sv', 'testsv', 'supervise')
supervise.ensure(dir=True)
runit_sv(
name='testsv',
runscript='spam eggs',
**base_directories(basedir))
assert supervise.check(dir=True)
def test_log_supervise_already_exists(runit_sv, basedir):
"""
If a supervise directory is in the service's log directory, it will
continue to exist there after runit_sv finishes running.
"""
log_supervise = basedir.join('sv', 'testsv', 'log', 'supervise')
log_supervise.ensure(dir=True)
runit_sv(
name='testsv',
runscript='spam eggs',
log_runscript='eggs spam',
**base_directories(basedir))
assert log_supervise.check(dir=True)
@idempotent
@pytest.mark.parametrize('extra_stuff', ['extra_files', 'extra_scripts'])
def test_extra_stuff_does_not_clobber_service_names(
runit_sv, basedir, extra_stuff):
"""
Passing extra_files or extra_scripts does not interfere with the creation
of the service or init.d links.
This is a regression test; at one point, this failed.
"""
kwargs = base_directories(basedir)
kwargs[extra_stuff] = {'spam': 'eggs'}
runit_sv(
name='testsv',
runscript='spam eggs',
**kwargs)
sv = basedir.join('sv', 'testsv')
assert basedir.join('service', 'testsv').readlink() == sv.strpath
assert basedir.join('init.d', 'testsv').readlink() == '/usr/bin/sv'
|
weykent/ansible-runit-sv
|
tests/test_runit_sv.py
|
Python
|
isc
| 14,619 | 0 |
"""
Parse string to create Regex object.
TODO:
- Support \: \001, \x00, \0, \ \[, \(, \{, etc.
- Support Python extensions: (?:...), (?P<name>...), etc.
- Support \<, \>, \s, \S, \w, \W, \Z <=> $, \d, \D, \A <=> ^, \b, \B, [[:space:]], etc.
"""
from hachoir_regex import (RegexString, RegexEmpty, RegexRepeat,
RegexDot, RegexWord, RegexStart, RegexEnd,
RegexRange, RegexRangeItem, RegexRangeCharacter)
import re
REGEX_COMMAND_CHARACTERS = '.^$[](){}|+?*\\'
def parseRange(text, start):
r"""
>>> parseRange('[a]b', 1)
(<RegexRange '[a]'>, 3)
>>> parseRange('[a-z]b', 1)
(<RegexRange '[a-z]'>, 5)
>>> parseRange('[^a-z-]b', 1)
(<RegexRange '[^a-z-]'>, 7)
>>> parseRange('[^]-]b', 1)
(<RegexRange '[^]-]'>, 5)
>>> parseRange(r'[\]abc]', 1)
(<RegexRange '[]a-c]'>, 7)
>>> parseRange(r'[a\-x]', 1)
(<RegexRange '[ax-]'>, 6)
"""
index = start
char_range = []
exclude = False
if text[index] == '^':
exclude = True
index += 1
if text[index] == ']':
char_range.append(RegexRangeCharacter(']'))
index += 1
while index < len(text) and text[index] != ']':
if index+1 < len(text) \
and text[index] == '\\':
char_range.append(RegexRangeCharacter(text[index+1]))
index += 2
elif index+1 < len(text) \
and text[index] == '-' and text[index+1] == ']':
break
elif index+3 < len(text) \
and text[index+1] == '-' \
and text[index+2] != ']':
char_range.append(RegexRangeItem(ord(text[index]), ord(text[index+2])))
index += 3
else:
char_range.append(RegexRangeCharacter(text[index]))
index += 1
if index < len(text) and text[index] == '-':
char_range.append(RegexRangeCharacter('-'))
index += 1
if index == len(text) or text[index] != ']':
raise SyntaxError('Invalid range: %s' % text[start-1:index])
return RegexRange(char_range, exclude), index+1
def parseOr(text, start):
"""
>>> parseOr('(a)', 1)
(<RegexString 'a'>, 3)
>>> parseOr('(a|c)', 1)
(<RegexRange '[ac]'>, 5)
>>> parseOr(' (a|[bc]|d)', 2)
(<RegexRange '[a-d]'>, 11)
"""
index = start
# (?:...): Skip Python prefix '?:'
if text[index:index+2] == '?:':
index += 2
if text[index] == '?':
raise NotImplementedError("Doesn't support Python extension (?...)")
regex = None
while True:
new_regex, index = _parse(text, index, "|)")
if regex:
regex = regex | new_regex
else:
regex = new_regex
if len(text) <= index:
raise SyntaxError('Missing closing parenthesis')
if text[index] == ')':
break
index += 1
index += 1
if regex is None:
regex = RegexEmpty()
return regex, index
REPEAT_REGEX = re.compile("([0-9]+)(,[0-9]*)?}")
def parseRepeat(text, start):
"""
>>> parseRepeat('a{0,1}b', 2)
(0, 1, 6)
>>> parseRepeat('a{12}', 2)
(12, 12, 5)
"""
match = REPEAT_REGEX.match(text, start)
if not match:
raise SyntaxError('Unable to parse repetition '+text[start:])
rmin = int(match.group(1))
if match.group(2):
text = match.group(2)[1:]
if text:
rmax = int(text)
else:
rmax = None
else:
rmax = rmin
return (rmin, rmax, match.end(0))
CHAR_TO_FUNC = {'[': parseRange, '(': parseOr}
CHAR_TO_CLASS = {'.': RegexDot, '^': RegexStart, '$': RegexEnd}
CHAR_TO_REPEAT = {'*': (0, None), '?': (0, 1), '+': (1, None)}
def _parse(text, start=0, until=None):
if len(text) == start:
return RegexEmpty(), 0
index = start
regex = RegexEmpty()
last = None
done = False
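    # 'last' holds the most recently parsed atom so that a following repetition
    # operator (*, +, ?, {m,n}) can be applied to it before it is appended to regex.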
while index < len(text):
char = text[index]
if until and char in until:
done = True
break
if char in REGEX_COMMAND_CHARACTERS:
if char in CHAR_TO_FUNC:
                new_regex, index = CHAR_TO_FUNC[char](text, index+1)
elif char in CHAR_TO_CLASS:
new_regex = CHAR_TO_CLASS[char]()
index += 1
elif char == '{':
rmin, rmax, index = parseRepeat(text, index+1)
new_regex = RegexRepeat(last, rmin, rmax)
last = None
elif char in CHAR_TO_REPEAT:
rmin, rmax = CHAR_TO_REPEAT[char]
if last is None:
raise SyntaxError('Repetition character (%s) without previous expression' % text[index])
new_regex = RegexRepeat(last, rmin, rmax)
last = None
index += 1
elif char == "\\":
index += 1
if index == len(text):
raise SyntaxError("Antislash (\\) without escaped character")
char = text[index]
if char == 'b':
new_regex = RegexWord()
else:
if not(char in REGEX_COMMAND_CHARACTERS or char in " '"):
raise SyntaxError("Operator '\\%s' is not supported" % char)
new_regex = RegexString(char)
index += 1
else:
raise NotImplementedError("Operator '%s' is not supported" % char)
if last:
regex = regex + last
last = new_regex
else:
subtext = text[index]
index += 1
if last:
regex = regex + last
last = RegexString(subtext)
if last:
regex = regex + last
return regex, index
def parse(text):
r"""
>>> parse('')
<RegexEmpty ''>
>>> parse('abc')
<RegexString 'abc'>
>>> parse("chats?")
<RegexAnd 'chats?'>
>>> parse('[bc]d')
<RegexAnd '[bc]d'>
>>> parse("\\.")
<RegexString '\.'>
"""
regex, index = _parse(text)
assert index == len(text)
return regex
if __name__ == "__main__":
import doctest
doctest.testmod()
|
foreni-packages/hachoir-regex
|
hachoir_regex/parser.py
|
Python
|
gpl-2.0
| 6,130 | 0.006199 |
from gpu import *
LAMP_TYPES = [
GPU_DYNAMIC_LAMP_DYNVEC,
GPU_DYNAMIC_LAMP_DYNCO,
GPU_DYNAMIC_LAMP_DYNIMAT,
GPU_DYNAMIC_LAMP_DYNPERSMAT,
GPU_DYNAMIC_LAMP_DYNENERGY,
GPU_DYNAMIC_LAMP_DYNENERGY,
GPU_DYNAMIC_LAMP_DYNCOL,
GPU_DYNAMIC_LAMP_DISTANCE,
GPU_DYNAMIC_LAMP_ATT1,
GPU_DYNAMIC_LAMP_ATT2,
GPU_DYNAMIC_LAMP_SPOTSIZE,
GPU_DYNAMIC_LAMP_SPOTBLEND,
]
MIST_TYPES = [
GPU_DYNAMIC_MIST_ENABLE,
GPU_DYNAMIC_MIST_START,
GPU_DYNAMIC_MIST_DISTANCE,
GPU_DYNAMIC_MIST_INTENSITY,
GPU_DYNAMIC_MIST_TYPE,
GPU_DYNAMIC_MIST_COLOR,
]
WORLD_TYPES = [
GPU_DYNAMIC_HORIZON_COLOR,
GPU_DYNAMIC_AMBIENT_COLOR,
]
MATERIAL_TYPES = [
GPU_DYNAMIC_MAT_DIFFRGB,
GPU_DYNAMIC_MAT_REF,
GPU_DYNAMIC_MAT_SPECRGB,
GPU_DYNAMIC_MAT_SPEC,
GPU_DYNAMIC_MAT_HARD,
GPU_DYNAMIC_MAT_EMIT,
GPU_DYNAMIC_MAT_AMB,
GPU_DYNAMIC_MAT_ALPHA,
]
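# Lookup table mapping Blender GPU dynamic-uniform type constants to short attribute names.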
TYPE_TO_NAME = {
GPU_DYNAMIC_OBJECT_VIEWMAT : 'view_mat',
GPU_DYNAMIC_OBJECT_MAT : 'model_mat',
GPU_DYNAMIC_OBJECT_VIEWIMAT : 'inv_view_mat',
GPU_DYNAMIC_OBJECT_IMAT : 'inv_model_mat',
GPU_DYNAMIC_OBJECT_COLOR : 'color',
GPU_DYNAMIC_OBJECT_AUTOBUMPSCALE : 'auto_bump_scale',
GPU_DYNAMIC_MIST_ENABLE : 'use_mist',
GPU_DYNAMIC_MIST_START : 'start',
GPU_DYNAMIC_MIST_DISTANCE : 'depth',
GPU_DYNAMIC_MIST_INTENSITY : 'intensity',
GPU_DYNAMIC_MIST_TYPE : 'falloff',
GPU_DYNAMIC_MIST_COLOR : 'color',
GPU_DYNAMIC_HORIZON_COLOR : 'horizon_color',
GPU_DYNAMIC_AMBIENT_COLOR : 'ambient_color',
GPU_DYNAMIC_LAMP_DYNVEC : 'dynvec',
GPU_DYNAMIC_LAMP_DYNCO : 'dynco',
GPU_DYNAMIC_LAMP_DYNIMAT : 'dynimat',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'dynpersmat',
GPU_DYNAMIC_LAMP_DYNENERGY : 'energy',
GPU_DYNAMIC_LAMP_DYNCOL : 'color',
GPU_DYNAMIC_LAMP_DISTANCE : 'distance',
GPU_DYNAMIC_LAMP_ATT1 : 'linear_attenuation',
GPU_DYNAMIC_LAMP_ATT2 : 'quadratic_attenuation',
GPU_DYNAMIC_LAMP_SPOTSIZE : 'spot_size',
GPU_DYNAMIC_LAMP_SPOTBLEND : 'spot_blend',
GPU_DYNAMIC_MAT_DIFFRGB : 'diffuse_color',
GPU_DYNAMIC_MAT_REF : 'diffuse_intensity',
GPU_DYNAMIC_MAT_SPECRGB : 'specular_color',
GPU_DYNAMIC_MAT_SPEC : 'specular_intensity',
GPU_DYNAMIC_MAT_HARD : 'specular_hardness',
GPU_DYNAMIC_MAT_EMIT : 'emit',
GPU_DYNAMIC_MAT_AMB : 'ambient',
GPU_DYNAMIC_MAT_ALPHA : 'alpha',
}
TYPE_TO_SEMANTIC = {
GPU_DYNAMIC_LAMP_DYNVEC : 'BL_DYNVEC',
GPU_DYNAMIC_LAMP_DYNCO : 'BL_DYNCO',
GPU_DYNAMIC_LAMP_DYNIMAT : 'BL_DYNIMAT',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'BL_DYNPERSMAT',
CD_ORCO: 'POSITION',
-1: 'NORMAL' # Hack until the gpu module has something for normals
}
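# Converters that normalize GPU uniform values into plain Python types
# (scalars are returned as-is, vectors are turned into lists).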
DATATYPE_TO_CONVERTER = {
GPU_DATA_1I : lambda x : x,
GPU_DATA_1F : lambda x : x,
GPU_DATA_2F : lambda x : list(x),
GPU_DATA_3F : lambda x : list(x),
GPU_DATA_4F : lambda x : list(x),
}
DATATYPE_TO_GLTF_TYPE = {
GPU_DATA_1I : 5124, # INT
GPU_DATA_1F : 5126, # FLOAT
GPU_DATA_2F : 35664, # FLOAT_VEC2
GPU_DATA_3F : 35665, # FLOAT_VEC3
GPU_DATA_4F : 35666, # FLOAT_VEC4
}
|
lukesanantonio/blendergltf
|
gpu_luts.py
|
Python
|
apache-2.0
| 3,110 | 0.018328 |
import os.path
from tornado import ioloop, httpserver, web, websocket, template
from config import GameConfig
OS = os.path.dirname(__file__)
def server_path(uri):
return os.path.join(OS, uri)
def static_path(uri):
return { "path": server_path("static/" + uri) }
level_1 = GameConfig()
class TarmHandler(web.RequestHandler):
def get(self):
self.render(server_path("html/game.html"), config = level_1)
def write_error(self, code, **kwargs):
self.render(server_path("html/error.html"))
class TarmSocket(websocket.WebSocketHandler):
def open(self, *args):
self.stream.set_nodelay(True)
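        # Disable Nagle's algorithm so small game messages are sent immediately.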
print("Socket opened.")
def on_message(self, message):
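        # The browser sends short text commands; each one is answered with an HTML
        # fragment rendered from the matching Tornado template.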
print("Message from browser:", message)
if "load-config" in message:
self.write_message(template.Loader('html').load('config.html').generate(config=level_1))
elif "load-about" in message:
self.write_message(template.Loader('html').load('about.html').generate())
elif "load-audio" in message:
self.write_message(template.Loader('html').load('audio.html').generate())
def start_server():
tarm_app = web.Application(handlers=[
(r"/", TarmHandler),
(r"/socket", TarmSocket),
(r"/images/(.*)", web.StaticFileHandler, static_path("images")),
(r"/textures/(.*)", web.StaticFileHandler, static_path("textures")),
(r"/music/(.*)", web.StaticFileHandler, static_path("audio"))
],
debug=True, gzip=True, static_path=server_path("static"))
httpserver.HTTPServer(tarm_app).listen(8000)
print("Starting server.")
ioloop.IOLoop.instance().start()
if __name__ == "__main__":
start_server()
|
msartintarm/site
|
server.py
|
Python
|
mit
| 1,592 | 0.029523 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._policy_tracked_resources_operations import PolicyTrackedResourcesOperations
from ._remediations_operations import RemediationsOperations
from ._policy_events_operations import PolicyEventsOperations
from ._policy_states_operations import PolicyStatesOperations
from ._operations import Operations
from ._policy_metadata_operations import PolicyMetadataOperations
from ._policy_restrictions_operations import PolicyRestrictionsOperations
from ._attestations_operations import AttestationsOperations
__all__ = [
'PolicyTrackedResourcesOperations',
'RemediationsOperations',
'PolicyEventsOperations',
'PolicyStatesOperations',
'Operations',
'PolicyMetadataOperations',
'PolicyRestrictionsOperations',
'AttestationsOperations',
]
|
Azure/azure-sdk-for-python
|
sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/operations/__init__.py
|
Python
|
mit
| 1,234 | 0.002431 |
import hashlib
import mock
import uuid
from django.test import TestCase
from ..models import Commander
class CommanderTestCase(TestCase):
def test_generate_token(self):
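        # Pin uuid4 to a known value so the md5-based token is deterministic and
        # can be compared against an exact digest.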
with mock.patch.object(uuid, 'uuid4', return_value='a_test'):
cmdr = Commander(
name='Branch'
)
self.assertEqual(
cmdr.generate_token(),
hashlib.md5('a_test').hexdigest()
)
def test_save(self):
# We need to ensure tokens get auto-populated here.
cmdr = Commander.objects.create(
name='Branch'
)
self.assertTrue(len(cmdr.api_token) > 0)
|
toastdriven/eliteracing
|
cmdrs/tests/test_models.py
|
Python
|
bsd-3-clause
| 666 | 0 |