| text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
import os, sys, array
import numpy as np
class BigFile:
def __init__(self, datadir):
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
def read(self, requested, isname=True):
requested = set(requested)
if isname:
index_name_array = [(self.name2index[x], x) for x in requested if x in self.name2index]
else:
assert(min(requested)>=0)
assert(max(requested)<len(self.names))
index_name_array = [(x, self.names[x]) for x in requested]
if len(index_name_array) == 0:
return [], []
index_name_array.sort(key=lambda v:v[0])
sorted_index = [x[0] for x in index_name_array]
nr_of_images = len(index_name_array)
vecs = [None] * nr_of_images
offset = np.float32(1).nbytes * self.ndims
res = array.array('f')
fr = open(self.binary_file, 'rb')
fr.seek(index_name_array[0][0] * offset)
res.fromfile(fr, self.ndims)
previous = index_name_array[0][0]
for next in sorted_index[1:]:
# after the previous read, the file pointer sits at (previous+1)*offset,
# so a relative seek of (next-1-previous)*offset lands exactly at
# next*offset
move = (next-1-previous) * offset
fr.seek(move, 1)
res.fromfile(fr, self.ndims)
previous = next
fr.close()
return [x[1] for x in index_name_array], [ res[i*self.ndims:(i+1)*self.ndims].tolist() for i in range(nr_of_images) ]
def read_one(self, name):
renamed, vectors = self.read([name])
return vectors[0]
def shape(self):
return [self.nr_of_images, self.ndims]
class StreamFile:
def __init__(self, datadir):
self.feat_dir = datadir
self.nr_of_images, self.ndims = map(int, open(os.path.join(datadir,'shape.txt')).readline().split())
id_file = os.path.join(datadir, "id.txt")
self.names = open(id_file).read().strip().split()
assert(len(self.names) == self.nr_of_images)
self.name2index = dict(zip(self.names, range(self.nr_of_images)))
self.binary_file = os.path.join(datadir, "feature.bin")
print ("[%s] %dx%d instances loaded from %s" % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir))
self.fr = None
self.current = 0
def open(self):
self.fr = open(os.path.join(self.feat_dir,'feature.bin'), 'rb')
self.current = 0
def close(self):
if self.fr:
self.fr.close()
self.fr = None
def __iter__(self):
return self
def next(self):
if self.current >= self.nr_of_images:
self.close()
raise StopIteration
else:
res = array.array('f')
res.fromfile(self.fr, self.ndims)
_id = self.names[self.current]
self.current += 1
return _id, res.tolist()
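# Note (added): under Python 3 the iterator protocol calls __next__ rather
# than next; a class-level alias such as `__next__ = next` would be needed
# for the `for _id, vec in stream` style to keep working there (assumption,
# not in the original source).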
if __name__ == '__main__':
bigfile = BigFile('toydata/FeatureData/f1')
imset = str.split('b z a a b c')
renamed, vectors = bigfile.read(imset)
for name,vec in zip(renamed, vectors):
print(name, vec)
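# A minimal StreamFile sketch (added, not in the original file), assuming
# the same 'toydata/FeatureData/f1' layout as the demo above; StreamFile
# reads vectors sequentially instead of seeking by name:
#
# stream = StreamFile('toydata/FeatureData/f1')
# stream.open()
# for _id, vec in stream:
#     print(_id, vec)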
| li-xirong/jingwei | util/simpleknn/bigfile.py | Python | mit | 3,569 | 0.009526 |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
def test_graph_breadth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
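# Note (added): on this graph BFS from node 0 takes edges (0,1), (0,2),
# (1,4) and (2,3), while DFS walks 0 -> 1 -> 4 -> 3 -> 2, so the two
# spanning trees above differ even though they span the same nodes.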
def test_graph_breadth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first_trivial_graph():
csgraph = np.array([[0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0]])
for directed in [True, False]:
bfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
| jlcarmic/producthunt_simulator | venv/lib/python2.7/site-packages/scipy/sparse/csgraph/tests/test_traversal.py | Python | mit | 2,390 | 0 |
from HG_Code.HG_Test import UnitTest as UT
from HG_Code import Model as mo
from HG_Code.Visualize import Visualizer
from HG_Code.SimManager import sim_manager
from HG_Code.Hunger_Grid import hunger_grid
from HG_Code.Kat import Kat
from HG_Code import hg_settings
from HG_Code import Mutate as mu
unitTest = UT.Run_Unit_Test()
unitTest.run_test()
#mo.run_model(from_lava = .02, # START LAVA CHANCE
# to_lava = .02, # END FROM LAVA CHANCE
# from_berry = .05, # START BERRY CHANCE
# to_berry = .05, # END BERRY CHANCE
# from_mut=10, # START MUTATION CHANCE
# to_mut=10, # END MUTATION CHANCE
# from_gen = 33, # START GENERATE CHANCE
# to_gen = 33, # END GENERATE CHANCE
#t_name = 'Default' # TITLE OF TEST
# frames = -1 # Defaults to -1 (-1:Don't, 0:Only Last, N:every N)
#mo.run_model() #Default
mo.run_model(.02,.5,.05,.05, 10, 10, 33, 33, 'Lava World', -1)
#mo.run_model(.2,.2,.05,.01, 10, 50, 33, 33, 'Nuclear Wasteland')
#mo.run_model(.02,.5,.05,.5, 10, 10, 33, 33, 'Berry World')
#mo.run_model(.00,.00,.1,.1, 10, 10, 33, 33, "No Lava")
#mo.run_model(.1,.1,0.0,0.0, 10, 10, 33, 33, "No Berries")
#mo.run_model(.1,.1,.1,.1,10,10,33,33,"Lava & Berries")
| TeamADEA/Hunger_Games | HG_Driver.py | Python | mit | 1,353 | 0.01626 |
"""Start a tcp gateway."""
import click
from mysensors.cli.helper import (
common_gateway_options,
handle_msg,
run_async_gateway,
run_gateway,
)
from mysensors.gateway_tcp import AsyncTCPGateway, TCPGateway
def common_tcp_options(func):
"""Supply common tcp gateway options."""
func = click.option(
"-p",
"--port",
default=5003,
show_default=True,
type=int,
help="TCP port of the connection.",
)(func)
func = click.option(
"-H", "--host", required=True, help="TCP address of the gateway."
)(func)
return func
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def tcp_gateway(**kwargs):
"""Start a tcp gateway."""
gateway = TCPGateway(event_callback=handle_msg, **kwargs)
run_gateway(gateway)
@click.command(options_metavar="<options>")
@common_tcp_options
@common_gateway_options
def async_tcp_gateway(**kwargs):
"""Start an async tcp gateway."""
gateway = AsyncTCPGateway(event_callback=handle_msg, **kwargs)
run_async_gateway(gateway)
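# Hedged usage sketch (added): the commands above are click entry points, so
# wired up as console scripts they would be invoked roughly like
#
#   pymysensors tcp-gateway -H 192.168.1.18 -p 5003
#
# where the executable name and subcommand spelling are assumptions based on
# the package layout, not confirmed by this module.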
| theolind/pymysensors | mysensors/cli/gateway_tcp.py | Python | mit | 1,102 | 0 |
"""
Type variables for Parametric polymorphism.
Cretonne instructions and instruction transformations can be specified to be
polymorphic by using type variables.
"""
from __future__ import absolute_import
import math
from . import types, is_power_of_two
from copy import copy
try:
from typing import Tuple, Union, Iterable, Any, Set, TYPE_CHECKING # noqa
if TYPE_CHECKING:
from srcgen import Formatter # noqa
Interval = Tuple[int, int]
# An Interval where `True` means 'everything'
BoolInterval = Union[bool, Interval]
# Set of special types: None, False, True, or iterable.
SpecialSpec = Union[bool, Iterable[types.SpecialType]]
except ImportError:
pass
MAX_LANES = 256
MAX_BITS = 64
MAX_BITVEC = MAX_BITS * MAX_LANES
def int_log2(x):
# type: (int) -> int
return int(math.log(x, 2))
def intersect(a, b):
# type: (Interval, Interval) -> Interval
"""
Given two `(min, max)` inclusive intervals, compute their intersection.
Use `(None, None)` to represent the empty interval on input and output.
"""
if a[0] is None or b[0] is None:
return (None, None)
lo = max(a[0], b[0])
assert lo is not None
hi = min(a[1], b[1])
assert hi is not None
if lo <= hi:
return (lo, hi)
else:
return (None, None)
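# Example (added): intersect((8, 32), (16, 64)) == (16, 32), while disjoint
# inputs such as intersect((8, 8), (16, 64)) collapse to (None, None).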
def is_empty(intv):
# type: (Interval) -> bool
return intv is None or intv is False or intv == (None, None)
def encode_bitset(vals, size):
# type: (Iterable[int], int) -> int
"""
Encode a set of values (each between 0 and size) as a bitset of width size.
"""
res = 0
assert is_power_of_two(size) and size <= 64
for v in vals:
assert 0 <= v and v < size
res |= 1 << v
return res
def pp_set(s):
# type: (Iterable[Any]) -> str
"""
Return a consistent string representation of a set (ordering is fixed)
"""
return '{' + ', '.join([repr(x) for x in sorted(s)]) + '}'
def decode_interval(intv, full_range, default=None):
# type: (BoolInterval, Interval, int) -> Interval
"""
Decode an interval specification which can take the following values:
True
Use the `full_range`.
`False` or `None`
An empty interval
(lo, hi)
An explicit interval
"""
if isinstance(intv, tuple):
# mypy bug here: 'builtins.None' object is not iterable
lo, hi = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
assert lo >= full_range[0]
assert hi <= full_range[1]
return intv
if intv:
return full_range
else:
return (default, default)
def interval_to_set(intv):
# type: (Interval) -> Set
if is_empty(intv):
return set()
(lo, hi) = intv
assert is_power_of_two(lo)
assert is_power_of_two(hi)
assert lo <= hi
return set([2**i for i in range(int_log2(lo), int_log2(hi)+1)])
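# Example (added): interval_to_set((8, 32)) == {8, 16, 32}; both endpoints
# must be powers of two, and the set expands over every power of two in
# between.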
def legal_bool(bits):
# type: (int) -> bool
"""
True iff bits is a legal bit width for a bool type.
bits == 1 || bits \in { 8, 16, .. MAX_BITS }
"""
return bits == 1 or \
(bits >= 8 and bits <= MAX_BITS and is_power_of_two(bits))
class TypeSet(object):
"""
A set of types.
We don't allow arbitrary subsets of types, but use a parametrized approach
instead.
Objects of this class can be used as dictionary keys.
Parametrized type sets are specified in terms of ranges:
- The permitted range of vector lanes, where 1 indicates a scalar type.
- The permitted range of integer types.
- The permitted range of floating point types, and
- The permitted range of boolean types.
The ranges are inclusive from smallest bit-width to largest bit-width.
A typeset representing scalar integer types `i8` through `i32`:
>>> TypeSet(ints=(8, 32))
TypeSet(lanes={1}, ints={8, 16, 32})
Passing `True` instead of a range selects all available scalar types:
>>> TypeSet(ints=True)
TypeSet(lanes={1}, ints={8, 16, 32, 64})
>>> TypeSet(floats=True)
TypeSet(lanes={1}, floats={32, 64})
>>> TypeSet(bools=True)
TypeSet(lanes={1}, bools={1, 8, 16, 32, 64})
Similarly, passing `True` for the lanes selects all possible scalar and
vector types:
>>> TypeSet(lanes=True, ints=True)
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={8, 16, 32, 64})
Finally, a type set can contain special types (derived from `SpecialType`)
which can't appear as lane types.
:param lanes: `(min, max)` inclusive range of permitted vector lane counts.
:param ints: `(min, max)` inclusive range of permitted scalar integer
widths.
:param floats: `(min, max)` inclusive range of permitted scalar floating
point widths.
:param bools: `(min, max)` inclusive range of permitted scalar boolean
widths.
:param bitvecs : `(min, max)` inclusive range of permitted bitvector
widths.
:param specials: Sequence of special types to appear in the set.
"""
def __init__(
self,
lanes=None, # type: BoolInterval
ints=None, # type: BoolInterval
floats=None, # type: BoolInterval
bools=None, # type: BoolInterval
bitvecs=None, # type: BoolInterval
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.lanes = interval_to_set(decode_interval(lanes, (1, MAX_LANES), 1))
self.ints = interval_to_set(decode_interval(ints, (8, MAX_BITS)))
self.floats = interval_to_set(decode_interval(floats, (32, 64)))
self.bools = interval_to_set(decode_interval(bools, (1, MAX_BITS)))
self.bools = set(filter(legal_bool, self.bools))
self.bitvecs = interval_to_set(decode_interval(bitvecs,
(1, MAX_BITVEC)))
# Allow specials=None, specials=True, specials=(...)
self.specials = set() # type: Set[types.SpecialType]
if isinstance(specials, bool):
if specials:
self.specials = set(types.ValueType.all_special_types)
elif specials:
self.specials = set(specials)
def copy(self):
# type: (TypeSet) -> TypeSet
"""
Return a copy of self.
"""
n = TypeSet()
n.lanes = copy(self.lanes)
n.ints = copy(self.ints)
n.floats = copy(self.floats)
n.bools = copy(self.bools)
n.bitvecs = copy(self.bitvecs)
n.specials = copy(self.specials)
return n
def typeset_key(self):
# type: () -> Tuple[Tuple, Tuple, Tuple, Tuple, Tuple, Tuple]
"""Key tuple used for hashing and equality."""
return (tuple(sorted(list(self.lanes))),
tuple(sorted(list(self.ints))),
tuple(sorted(list(self.floats))),
tuple(sorted(list(self.bools))),
tuple(sorted(list(self.bitvecs))),
tuple(sorted(s.name for s in self.specials)))
def __hash__(self):
# type: () -> int
h = hash(self.typeset_key())
assert h == getattr(self, 'prev_hash', h), "TypeSet changed!"
self.prev_hash = h
return h
def __eq__(self, other):
# type: (object) -> bool
if isinstance(other, TypeSet):
return self.typeset_key() == other.typeset_key()
else:
return False
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
def __repr__(self):
# type: () -> str
s = 'TypeSet(lanes={}'.format(pp_set(self.lanes))
if len(self.ints) > 0:
s += ', ints={}'.format(pp_set(self.ints))
if len(self.floats) > 0:
s += ', floats={}'.format(pp_set(self.floats))
if len(self.bools) > 0:
s += ', bools={}'.format(pp_set(self.bools))
if len(self.bitvecs) > 0:
s += ', bitvecs={}'.format(pp_set(self.bitvecs))
if len(self.specials) > 0:
s += ', specials=[{}]'.format(pp_set(self.specials))
return s + ')'
def emit_fields(self, fmt):
# type: (Formatter) -> None
"""Emit field initializers for this typeset."""
assert len(self.bitvecs) == 0, "Bitvector types are not emitable."
fmt.comment(repr(self))
fields = (('lanes', 16),
('ints', 8),
('floats', 8),
('bools', 8))
for (field, bits) in fields:
vals = [int_log2(x) for x in getattr(self, field)]
fmt.line('{}: BitSet::<u{}>({}),'
.format(field, bits, encode_bitset(vals, bits)))
def __iand__(self, other):
# type: (TypeSet) -> TypeSet
"""
Intersect self with other type set.
>>> a = TypeSet(lanes=True, ints=(16, 32))
>>> a
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256}, ints={16, 32})
>>> b = TypeSet(lanes=(4, 16), ints=True)
>>> a &= b
>>> a
TypeSet(lanes={4, 8, 16}, ints={16, 32})
>>> a = TypeSet(lanes=True, bools=(1, 8))
>>> b = TypeSet(lanes=True, bools=(16, 32))
>>> a &= b
>>> a
TypeSet(lanes={1, 2, 4, 8, 16, 32, 64, 128, 256})
"""
self.lanes.intersection_update(other.lanes)
self.ints.intersection_update(other.ints)
self.floats.intersection_update(other.floats)
self.bools.intersection_update(other.bools)
self.bitvecs.intersection_update(other.bitvecs)
self.specials.intersection_update(other.specials)
return self
def issubset(self, other):
# type: (TypeSet) -> bool
"""
Return true iff self is a subset of other
"""
return self.lanes.issubset(other.lanes) and \
self.ints.issubset(other.ints) and \
self.floats.issubset(other.floats) and \
self.bools.issubset(other.bools) and \
self.bitvecs.issubset(other.bitvecs) and \
self.specials.issubset(other.specials)
def lane_of(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across lane_of
"""
new = self.copy()
new.lanes = set([1])
new.bitvecs = set()
return new
def as_bool(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across as_bool
"""
new = self.copy()
new.ints = set()
new.floats = set()
new.bitvecs = set()
if len(self.lanes.difference(set([1]))) > 0:
new.bools = self.ints.union(self.floats).union(self.bools)
if 1 in self.lanes:
new.bools.add(1)
return new
def half_width(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across halfwidth
"""
new = self.copy()
new.ints = set([x//2 for x in self.ints if x > 8])
new.floats = set([x//2 for x in self.floats if x > 32])
new.bools = set([x//2 for x in self.bools if x > 8])
new.bitvecs = set([x//2 for x in self.bitvecs if x > 1])
new.specials = set()
return new
def double_width(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across doublewidth
"""
new = self.copy()
new.ints = set([x*2 for x in self.ints if x < MAX_BITS])
new.floats = set([x*2 for x in self.floats if x < MAX_BITS])
new.bools = set(filter(legal_bool,
set([x*2 for x in self.bools if x < MAX_BITS])))
new.bitvecs = set([x*2 for x in self.bitvecs if x < MAX_BITVEC])
new.specials = set()
return new
def half_vector(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across halfvector
"""
new = self.copy()
new.bitvecs = set()
new.lanes = set([x//2 for x in self.lanes if x > 1])
new.specials = set()
return new
def double_vector(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across doublevector
"""
new = self.copy()
new.bitvecs = set()
new.lanes = set([x*2 for x in self.lanes if x < MAX_LANES])
new.specials = set()
return new
def to_bitvec(self):
# type: () -> TypeSet
"""
Return a TypeSet describing the image of self across to_bitvec
"""
assert len(self.bitvecs) == 0
all_scalars = self.ints.union(self.floats.union(self.bools))
new = self.copy()
new.lanes = set([1])
new.ints = set()
new.bools = set()
new.floats = set()
new.bitvecs = set([lane_w * nlanes for lane_w in all_scalars
for nlanes in self.lanes])
new.specials = set()
return new
def image(self, func):
# type: (str) -> TypeSet
"""
Return the image of self across the derived function func
"""
if (func == TypeVar.LANEOF):
return self.lane_of()
elif (func == TypeVar.ASBOOL):
return self.as_bool()
elif (func == TypeVar.HALFWIDTH):
return self.half_width()
elif (func == TypeVar.DOUBLEWIDTH):
return self.double_width()
elif (func == TypeVar.HALFVECTOR):
return self.half_vector()
elif (func == TypeVar.DOUBLEVECTOR):
return self.double_vector()
elif (func == TypeVar.TOBITVEC):
return self.to_bitvec()
else:
assert False, "Unknown derived function: " + func
def preimage(self, func):
# type: (str) -> TypeSet
"""
Return the inverse image of self across the derived function func
"""
# The inverse of the empty set is always empty
if (self.size() == 0):
return self
if (func == TypeVar.LANEOF):
new = self.copy()
new.bitvecs = set()
new.lanes = set([2**i for i in range(0, int_log2(MAX_LANES)+1)])
return new
elif (func == TypeVar.ASBOOL):
new = self.copy()
new.bitvecs = set()
if 1 not in self.bools:
new.ints = self.bools.difference(set([1]))
new.floats = self.bools.intersection(set([32, 64]))
# If b1 is not in our typeset, then lanes=1 cannot be in the
# pre-image, as as_bool() of scalars is always b1.
new.lanes = self.lanes.difference(set([1]))
else:
new.ints = set([2**x for x in range(3, 7)])
new.floats = set([32, 64])
return new
elif (func == TypeVar.HALFWIDTH):
return self.double_width()
elif (func == TypeVar.DOUBLEWIDTH):
return self.half_width()
elif (func == TypeVar.HALFVECTOR):
return self.double_vector()
elif (func == TypeVar.DOUBLEVECTOR):
return self.half_vector()
elif (func == TypeVar.TOBITVEC):
new = TypeSet()
# Start with all possible lanes/ints/floats/bools
lanes = interval_to_set(decode_interval(True, (1, MAX_LANES), 1))
ints = interval_to_set(decode_interval(True, (8, MAX_BITS)))
floats = interval_to_set(decode_interval(True, (32, 64)))
bools = interval_to_set(decode_interval(True, (1, MAX_BITS)))
# See which combinations have a size that appears in self.bitvecs
has_t = set() # type: Set[Tuple[str, int, int]]
for l in lanes:
for i in ints:
if i * l in self.bitvecs:
has_t.add(('i', i, l))
for i in bools:
if i * l in self.bitvecs:
has_t.add(('b', i, l))
for i in floats:
if i * l in self.bitvecs:
has_t.add(('f', i, l))
for (t, width, lane) in has_t:
new.lanes.add(lane)
if (t == 'i'):
new.ints.add(width)
elif (t == 'b'):
new.bools.add(width)
else:
assert t == 'f'
new.floats.add(width)
return new
else:
assert False, "Unknown derived function: " + func
def size(self):
# type: () -> int
"""
Return the number of concrete types represented by this typeset
"""
return (len(self.lanes) * (len(self.ints) + len(self.floats) +
len(self.bools) + len(self.bitvecs)) +
len(self.specials))
def concrete_types(self):
# type: () -> Iterable[types.ValueType]
def by(scalar, lanes):
# type: (types.LaneType, int) -> types.ValueType
if (lanes == 1):
return scalar
else:
return scalar.by(lanes)
for nlanes in self.lanes:
for bits in self.ints:
yield by(types.IntType.with_bits(bits), nlanes)
for bits in self.floats:
yield by(types.FloatType.with_bits(bits), nlanes)
for bits in self.bools:
yield by(types.BoolType.with_bits(bits), nlanes)
for bits in self.bitvecs:
assert nlanes == 1
yield types.BVType.with_bits(bits)
for spec in self.specials:
yield spec
def get_singleton(self):
# type: () -> types.ValueType
"""
Return the singleton type represented by self. Can only be called on
typesets containing exactly one type.
"""
types = list(self.concrete_types())
assert len(types) == 1
return types[0]
def widths(self):
# type: () -> Set[int]
""" Return a set of the widths of all possible types in self"""
scalar_w = self.ints.union(self.floats.union(self.bools))
scalar_w = scalar_w.union(self.bitvecs)
return set(w * l for l in self.lanes for w in scalar_w)
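# Example (added): TypeSet(lanes=(1, 4), ints=(8, 16)).widths() returns
# {8, 16, 32, 64}, i.e. every lane count in {1, 2, 4} times every scalar
# width in {8, 16}.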
class TypeVar(object):
"""
Type variables can be used in place of concrete types when defining
instructions. This makes the instructions *polymorphic*.
A type variable is restricted to vary over a subset of the value types.
This subset is specified by a set of flags that control the permitted base
types and whether the type variable can assume scalar or vector types, or
both.
:param name: Short name of type variable used in instruction descriptions.
:param doc: Documentation string.
:param ints: Allow all integer base types, or `(min, max)` bit-range.
:param floats: Allow all floating point base types, or `(min, max)`
bit-range.
:param bools: Allow all boolean base types, or `(min, max)` bit-range.
:param scalars: Allow type variable to assume scalar types.
:param simd: Allow type variable to assume vector types, or `(min, max)`
lane count range.
:param bitvecs: Allow all BitVec base types, or `(min, max)` bit-range.
"""
def __init__(
self,
name, # type: str
doc, # type: str
ints=False, # type: BoolInterval
floats=False, # type: BoolInterval
bools=False, # type: BoolInterval
scalars=True, # type: bool
simd=False, # type: BoolInterval
bitvecs=False, # type: BoolInterval
base=None, # type: TypeVar
derived_func=None, # type: str
specials=None # type: SpecialSpec
):
# type: (...) -> None
self.name = name
self.__doc__ = doc
self.is_derived = isinstance(base, TypeVar)
if base:
assert self.is_derived
assert derived_func
self.base = base
self.derived_func = derived_func
self.name = '{}({})'.format(derived_func, base.name)
else:
min_lanes = 1 if scalars else 2
lanes = decode_interval(simd, (min_lanes, MAX_LANES), 1)
self.type_set = TypeSet(
lanes=lanes,
ints=ints,
floats=floats,
bools=bools,
bitvecs=bitvecs,
specials=specials)
@staticmethod
def singleton(typ):
# type: (types.ValueType) -> TypeVar
"""Create a type variable that can only assume a single type."""
scalar = None # type: types.ValueType
if isinstance(typ, types.VectorType):
scalar = typ.base
lanes = (typ.lanes, typ.lanes)
elif isinstance(typ, types.LaneType):
scalar = typ
lanes = (1, 1)
elif isinstance(typ, types.SpecialType):
return TypeVar(typ.name, typ.__doc__, specials=[typ])
else:
assert isinstance(typ, types.BVType)
scalar = typ
lanes = (1, 1)
ints = None
floats = None
bools = None
bitvecs = None
if isinstance(scalar, types.IntType):
ints = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.FloatType):
floats = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BoolType):
bools = (scalar.bits, scalar.bits)
elif isinstance(scalar, types.BVType):
bitvecs = (scalar.bits, scalar.bits)
tv = TypeVar(
typ.name, typ.__doc__,
ints=ints, floats=floats, bools=bools,
bitvecs=bitvecs, simd=lanes)
return tv
def __str__(self):
# type: () -> str
return "`{}`".format(self.name)
def __repr__(self):
# type: () -> str
if self.is_derived:
return (
'TypeVar({}, base={}, derived_func={})'
.format(self.name, self.base, self.derived_func))
else:
return (
'TypeVar({}, {})'
.format(self.name, self.type_set))
def __hash__(self):
# type: () -> int
if (not self.is_derived):
return object.__hash__(self)
return hash((self.derived_func, self.base))
def __eq__(self, other):
# type: (object) -> bool
if not isinstance(other, TypeVar):
return False
if self.is_derived and other.is_derived:
return (
self.derived_func == other.derived_func and
self.base == other.base)
else:
return self is other
def __ne__(self, other):
# type: (object) -> bool
return not self.__eq__(other)
# Supported functions for derived type variables.
# The names here must match the method names on `ir::types::Type`.
# The camel_case of the names must match `enum OperandConstraint` in
# `instructions.rs`.
LANEOF = 'lane_of'
ASBOOL = 'as_bool'
HALFWIDTH = 'half_width'
DOUBLEWIDTH = 'double_width'
HALFVECTOR = 'half_vector'
DOUBLEVECTOR = 'double_vector'
TOBITVEC = 'to_bitvec'
@staticmethod
def is_bijection(func):
# type: (str) -> bool
return func in [
TypeVar.HALFWIDTH,
TypeVar.DOUBLEWIDTH,
TypeVar.HALFVECTOR,
TypeVar.DOUBLEVECTOR]
@staticmethod
def inverse_func(func):
# type: (str) -> str
return {
TypeVar.HALFWIDTH: TypeVar.DOUBLEWIDTH,
TypeVar.DOUBLEWIDTH: TypeVar.HALFWIDTH,
TypeVar.HALFVECTOR: TypeVar.DOUBLEVECTOR,
TypeVar.DOUBLEVECTOR: TypeVar.HALFVECTOR
}[func]
@staticmethod
def derived(base, derived_func):
# type: (TypeVar, str) -> TypeVar
"""Create a type variable that is a function of another."""
# Safety checks to avoid over/underflows.
ts = base.get_typeset()
assert len(ts.specials) == 0, "Can't derive from special types"
if derived_func == TypeVar.HALFWIDTH:
if len(ts.ints) > 0:
assert min(ts.ints) > 8, "Can't halve all integer types"
if len(ts.floats) > 0:
assert min(ts.floats) > 32, "Can't halve all float types"
if len(ts.bools) > 0:
assert min(ts.bools) > 8, "Can't halve all boolean types"
elif derived_func == TypeVar.DOUBLEWIDTH:
if len(ts.ints) > 0:
assert max(ts.ints) < MAX_BITS,\
"Can't double all integer types."
if len(ts.floats) > 0:
assert max(ts.floats) < MAX_BITS,\
"Can't double all float types."
if len(ts.bools) > 0:
assert max(ts.bools) < MAX_BITS, "Can't double all bool types."
elif derived_func == TypeVar.HALFVECTOR:
assert min(ts.lanes) > 1, "Can't halve a scalar type"
elif derived_func == TypeVar.DOUBLEVECTOR:
assert max(ts.lanes) < MAX_LANES, "Can't double 256 lanes."
return TypeVar(None, None, base=base, derived_func=derived_func)
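# Example (added): deriving HALFWIDTH from a type variable whose typeset has
# ints={8} trips the "Can't halve all integer types" assertion above, since
# i8 has no half-width integer counterpart.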
@staticmethod
def from_typeset(ts):
# type: (TypeSet) -> TypeVar
""" Create a type variable from a type set."""
tv = TypeVar(None, None)
tv.type_set = ts
return tv
def lane_of(self):
# type: () -> TypeVar
"""
Return a derived type variable that is the scalar lane type of this
type variable.
When this type variable assumes a scalar type, the derived type will be
the same scalar type.
"""
return TypeVar.derived(self, self.LANEOF)
def as_bool(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same vector geometry as
this type variable, but with boolean lanes. Scalar types map to `b1`.
"""
return TypeVar.derived(self, self.ASBOOL)
def half_width(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same number of vector lanes
as this one, but the lanes are half the width.
"""
return TypeVar.derived(self, self.HALFWIDTH)
def double_width(self):
# type: () -> TypeVar
"""
Return a derived type variable that has the same number of vector lanes
as this one, but the lanes are double the width.
"""
return TypeVar.derived(self, self.DOUBLEWIDTH)
def half_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has half the number of vector lanes
as this one, with the same lane type.
"""
return TypeVar.derived(self, self.HALFVECTOR)
def double_vector(self):
# type: () -> TypeVar
"""
Return a derived type variable that has twice the number of vector
lanes as this one, with the same lane type.
"""
return TypeVar.derived(self, self.DOUBLEVECTOR)
def to_bitvec(self):
# type: () -> TypeVar
"""
Return a derived type variable that represents a flat bitvector with
the same size as self.
"""
return TypeVar.derived(self, self.TOBITVEC)
def singleton_type(self):
# type: () -> types.ValueType
"""
If the associated typeset has a single type, return it. Otherwise return
None.
"""
ts = self.get_typeset()
if ts.size() != 1:
return None
return ts.get_singleton()
def free_typevar(self):
# type: () -> TypeVar
"""
Get the free type variable controlling this one.
"""
if self.is_derived:
return self.base.free_typevar()
elif self.singleton_type() is not None:
# A singleton type variable is not a proper free variable.
return None
else:
return self
def rust_expr(self):
# type: () -> str
"""
Get a Rust expression that computes the type of this type variable.
"""
if self.is_derived:
return '{}.{}()'.format(
self.base.rust_expr(), self.derived_func)
elif self.singleton_type():
return self.singleton_type().rust_name()
else:
return self.name
def constrain_types_by_ts(self, ts):
# type: (TypeSet) -> None
"""
Constrain the range of types this variable can assume to a subset of
those in the typeset ts.
"""
if not self.is_derived:
self.type_set &= ts
else:
self.base.constrain_types_by_ts(ts.preimage(self.derived_func))
def constrain_types(self, other):
# type: (TypeVar) -> None
"""
Constrain the range of types this variable can assume to a subset of
those `other` can assume.
"""
if self is other:
return
self.constrain_types_by_ts(other.get_typeset())
def get_typeset(self):
# type: () -> TypeSet
"""
Returns the typeset for this TV. If the TV is derived, computes it
recursively from the derived function and the base's typeset.
"""
if not self.is_derived:
return self.type_set
else:
return self.base.get_typeset().image(self.derived_func)
def get_fresh_copy(self, name):
# type: (str) -> TypeVar
"""
Get a fresh copy of self. Can only be called on free typevars.
"""
assert not self.is_derived
tv = TypeVar.from_typeset(self.type_set.copy())
tv.name = name
return tv
| sunfishcode/cretonne | lib/cretonne/meta/cdsl/typevar.py | Python | apache-2.0 | 30,326 | 0.000132 |
"""
Open CarPool is a free and open source carpooling/dynamic ride sharing
system, open to be connected to other car pools and public transport.
Copyright (C) 2009-2014 Julian Rath, Oliver Pintat
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
In case this software interacts with other software tools, other
licenses may apply.
Module for the core functionality for the call2ride pilot and Open CarPool
"""
import psycopg2
import datetime
import pytz
import logging
from sms_providers import sms_trade_de
import md5
DEFAULT_COUNTRY_CODE = '49'
## Max Minutes a time can be in the past before adding a day
MINUTES_DELTA = 10
logger = logging.getLogger(__name__)
def sms_mail(receiver, message, config):
logger.info("sending mail: %s" % message)
# Import smtplib for the actual sending function
import smtplib
# Import the email modules we'll need
from email.mime.text import MIMEText
# Open a plain text file for reading. For this example, assume that
# the text file contains only ASCII characters.
#fp = open(textfile, 'rb')
# Create a text/plain message
msg = MIMEText(message)
#fp.close()
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'Call2Ride Text Message to %s' % receiver
msg['From'] = 'sms@call2ride.org'
msg['To'] = 'crath@ebay.de'
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP(config.get('mail', 'smtp'))
s.ehlo()
s.login(config.get('mail', 'user'), config.get('mail', 'password'))
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
def c2r_check_appointment_in_future(start_date, tm, tzname):
## if no date, then result is always true
if start_date==None or start_date=='':
return True
time = datetime.time(int(tm[0:2]), int(tm[2:4]))
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
tz = pytz.timezone(tzname)
now = datetime.datetime.now(tz)
timenow = datetime.time(now.hour, now.minute)
timeadd = datetime.timedelta(minutes=MINUTES_DELTA)
timeplus = now - timeadd
timeplus2 = datetime.time(timeplus.hour, timeplus.minute)
resultdate = datetime.datetime.combine(date, time)
if resultdate > now.replace(tzinfo=None):
return True
return False
def c2r_time_to_datetime(start_date, tm, tzname, checkonly=False, force=False):
"""
@param tm: 4 digit str
@return: timezone-aware datetime.datetime combining start_date (or today,
possibly shifted to tomorrow) with the given time
"""
time = datetime.time(int(tm[0:2]), int(tm[2:4]))
date = datetime.date.today()
start_date_bool = True
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
start_date_bool = False
tz = pytz.timezone(tzname)
now = datetime.datetime.now(tz)
timenow = datetime.time(now.hour, now.minute)
timeadd = datetime.timedelta(minutes=MINUTES_DELTA)
timeplus = now - timeadd
timeplus2 = datetime.time(timeplus.hour, timeplus.minute)
resultdate = datetime.datetime.combine(date, time)
## Check Datetime, if in the past use tomorrow
if (timeplus2 > time or force) and start_date_bool:
if checkonly:
return True
aday = datetime.timedelta(days=1)
resultdate = resultdate + aday
if checkonly:
return False
resultdate = tz.localize(resultdate)
return resultdate
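# Example (added): with tzname='Europe/Berlin' and no start_date, tm='0930'
# requested at 10:00 local time is shifted to tomorrow 09:30 (more than
# MINUTES_DELTA minutes in the past), while at 09:00 it stays today.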
def normalize_caller_id(clip):
"""
@raise: ValueError if the format is wrong
This convert a clip (eg 0712123123123, +49712123123123 or
0049712123123123) to the international form starting with +XX.
If the number is starting with only one 0 zero, germany(+49) is assumed.
"""
clip = clip.strip()
if clip.startswith('+'):
numerical_rep = clip[1:]
elif clip.startswith('00'):
numerical_rep = clip
elif clip.startswith('0'):
numerical_rep = DEFAULT_COUNTRY_CODE+clip[1:]
else:
raise ValueError, 'CLIP format unsupported "%s"' % clip
##test if it is a real number
numerical_rep = long(numerical_rep)
return '+%s' % ( numerical_rep)
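# Examples (added): normalize_caller_id('0712123123123') -> '+49712123123123'
# (a single leading zero assumes DEFAULT_COUNTRY_CODE), and
# normalize_caller_id('0049712123123123') -> '+49712123123123' as well.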
def verify_user_id(user_id, db_con):
"""
verify the format of the id and that it exists in the database
"""
user_id = int(user_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM users WHERE id = %d" % user_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def verify_user_number_id(user_number_id, db_con):
"""
verify the format of the id and that it exists in the database
"""
user_number_id = int(user_number_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM user_number WHERE id = %d" % user_number_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def verify_route_id(route_id, db_con):
"""
verify the format of the id and that it exists in the database
"""
route_id = int(route_id)
cur = db_con.cursor()
cur.execute("SELECT NULL FROM routes WHERE id = %d" % route_id)
cur.fetchone()
if cur.rowcount == 1:
return True
else:
return False
def get_user_id_from_number_id(number_id, db_con):
cur = db_con.cursor()
sql = 'select user_id from user_number where id = %d'
cur.execute(sql % number_id)
user_id, = cur.fetchone()
cur.close()
return user_id
def get_active_offer_ids(user_number_id, route_id, reverse, db_con, start_date=None):
"""
returns the latest active offer ids
"""
user_id = get_user_id_from_number_id(user_number_id, db_con)
date = datetime.date.today()
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
sqldate = ' date_trunc(\'day\', start_time)=TIMESTAMP \'%s\' and ' % date.strftime('%Y-%m-%d')
cur = db_con.cursor()
rev = 'reverse=FALSE'
if reverse == 1:
rev = 'reverse=TRUE'
sql = "SELECT id FROM ride_offers WHERE %s user_number_id in (select id from user_number where user_id = %s) AND route_id = %s AND status = 'open' and %s" % (sqldate, user_id, route_id, rev)
cur.execute(sql)
res = cur.fetchall()
if res != []:
return [c[0] for c in res]
else:
return None
def get_active_request_ids(user_number_id, start_point, end_point, db_con, start_date=None):
"""
returns the latest active request ids
"""
date = datetime.date.today()
if start_date!=None and start_date!='':
date = datetime.date(int(start_date[0:4]),int(start_date[4:6]),int(start_date[6:8]))
sqldate = ' date_trunc(\'day\', earliest_start_time)=TIMESTAMP \'%s\' and ' % date.strftime('%Y-%m-%d')
user_id = get_user_id_from_number_id(user_number_id, db_con)
cur = db_con.cursor()
cur.execute("SELECT id FROM ride_requests WHERE %s user_number_id in (select id from user_number where user_id = %s) AND start_point = %s AND end_point = %s AND status = 'open'" % (sqldate, user_id, start_point, end_point))
res = cur.fetchall()
if res != []:
return [c[0] for c in res]
else:
return None
class Call2RideError(Exception):
"""
General call2ride exception
"""
pass
class Call2Ride(object):
"""
Main class implementing core functions for call2ride; may be offered
via webservices or used directly
"""
_INSERT_OFFER_STATMENT = '''INSERT INTO ride_offers
(user_number_id, route_id, start_time, status, reverse) VALUES(%i, %i, '%s', 'open', %d=1)'''
_INSERT_REQUEST_STATEMENT = '''INSERT INTO ride_requests
(user_number_id, start_point, end_point, earliest_start_time, latest_start_time, status)
VALUES(%i, %i, %i, '%s', '%s', 'open')'''
_SELECT_CELL_STATMENT = '''SELECT cell_phone_nr FROM users
WHERE id = %i'''
_LOG_SMS_STATEMENT = '''INSERT INTO
sms(message_id, message, receiver_nr, cost, error) VALUES
('%s', '%s', '%s', %f, '%s')'''
##for matching in a request
_REQ_MATCH_STATEMENT = """
SELECT ride_offers.start_time, users.name, users.cell_phone_nr
FROM ride_offers, users, routes WHERE
ride_offers.user_id = users.id AND ride_offers.route_id = routes.id
AND ride_offers.start_time >= TIMESTAMP '%s'
AND ride_offers.start_time <= TIMESTAMP '%s'
AND ride_offers.route_id = %d
AND ride_offers.status = 'open'
AND routes.status = 'enabled'
ORDER by ride_offers.start_time
LIMIT 5"""
##find possible ride offer
_POSSIBLE_RIDES_STATEMENT = """
SELECT id FROM ride_offers WHERE ride_offers.status = 'open' AND route_id in (
SELECT distinct route_id FROM route_pickuppoint WHERE
point_id=%d
and route_id in (
SELECT distinct route_id FROM route_pickuppoint where point_id=%d
)
)
"""
##reverse matching in a offer
_OFFER_MATCH_STATEMENT = """
SELECT users.cell_phone_nr FROM users, ride_requests
WHERE
users.id = ride_requests.user_id
AND ride_requests.earliest_start_time <= TIMESTAMP '%s'
AND ride_requests.latest_start_time >= TIMESTAMP '%s'
AND ride_requests.route_id = %s
AND ride_requests.status = 'open'
"""
_REQ_PUBLIC_TRANSPORT_MATCH = """SELECT departure_time, means FROM
public_transport WHERE public_transport.departure_time + current_date >= '%s' AND
public_transport.departure_time + current_date<= '%s' AND
public_transport.start_point = %d AND end_point = %d ORDER BY departure_time LIMIT 2"""
##closes all requests on this route before timestamp
##log statement needs to be called before the normal one
_REQ_CLOSE_LOG_STATEMENT = """INSERT INTO
status_log(ride_requests_id, old_status, new_status)
VALUES
(%d, 'open', 'closed')"""
_REQ_CLOSE_STATEMENT = """UPDATE ride_requests
SET status = 'closed'
WHERE id=%d"""
##log statement needs to be called before the normal one
_OFFER_CLOSE_LOG_STATEMENT = """INSERT INTO
status_log(ride_offer_id, old_status, new_status)
VALUES(
%d,
'open', 'closed')"""
_OFFER_CLOSE_STATEMENT = """UPDATE ride_offers
SET status = 'closed'
WHERE id = %d and reverse=(1=%d)"""
_GET_ROUTES_STATEMENT = """SELECT id FROM routes WHERE status = 'enabled'"""
_GET_TAXI_NUMBER_STATEMENT = """SELECT number FROM taxi WHERE location_id = %d"""
##i18n?
_SMS_FOUND_TEXT = u"""Your next rides on route #%s (%s) are:
%s
"""
_SMS_REQ_TAXI_TEXT = """or call a Taxi %s """
_SMS_OFFER_TEXT = '%s, %s (%s)\n'
_SMS_OFFER_MATCH_TEXT = u"""There is a new ride matching your request\
for route #%s (%s): %s, %s (%s)."""
_SMS_OFFER_CONFIRMATION = """Thank you for offering a ride on route #%s (%s) at %s. We will inform potential passengers. Please call again in case you need to reschedule or can not offer any more rides."""
## CREATE TABLE notifications (
## id bigserial primary key,
## user_id bigserial NOT NULL,
## message text NOT NULL,
## date_matched timestamp with time zone default NULL,
## unread boolean default true);
_GET_NOTIFICATIONS = """SELECT message FROM notifications WHERE user_id = %d and unread=true"""
_INSERT_NOTIFICATIONS_STATMENT = '''INSERT INTO notifications
(user_id, message, date_matched, unread) VALUES(%d, '%s', now(), true)'''
_UPDATE_NOTIFICATIONS_STATMENT = '''UPDATE notifications set unread=false where id = %d'''
def __init__(self, sms_provider, config, timeformat='%d %b %H:%M'):
"""
@param sms_provider: a provider for sending sms eg call_trade_de.py
@param config: ConfigParser Object
"""
self._timeformat = timeformat
self._sms_provider = sms_provider
self._dsn = config.get('database', 'connection')
self._config = config
logger.debug('initialized interface with dsn "%s"' % self._dsn)
def _get_cell_phone_nr(self, user_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(self._SELECT_CELL_STATMENT % user_id)
return cur.fetchone()[0]
def _send_sms(self, receiver, message):
"""
Sends an sms via the sms provider and creates the related database
entry for it
"""
logger.debug("_send_sms with message %s" % message)
res = None
try:
res = self._sms_provider.send_sms(receiver, message)
error = 'NULL'
logger.debug("sms response %s" % res)
except sms_trade_de.SmsGatewayError, err:
error = "'%s'" % str(err)
logger.debug("sms error %s" % error)
import smtplib
from email.mime.text import MIMEText
msg = MIMEText(message)
msg['Subject'] = 'Open CarPool SMS Error %s' % error
msg['From'] = 'sms@call2ride.org'
msg['To'] = 'mail@opencarpool.org'
s = smtplib.SMTP(self._config.get('mail', 'smtp'))
s.ehlo()
s.login(self._config.get('mail', 'user'), self._config.get('mail', 'password'))
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
con = psycopg2.connect(self._dsn)
cur = con.cursor()
if res!=None:
cur.execute(self._LOG_SMS_STATEMENT % (
res.get('message_id', 'NO_MESSAGE_ID'),
message,
receiver,
res.get('cost', 0.0),
error))
con.commit()
con.close()
sms_mail(receiver, message, self._config)
def callid_to_locationid(self, call_id):
"""
This method converts a callid (the telephone number which is called)
to the related location id.
"""
raise Call2RideError("NOT_IMPLEMENTED")
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(self._XXX)
if cur.rowcount == 0:
raise Call2RideError("UNKNOW_ERROR")
def callerid_to_userid(self, caller_id):
"""
This converts a callerid (eg 0712123123123, +49712123123123 or
0049712123123123) to a userid representation of the backend.
If the number starts with only one zero, germany(+49) is
assumed. The user_id should be in string format, because the
backend may use db-id's, uuid's or openid...
@param caller_id: str
@returns: str max length 100, user_id
"""
clip = normalize_caller_id(caller_id)
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##cur.execute("SELECT id from users WHERE cell_phone_nr = '%s'" % clip)
cur.execute("SELECT id from user_number WHERE number = '%s'" % clip)
if cur.rowcount == 0:
return -1
else:
return cur.fetchone()[0]
def offer_ride(self, user_number_id, location_id, route_key, start_time, reverse, start_date=None, send_sms=1):
"""
@param user_id: str with max lenght of 100
@param route_id: int
@param start_time: 4 digit integer HHMM
@returns: None
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##parameter checking
if not verify_user_number_id(user_number_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
lang = self._get_lang_by_user_number_id(user_number_id)
## check location_id
if location_id == 0:
location_id = self._get_default_location_id(user_number_id)
##find route id
# select distinct(r.id) from routes r, user_number un where key='10' and location_id=1 and (r.user_id=0)or (r.user_id=un.user_id and un.id = 17)
#sql = "select r.id from routes r, user_number un where key='%s' and r.user_id=un.user_id and un.id = %d and location_id=%d"
sql = "select distinct(r.id) from routes r, user_number un where key='%s' and location_id=%d and ((r.user_id=0) or (r.user_id=un.user_id and un.id = %d))"
cur.execute(sql % (route_key, location_id, user_number_id))
res = cur.fetchone()
if res == None:
sql = "select id from routes where key='%s' and location_id=%d"
cur.execute(sql % (route_key, location_id))
res = cur.fetchone()
if res == None:
raise Call2RideError("ERR_UNKNOWN_ROUTE_ID")
route_id = res[0]
## check if offer is in the future
if c2r_check_appointment_in_future(start_date, start_time, self.get_timezone_name(location_id)) == False:
return 'ERROR: Appointment is in the past'
##time to today
timestamp = c2r_time_to_datetime(start_date, start_time, self.get_timezone_name(location_id))
self.close_offer(user_number_id, route_id, reverse, False, start_date)
##insert offer entry into database
cur.execute(self._INSERT_OFFER_STATMENT % (user_number_id,
route_id,
timestamp, reverse))
con.commit()
##get name and cellphone nr from user for matching
driver_name, driver_cellphone = self._get_user_info(user_number_id)
##reverse matching
sql = """select id, start_point, end_point, user_number_id from ride_requests where
start_point in (select point_id from route_pickuppoint where route_id=%d) and
end_point in (select point_id from route_pickuppoint where route_id=%d) and
status = 'open'"""
cur.execute(sql % (route_id, route_id))
for (aid, astart_point, aend_point, auser_id) in cur.fetchall():
## check direction
direction_check = self._check_direction(route_id, reverse, astart_point, aend_point)
if direction_check == 1:
##get steptime
#sql2 = 'select steptime from route_pickuppoint where route_id=%d and point_id=%d'
cur2 = con.cursor()
#cur2.execute(sql2 % (route_id, astart_point))
#st, = cur2.fetchone()
st = self._get_steptime(route_id, astart_point, aend_point)
sql2 = """select TIMESTAMP WITH TIME ZONE '%s' + time '%s' from ride_requests where id = %d and
earliest_start_time <= TIMESTAMP WITH TIME ZONE '%s' + time '%s' and
latest_start_time >= TIMESTAMP WITH TIME ZONE '%s' + time '%s'
"""
cur2.execute(sql2 % (timestamp, st, aid, timestamp, st, timestamp, st))
res = cur2.fetchone()
if res != None:
point_starttime_db, = res
point_starttime = self._dbtime_to_localtime(point_starttime_db, self.get_timezone_name(location_id))
XSMS_OFFER_MATCH_TEXT = u"""There is a new ride matching your request from %s(%s) to %s(%s): %s, %s (%s)."""
XSMS_OFFER_MATCH_TEXT = self._t(XSMS_OFFER_MATCH_TEXT,lang)
smstext = XSMS_OFFER_MATCH_TEXT % (
self._get_pickuppoint_name(astart_point),
astart_point,
self._get_pickuppoint_name(aend_point),
aend_point,
point_starttime.strftime(self._timeformat),
driver_name,
driver_cellphone)
reqester_name, requester_phone = self._get_user_info(auser_id)
self._send_sms(requester_phone, smstext)
##send confirmation sms
sql = 'select name from route_pickuppoint rp, pickuppoint p where p.id=rp.point_id and route_id = %d order by position'
if reverse == 1:
sql += ' desc'
cur.execute(sql % route_id)
first = 1
route_names = ''
for (name,) in cur.fetchall():
if first == 0:
route_names += ', '
route_names += name
first = 0
route_names = route_names.decode("utf-8")
## syntax for translation texts: self._t(text, lang)
smstext = self._t(self._SMS_OFFER_CONFIRMATION, lang) % (route_key, route_names, timestamp.strftime(self._timeformat))
if send_sms == 1:
self._send_sms(driver_cellphone, smstext)
cur.close()
con.close()
return smstext
def request_ride_route(self,
user_number_id,
location_id,
route_key,
reverse,
earliest_start_time,
latest_start_time,
start_date=None,
send_sms=1):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##find route id
sql = "select r.id from routes r, user_number un where key='%s' and r.user_id=un.user_id and un.id = %d and location_id=%d"
cur.execute(sql % (route_key, user_number_id, location_id))
res = cur.fetchone()
if res == None:
sql = "select id from routes where key='%s' and location_id=%d"
cur.execute(sql % (route_key, location_id))
res = cur.fetchone()
if res == None:
raise Call2RideError("ERR_UNKNOWN_ROUTE_ID")
route_id = res[0]
##find point id
rps = self.route_points_get(route_id)
first = 1
for (id, rid, pid, steptime, position) in rps:
if first == 1:
first_point = pid
last_point = pid
first = 0
##get keys
start_key = self._get_point_key(first_point)
end_key = self._get_point_key(last_point)
## reverse
if reverse:
tmp = start_key
start_key = end_key
end_key = tmp
return self.request_ride(user_number_id, location_id, start_key, end_key, earliest_start_time, latest_start_time, start_date, send_sms)
def get_matches_for_offer(self, offer_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##find route id
sql = "select ro.route_id, ro.reverse, to_char(ro.start_time, 'YYYY-Mon-DD HH24:MI:SS'), r.location_id from ride_offers ro, routes r where ro.id=%d and ro.route_id=r.id"
cur.execute(sql % offer_id)
res = cur.fetchone()
route_id, reverse, timestamp, location_id = res
l = []
##reverse matching
sql = """select id, start_point, end_point, user_number_id from ride_requests where
start_point in (select point_id from route_pickuppoint where route_id=%d) and
end_point in (select point_id from route_pickuppoint where route_id=%d) and
status = 'open'"""
cur.execute(sql % (route_id, route_id))
for (aid, astart_point, aend_point, auser_id) in cur.fetchall():
## check direction
direction_check = self._check_direction(route_id, reverse, astart_point, aend_point)
if direction_check == 1:
cur2 = con.cursor()
st = self._get_steptime(route_id, astart_point, aend_point)
sql2 = """select id, TIMESTAMP WITH TIME ZONE '%s' + time '%s' from ride_requests where id = %d and
earliest_start_time <= TIMESTAMP WITH TIME ZONE '%s' + time '%s' and
latest_start_time >= TIMESTAMP WITH TIME ZONE '%s' + time '%s'
"""
cur2.execute(sql2 % (timestamp, st, aid, timestamp, st, timestamp, st))
res = cur2.fetchone()
if res != None:
matching_route_id, point_starttime_db, = res
l.append(matching_route_id)
return l
def get_matches_for_request(self, request_id):
"""
find all matches for a specific request
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select id, to_char(earliest_start_time, 'YYYY-Mon-DD HH24:MI:SS'), to_char(latest_start_time, 'YYYY-Mon-DD HH24:MI:SS'), to_char(request_time, 'YYYY-Mon-DD HH24:MI:SS'), status, start_point, end_point, user_number_id from ride_requests where id=%d"
cur.execute(sql % request_id)
res = cur.fetchone()
start_time = res[1]
latest_start_time = res[2]
start_point = res[5]
end_point = res[6]
sql = "select id, location_id from pickuppoint where id=%d"
cur.execute(sql % start_point)
res_l = cur.fetchone()
location_id = res_l[1]
l = []
offer_text = ''
cur.execute(self._POSSIBLE_RIDES_STATEMENT % (start_point, end_point))
for roid, in cur.fetchall():
cur2 = con.cursor()
ride_res = self._get_ride_offer_info(roid)
if ride_res != False:
route_id, reverse, driver_name, driver_cell_phone_nr = ride_res
st = self._get_steptime(route_id, start_point, end_point)
if self._check_direction(route_id, reverse, start_point, end_point):
sql2 = """SELECT start_time + time '%s'
FROM ride_offers WHERE
start_time + time '%s' >= TIMESTAMP WITH TIME ZONE '%s'
AND start_time + time '%s' <= TIMESTAMP WITH TIME ZONE '%s' AND id = %d"""
timematch = sql2 % (st, st, start_time, st, latest_start_time, roid)
cur2.execute(timematch)
amatch = cur2.fetchone()
if amatch != None:
start_time_db = amatch[0]
point_starttime = self._dbtime_to_localtime(start_time_db, self.get_timezone_name(location_id))
offer_text += '%s, %s, %s\n' % (point_starttime.strftime(self._timeformat), driver_name, driver_cell_phone_nr)
l.append(roid)
cur2.close()
return l
def request_ride(self,
user_number_id,
location_id,
start_key,
end_key,
earliest_start_time,
latest_start_time,
start_date=None,
send_sms=1):
"""
This function is for requesting a ride for a route.
It starts the match process in the backend;
this may work asynchronously as it doesn't return anything.
It closes all requests for this user_id and this route_id before
earliest_start_time.
@param user_id: backend specific user-id
@param location_id: backend specific id of the location
@param start_key: key of the start pickuppoint
@param end_key: key of the end pickuppoint
@param earliest_start_time: 4 digit integer HHMM
@param latest_start_time: 4 digit integer HHMM
"""
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##validate parameters
if not verify_user_number_id(user_number_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
## check location_id
if location_id == 0:
location_id = self._get_default_location_id(user_number_id)
lang = self._get_lang_by_user_number_id(user_number_id)
##translate start_key and end_key to ids start_point and end_point
start_point, start_name = self._point_key_info(location_id, start_key)
end_point, end_name = self._point_key_info(location_id, end_key)
##convert times
tzname = self.get_timezone_name(location_id)
check_est = c2r_time_to_datetime(start_date, earliest_start_time, tzname, True)
start_time = c2r_time_to_datetime(start_date, earliest_start_time, tzname)
latest_start_time = c2r_time_to_datetime(start_date, latest_start_time, tzname, False, check_est)
##close all request before this
self.close_request(user_number_id, start_point, end_point, False, start_date)
##insert into db
cur.execute(self._INSERT_REQUEST_STATEMENT % (user_number_id,
start_point,
end_point,
start_time,
latest_start_time))
con.commit()
## Find possible ride offers
offer_text = ''
cur.execute(self._POSSIBLE_RIDES_STATEMENT % (start_point, end_point))
for roid, in cur.fetchall():
cur2 = con.cursor()
ride_res = self._get_ride_offer_info(roid)
if ride_res != False:
route_id, reverse, driver_name, driver_cell_phone_nr = ride_res
st = self._get_steptime(route_id, start_point, end_point)
if self._check_direction(route_id, reverse, start_point, end_point):
sql2 = """SELECT start_time + time '%s'
FROM ride_offers WHERE
start_time + time '%s' >= TIMESTAMP WITH TIME ZONE '%s'
AND start_time + time '%s' <= TIMESTAMP WITH TIME ZONE '%s' AND id = %d"""
timematch = sql2 % (st, st, start_time, st, latest_start_time, roid)
cur2.execute(timematch)
amatch = cur2.fetchone()
if amatch != None:
start_time_db = amatch[0]
point_starttime = self._dbtime_to_localtime(start_time_db, self.get_timezone_name(location_id))
offer_text += '%s, %s, %s\n' % (point_starttime.strftime(self._timeformat), driver_name, driver_cell_phone_nr)
cur2.close()
##match transport
cur.execute(self._REQ_PUBLIC_TRANSPORT_MATCH % (start_time,
latest_start_time,
start_point, end_point))
for deptime, means in cur.fetchall():
offer_text += '%s, %s\n' % (deptime.strftime(self._timeformat), means)
##match taxi
cur.execute(self._GET_TAXI_NUMBER_STATEMENT % location_id)
res = cur.fetchone()
if res is not None:
offer_text += self._t(self._SMS_REQ_TAXI_TEXT, lang) % res[0]
##send sms
sql_p = 'SELECT name from pickuppoint where id = %d'
cur.execute(sql_p % start_point)
res = cur.fetchone()
p1name = res[0]
cur.execute(sql_p % end_point)
res = cur.fetchone()
p2name = res[0]
XSMS_FOUND_TEXT = """Your next rides from %s to %s are:
%s
"""
XSMS_FOUND_TEXT = self._t(XSMS_FOUND_TEXT, lang)
smstext = XSMS_FOUND_TEXT % (p1name.decode("utf-8"), p2name.decode("utf-8"), offer_text)
xdriver_name, xdriver_cellphone = self._get_user_info(user_number_id)
if send_sms==1:
self._send_sms(xdriver_cellphone, smstext)
cur.close()
con.close()
return smstext
def close_request_route(self, user_number_id, location_id, route_key, reverse, throw=True, start_date=None):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
##find route id
sql = "select r.id from routes r, user_number un where key='%s' and r.user_id=un.user_id and un.id = %d and location_id=%d"
cur.execute(sql % (route_key, user_number_id, location_id))
res = cur.fetchone()
if res == None:
sql = "select id from routes where key='%s' and location_id=%d"
cur.execute(sql % (route_key, location_id))
res = cur.fetchone()
if res == None:
raise Call2RideError("ERR_UNKNOWN_ROUTE_ID")
route_id = res[0]
##find point id
rps = self.route_points_get(route_id)
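## the first and last entries (ordered by position) give the route's two end points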
first = 1
for (id, rid, pid, steptime, position) in rps:
if first == 1:
first_point = pid
last_point = pid
first = 0
## reverse
if reverse:
tmp = first_point
first_point = last_point
last_point = tmp
return self.close_request(user_number_id, first_point, last_point, throw, start_date)
def close_request_key(self, user_number_id, start_point_key, end_point_key, throw=True, start_date=None):
location_id = self._get_default_location_id(user_number_id)
sql = "select id from pickuppoint where location_id=%d and key='%s'"
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql % (location_id, start_point_key))
res = cur.fetchone()
sp = res[0]
cur.execute(sql % (location_id, end_point_key))
res = cur.fetchone()
ep = res[0]
self.close_request(user_number_id, sp, ep, throw, start_date)
def close_request(self, user_number_id, start_point, end_point, throw=True, start_date=None):
"""
will close all request according route_id and user_id
@param throw: if True (default) a Call2RideError is thrown if there
are no requests to close
"""
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
if not verify_user_number_id(user_number_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
#if not verify_route_id(route_id, con):
# raise Call2RideError("ERR_UNKNOWN_ROUTE_ID")
request_ids = get_active_request_ids(user_number_id, start_point, end_point, con, start_date)
if request_ids != None:
for request_id in request_ids:
cur.execute(self._REQ_CLOSE_LOG_STATEMENT % request_id)
cur.execute(self._REQ_CLOSE_STATEMENT % request_id)
elif throw==True:
raise Call2RideError("ERR_UNKNOWN_REQUEST")
con.commit()
cur.close()
con.close()
def close_offer_key(self, user_number_id, route_key, reverse, throw=True, start_date=None):
location_id = self._get_default_location_id(user_number_id)
sql = "select id from routes where location_id=%d and key='%s'"
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql % (location_id, route_key))
res = cur.fetchone()
self.close_offer(user_number_id, res[0], reverse, throw, start_date)
def close_offer(self, user_number_id, route_id, reverse, throw=True, start_date=None):
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
if not verify_user_number_id(user_number_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
if not verify_route_id(route_id, con):
raise Call2RideError("ERR_UNKNOWN_ROUTE_ID")
offer_ids = get_active_offer_ids(user_number_id, route_id, reverse, con, start_date)
if offer_ids != None:
for offer_id in offer_ids:
cur.execute(self._OFFER_CLOSE_LOG_STATEMENT % (offer_id))
cur.execute(self._OFFER_CLOSE_STATEMENT % (offer_id, reverse))
elif throw==True:
raise Call2RideError("ERR_UNKNOWN_OFFER")
con.commit()
cur.close()
def _get_pickuppoint_name(self, point_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select name from pickuppoint where id = %d'
cur.execute(sql % point_id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
return res[0]
return None
def _t(self, text, lang):
if lang == 'en' or lang == '' or lang is None:
return text
## let psycopg2 bind the values; the previous manual %-escaping corrupted keys containing '%'
sql = "select translation from translation where key=%s and lang=%s"
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql, (text, lang))
res = cur.fetchone()
if res != None:
cur.close()
con.close()
return res[0].decode("utf-8")
sql = "insert into translation (key, lang) values (%s, %s)"
cur.execute(sql, (text, lang))
con.commit()
cur.close()
con.close()
return text
def _get_lang_by_user_number_id(self, user_number_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select language from users u, user_number un where un.id = %d and u.id=un.user_id'
cur.execute(sql % user_number_id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
lang = res[0]
if lang == '':
lang = 'en'
return lang
def _get_user_info(self, user_number_id):
logger.debug("_get_user_info method has user_id %d" % user_number_id)
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select name, number from users u, user_number un where un.id = %d and u.id=un.user_id'
cur.execute(sql % user_number_id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
return res
return None
def get_user_number(self, user_number_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select name, number from users u, user_number un where un.id = %d and u.id=un.user_id'
cur.execute(sql % user_number_id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
name, num = res
return num
return None
def get_user_id_by_user_number_id(self, user_number_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select u.id, name from users u, user_number un where un.id = %d and u.id=un.user_id'
cur.execute(sql % user_number_id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
id, name = res
return id
return None
def check_route_id(self, route_id):
"""
Check if a route with id 'xx' is existing.
@param route_id: int
@returns: boolean
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
result = verify_route_id(route_id, con)
cur.close()
con.close()
return result
def get_user_routes(self, user_id, location_id):
"""
Get Routes per User
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur2 = con.cursor()
result = []
sql = "select id, key, user_id from routes where (user_id is null or user_id = %d) and location_id = %d";
cur.execute(sql % (user_id, location_id))
for routes in cur.fetchall():
sql = "select name, position from route_pickuppoint rp, pickuppoint p where rp.point_id=p.id and route_id = %d order by position asc limit 1"
sql2 = "select name, position from route_pickuppoint rp, pickuppoint p where rp.point_id=p.id and route_id = %d order by position desc limit 1"
cur2.execute(sql % routes[0])
start,p1 = cur2.fetchone()
cur2.execute(sql2 % routes[0])
ziel,p2 = cur2.fetchone()
result.append((routes[1], routes[2], start, ziel))
cur.close()
cur2.close()
con.close()
return result
def get_routes(self, side_id):
"""
get all routes
"""
con = psycopg2.connect(self._dsn)
cur = con.cursor()
result = []
cur.execute(self._GET_ROUTES_STATEMENT)
for route_id in cur.fetchall():
result.append(route_id[0])
cur.close()
con.close()
return result
def _get_steptime(self, route_id, start_point, end_point):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select steptime, position from route_pickuppoint where route_id=%d and point_id=%d'
cur.execute(sql % (route_id, start_point))
sp_st, sp_pos = cur.fetchone()
cur.execute(sql % (route_id, end_point))
ep_st, ep_pos = cur.fetchone()
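## travelling with the route: the start point's steptime offset applies directly;
## against it: derive the offset as the last point's steptime minus the start point's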
if sp_pos < ep_pos:
return sp_st
else:
sql = 'select steptime from route_pickuppoint where route_id=%d order by position desc limit 1'
cur.execute(sql % route_id)
lp_st, = cur.fetchone()
sql = """select time '%s' - time '%s'"""
cur.execute(sql % (lp_st, sp_st))
rev, = cur.fetchone()
return rev
def _check_direction(self, route_id, reverse, start_point, end_point):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select position from route_pickuppoint where route_id=%d and point_id=%d'
cur.execute(sql % (route_id, start_point))
sp_pos, = cur.fetchone()
cur.execute(sql % (route_id, end_point))
ep_pos, = cur.fetchone()
cur.close()
con.close()
if reverse == 0:
if sp_pos < ep_pos:
return 1
return 0
else:
if sp_pos > ep_pos:
return 1
return 0
##Translate start_key and end_key to ids start_point and end_point
def _point_key_info(self, location_id, point_key):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = """select id, name from pickuppoint where location_id = %d and key = '%s'"""
cur.execute(sql % (location_id, point_key))
res = cur.fetchone()
cur.close()
con.close()
return res
##Get Ride Offer Infos
def _get_ride_offer_info(self, ride_offer_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = """ select ro.route_id, ro.reverse, u.name, un.number
from ride_offers ro, users u, user_number un
where ro.id = %d and un.id=ro.user_number_id and un.user_id=u.id"""
cur.execute(sql % ride_offer_id)
res = cur.fetchone()
if res == None:
return False
route_id = res[0]
name = res[2]
cell_phone_nr = res[3]
reverse = 0
if res[1]:
reverse = 1
cur.close()
con.close()
return route_id, reverse, name, cell_phone_nr
def get_timezone_name(self, location_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select timezone from location where id=%d'
cur.execute(sql % location_id)
timezonename, = cur.fetchone()
cur.close()
con.close()
return timezonename
def _get_point_key(self, pid):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select key from pickuppoint where id=%d'
cur.execute(sql % pid)
key, = cur.fetchone()
cur.close()
con.close()
return key
def _dbtime_to_localtime(self, atime, timezone):
tz = pytz.timezone(timezone)
localtime = atime.astimezone(tz)
return localtime
def get_user_number_id_from_email(self, email):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select un.id from user_number un, users u where email='%s' and u.id=un.user_id and is_default=TRUE"
cur.execute(sql % email)
user_number_id, = cur.fetchone()
cur.close()
con.close()
return user_number_id
def _get_default_location_id(self, user_number_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select default_location_id from user_number un, users u where un.id=%d and u.id=un.user_id"
cur.execute(sql % user_number_id)
default_location_id, = cur.fetchone()
cur.close()
con.close()
return default_location_id
def check_login(self, email, password):
logger.debug('Check Login')
con = psycopg2.connect(self._dsn)
cur = con.cursor()
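## passwords are stored as md5 of the fixed salt 'abc123' plus the password (see change_password)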
hash = md5.new()
hash.update('abc123' + password)
password_hash = hash.hexdigest()
sql = "select id from users where email='%s' and password='%s' and is_active=TRUE"
cur.execute(sql % (email, password_hash))
res = cur.fetchone()
cur.close()
con.close()
if res is not None:
return res[0]
return None
def get_user_info_email(self, email):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select id, name, email, company_id, default_location_id, group_id, is_active, language from users where email='%s'"
cur.execute(sql % email)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
return res
return None
def get_user_info(self, id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select id, name, email, company_id, default_location_id, group_id, is_active, language from users where id=%d"
cur.execute(sql % id)
res = cur.fetchone()
cur.close()
con.close()
if res != None:
return res
return None
def companies_get(self, id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "select id, name, smskey, zendeskid, logourl, email from company order by id asc"
if (id>0):
sql = "select id, name, smskey, zendeskid, logourl, email from company where id=%d order by id asc"
sql = sql % id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def locations_get(self):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select id, company_id, "Name", timezone, phone from location order by id asc'
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def pickuppoints_get(self):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select id, location_id, name, geo, key from pickuppoint order by id asc'
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def routes_get(self):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select id, origin, destination, status, key, location_id, user_id from routes order by id asc'
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def users_get(self):
logger.debug('users_get-123-')
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select id, name, is_active, email, company_id, default_location_id, password, group_id from users order by id asc'
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def route_points_get(self, route_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'select id, route_id, point_id, to_char(steptime, \'HH24:MI:SS\'), position from route_pickuppoint where route_id = %d order by position asc'
sql = sql % route_id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def user_numbers_get(self, user_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'SELECT id,user_id,number,is_default, is_active FROM user_number where user_id = %d order by id'
sql = sql % user_id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def offers_get(self, user_id, past=1):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sqlwhere = ''
if user_id > 0:
sqlwhere = 'where user_number_id in (select id from user_number where user_id=%d)' % user_id
if past == 0:
if user_id == 0:
sqlwhere = 'where '
else:
sqlwhere += ' and '
sqlwhere += ' start_time >= current_timestamp'
sql = 'SELECT id, user_number_id, route_id, to_char(start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(request_time, \'YYYY-Mon-DD HH24:MI:SS\'), status, reverse FROM ride_offers %s order by start_time desc'
sql = sql % sqlwhere
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def departure_timetable_get(self, location_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'SELECT o.route_id, to_char(o.start_time, \'HH24:MI\') AS time, p.name, u.name, n.number, u.email FROM (ride_offers o LEFT JOIN route_pickuppoint rp USING(route_id) LEFT JOIN pickuppoint p ON rp.point_id = p.id) LEFT JOIN user_number n ON o.user_number_id = n.id LEFT JOIN users u ON n.user_id = u.id WHERE o.start_time >= current_timestamp AND o.start_time < current_timestamp + INTERVAL \'12 hour\' AND o.status != \'closed\' AND o.reverse = true AND p.location_id = %d ORDER BY o.start_time ASC, o.route_id ASC, rp.position DESC' % location_id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def offer_get(self, id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'SELECT id, user_number_id, route_id, to_char(start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(request_time, \'YYYY-Mon-DD HH24:MI:SS\'), status, reverse FROM ride_offers where id=%s'
sql = sql % id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res[0]
def requests_get(self, user_id, past=1):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sqlwhere = ''
if user_id > 0:
sqlwhere = 'where user_number_id in (select id from user_number where user_id=%d)' % user_id
if past == 0:
if user_id == 0:
sqlwhere = 'where '
else:
sqlwhere += ' and '
sqlwhere += ' latest_start_time >= current_timestamp'
sql = 'SELECT id, user_number_id, start_point, end_point, to_char(earliest_start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(latest_start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(request_time, \'YYYY-Mon-DD HH24:MI:SS\'), status FROM ride_requests %s order by earliest_start_time desc'
sql = sql % sqlwhere
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res
def request_get(self, id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = 'SELECT id, user_number_id, start_point, end_point, to_char(earliest_start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(latest_start_time, \'YYYY-Mon-DD HH24:MI:SS\'), to_char(request_time, \'YYYY-Mon-DD HH24:MI:SS\'), status FROM ride_requests where id = %s'
sql = sql % id
cur.execute(sql)
res = cur.fetchall()
cur.close()
con.close()
return res[0]
def companies_insert(self, cname, smskey, zendeskid, logourl, email):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into company (name, smskey, zendeskid, logourl, email) values ('%s', '%s', '%s', '%s', '%s')"
sql = sql % (cname, smskey, zendeskid, logourl, email)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def locations_insert(self, lname, cid, timezone, phone):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into location (\"Name\", company_id, timezone, phone) values ('%s', %d, '%s', '%s')"
sql = sql % (lname, cid, timezone, phone)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def pickuppoints_insert(self, name, lid, geo, key):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into pickuppoint (\"name\", location_id, geo, key) values ('%s', %d, '%s', '%s')"
sql = sql % (name, lid, geo, key)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def routes_insert(self, origin, destination, status, key, lid, user_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into routes (origin, destination, status, key, location_id, user_id) values ('%s', '%s', '%s', '%s', '%d', %d)"
sql = sql % (origin, destination, status, key, lid, user_id)
cur.execute(sql)
sql = "SELECT currval('routes_seq')"
cur.execute(sql)
res = cur.fetchone()
rid = res[0]
cur.close()
con.commit()
con.close()
return rid
def companies_delete(self, cid):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "delete from company where id = %d"
sql = sql % cid
cur.execute(sql)
cur.close()
con.commit()
con.close()
def locations_delete(self, lid):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "delete from location where id = %d"
sql = sql % lid
cur.execute(sql)
cur.close()
con.commit()
con.close()
def pickuppoints_delete(self, id):
self.delete_entry('pickuppoint', id)
def routes_delete(self, id):
self.delete_entry('routes', id)
def delete_entry(self, table, id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "delete from %s where id = %d"
sql = sql % (table, id)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def companies_update(self, cid, cname, smskey, zendeskid, logourl, email):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "update company set name='%s', smskey='%s', zendeskid='%s', logourl ='%s', email = '%s' where id = %d"
sql = sql % (cname, smskey, zendeskid, logourl, email, cid)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def locations_update(self, lid, lname, cid, timezone, phone):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "update location set \"Name\"='%s', company_id=%d, timezone='%s', phone='%s' where id = %d"
sql = sql % (lname, cid, timezone, phone, lid)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def pickuppoints_update(self, id, name, lid, geo, key):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "update pickuppoint set \"name\"='%s', location_id=%d, geo='%s', key='%s' where id = %d"
sql = sql % (name, lid, geo, key, id)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def routes_update(self, id, origin, destination, status, key, lid, user_id):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "update routes set origin='%s', destination='%s', status='%s', key='%s', location_id=%d, user_id=%d where id = %d"
sql = sql % (origin, destination, status, key, lid, user_id, id)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def userinfo_update(self, uid, name, email, cid, dlid, gid, is_active, language):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "update users set name='%s', email='%s', company_id=%d, default_location_id=%d, group_id=%d, is_active=%s, language='%s' where id = %d"
isa = 'False'
if is_active==1:
isa = 'True'
sql = sql % (name, email, cid, dlid, gid, isa, language, uid)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def userinfo_insert(self, name, email, cid, dlid, gid, is_active, number):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into users (name, email, company_id, default_location_id, group_id, is_active) values ('%s', '%s', %d, %d, %d, %s)"
isa = 'False'
if is_active==1:
isa = 'True'
sql = sql % (name, email, cid, dlid, gid, isa)
cur.execute(sql)
sql = "SELECT currval('users_id_seq')"
cur.execute(sql)
res = cur.fetchone()
uid = res[0]
sql = "insert into user_number (user_id, number, is_default, is_active) values (%d, '%s', True, True)"
sql = sql % (uid, number)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def user_delete(self, uid):
sql = "delete from user_number where user_id=%d"
sql = sql % uid
self._do_sql(sql)
sql = "delete from users where id=%d"
sql = sql % uid
self._do_sql(sql)
def routepoints_delete(self, rid):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "delete from route_pickuppoint where route_id = %d"
sql = sql % (rid)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def routepoints_insert(self, rid, pid, st, pos):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
sql = "insert into route_pickuppoint (route_id, point_id, steptime, position) values (%d, %d, '%s', %d)"
sql = sql % (rid, pid, st, pos)
cur.execute(sql)
cur.close()
con.commit()
con.close()
def user_update_default_location(self, uid, dlid):
sql = "update users set default_location_id=%d where id=%d"
sql = sql % (dlid, uid)
self._do_sql(sql)
def user_update_language(self, uid, language):
sql = "update users set language='%s' where id=%d"
sql = sql % (language, uid)
self._do_sql(sql)
def user_number_delete(self, unid):
sql = "delete from user_number where id=%d"
sql = sql % unid
self._do_sql(sql)
def user_number_default(self, uid, unid):
sql = "update user_number set is_default = False where user_id=%d"
sql = sql % uid
self._do_sql(sql)
sql = "update user_number set is_default = True where id=%d"
sql = sql % unid
self._do_sql(sql)
def user_number_add(self, uid, number, code):
logger.debug('user_number_add')
sql = "insert into user_number (user_id, number, is_default, validation_code, is_active) values (%d, '%s', False, '%s', False)"
sql = sql % (uid, number, code)
self._do_sql(sql)
self._send_sms(number, 'Your validation code for Open CarPool is: %s' % code)
def user_number_add_admin(self, uid, number):
sql = "insert into user_number (user_id, number, is_default, is_active) values (%d, '%s', False,True)"
sql = sql % (uid, number)
self._do_sql(sql)
def user_number_activate(self, unid, code):
sql = "update user_number set is_active=True where validation_code='%s' and id=%d"
sql = sql % (code, unid)
self._do_sql(sql)
def log_error(self, uid, code, message):
sql = "insert into error_log (user_id, code, message, time) values (%d, '%s', '%s', current_timestamp)"
sql = sql % (uid, code, message)
self._do_sql(sql)
def change_password(self, uid, new_password):
hash = md5.new()
hash.update('abc123' + new_password)
password_hash = hash.hexdigest()
sql = "update users set password='%s' where id=%d"
sql = sql % (password_hash, uid)
self._do_sql(sql)
def _do_sql(self, sql):
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql)
cur.close()
con.commit()
con.close()
def close_single_offer(self, offer_id):
sql = "update ride_offers set status = 'closed' where id = %d"
sql = sql % offer_id
self._do_sql(sql)
def close_single_request(self, request_id):
sql = "update ride_requests set status = 'closed' where id = %d"
sql = sql % request_id
self._do_sql(sql)
def get_common_timezones(self):
from pytz import common_timezones
return common_timezones
def GenPasswd(self):
import string
import random
return "".join(random.sample(string.letters+string.digits, 10))
def check_lost_password(self, key):
sql = "SELECT id from users where lost_password='%s'"
sql = sql % key
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql)
row = cur.fetchone()
cur.close()
con.close()
uid = row[0] if row else 0
if uid:
sql = "update users set lost_password=Null where id=%d"
sql = sql % uid
self._do_sql(sql)
return uid
return 0
def create_notification(self, user_id, message):
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
if not verify_user_number_id(user_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
cur.execute(self._INSERT_NOTIFICATIONS_STATMENT % (user_id, message))
con.commit()
cur.close()
con.close()
def mark_notification_read(self, id):
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(self._UPDATE_NOTIFICATIONS_STATMENT % id)
con.commit()
cur.close()
con.close()
def get_notification(self, user_id):
##open db connection
con = psycopg2.connect(self._dsn)
cur = con.cursor()
if not verify_user_id(user_id, con):
raise Call2RideError("ERR_UNKNOWN_USER_ID")
cur.execute(self._GET_NOTIFICATIONS % user_id)
res = cur.fetchall()
con.commit()
cur.close()
con.close()
return res
def lost_password(self, email, baseurl):
sql = "SELECT email from users where email='%s'"
sql = sql % email
con = psycopg2.connect(self._dsn)
cur = con.cursor()
cur.execute(sql)
row = cur.fetchone()
cur.close()
con.close()
if row:
key = self.GenPasswd()
sql = "update users set lost_password='%s' where email='%s'"
sql = sql % (key, email)
self._do_sql(sql)
import smtplib
from email.mime.text import MIMEText
message = "Please use this link and change your password: " + baseurl + "login.php?pwlost=%s"
message = message % key
msg = MIMEText(message)
msg['Subject'] = 'Open CarPool: Lost Password'
msg['From'] = 'support@opencarpool.org'
msg['To'] = email
s = smtplib.SMTP(self._config.get('mail', 'smtp'))
s.ehlo()
s.login(self._config.get('mail', 'user'), self._config.get('mail', 'password'))
s.sendmail(msg['From'], [msg['To']], msg.as_string())
s.quit()
return 1
return 0
def accept_offer(self, user_number_id, offer_id, send_sms):
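## stub implementation: always notifies a fixed test number and ignores the offer details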
send_sms = 1
smstext = "Somebody accepted your offer!"
if send_sms == 1:
self._send_sms("+4917640382017", smstext)
return smstext
|
amife/carpool
|
service/src/call2ride/interface.py
|
Python
|
gpl-3.0
| 62,939 | 0.018145 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
CSV_DEFAULT_SEP = ','
CSV_DEFAULT_LF = '\n'
def append_same_length(columns, data):
if columns != []:
return len(columns) == len(data)
return True
class DataBuffer(object):
def __init__(self, csv_filename, columns=None, csv_sep=CSV_DEFAULT_SEP, csv_lf=CSV_DEFAULT_LF):
self._fd = open(csv_filename, "w")
self.csv_sep = csv_sep
self.csv_lf = csv_lf
if columns is None:
self._columns = []
else:
self._columns = columns
@property
def columns(self):
return self._columns
@columns.setter
def columns(self, value):
self._columns = value
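# emit the header row as soon as column names are assigned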
self._append_columns()
def _append_columns(self):
if self.columns != []:
self.append(*self.columns)
def append(self, *data):
assert append_same_length(self.columns, data), "data and columns must have same length"
s = self.csv_sep.join(map(str, data)) + self.csv_lf
self._fd.write(s)
def close(self):
self._fd.close()
#def to_csv(self, filename):
# pass
def __enter__(self):
return self
def __exit__(self, typ, value, traceback):
self.close()
def main():
data = DataBuffer("data.csv")
data.columns = ["a", "b", "c"]
data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
data.close()
def main_with_context_manager():
"""
Using a context manager (with ...) will
automatically close file descriptor
"""
with DataBuffer("data.csv") as data:
data.columns = ["a", "b", "c"]
data.append(1, 2.5, 3)
data.append(4, 5, 6)
data.append(7, 8, 9)
if __name__ == '__main__':
main_with_context_manager()
|
scls19fr/openchrono
|
openchrono/databuffer.py
|
Python
|
gpl-3.0
| 1,914 | 0.009404 |
#!python
"""
Update the astrometric and photometric measurements of an mpc observation based on the header contents of the
observation.
"""
from copy import deepcopy
import time
from astropy import units
import os
import sys
import math
import numpy
import logging
import re
import mp_ephem
from ossos import storage, wcs, astrom
from ossos import orbfit
import argparse
__author__ = 'jjk'
# Maximum allowed change in angle during re-measure
TOLERANCE = 2.00 * units.arcsec
def _flipped_ccd(ccd):
"""
Is this CCD likely flipped?
The MegaCam imager on CFHT has E/N flipped for some of their CCDs.
@param ccd:
@return:
"""
return ccd < 18 or ccd in [36, 37]
def remeasure(mpc_in, reset_pixel_coordinates=True):
"""
Compute the RA/DEC of the line based on the X/Y in the comment and the WCS of the associated image.
Comment of supplied astrometric line (mpc_in) must be in OSSOSComment format.
@param mpc_in: An line of astrometric measurement to recompute the RA/DEC from the X/Y in the comment.
@type mpc_in: mp_ephem.Observation
@param reset_pixel_coordinates: try and determine correct X/Y if X/Y doesn't map to correct RA/DEC value
@type reset_pixel_coordinates: bool
"""
if mpc_in.null_observation:
return mpc_in
mpc_obs = deepcopy(mpc_in)
logging.debug("rm start: {}".format(mpc_obs.to_string()))
if not isinstance(mpc_obs.comment, mp_ephem.ephem.OSSOSComment):
logging.error("Failed to convert comment line")
return mpc_in
parts = re.search('(?P<expnum>\d{7})(?P<type>\S)(?P<ccd>\d\d)', str(mpc_obs.comment.frame))
if not parts:
logging.error("Failed to parse expnum from frame info in comment line")
return mpc_in
ccd = int(parts.group('ccd'))
expnum = int(parts.group('expnum'))
exp_type = parts.group('type')
try:
header = _connection_error_wrapper(storage._get_sghead, expnum)[ccd+1]
except IOError as ioerr:
logging.error(str(ioerr))
logging.error("Failed to get astrometric header for: {}".format(mpc_obs))
return mpc_in
this_wcs = wcs.WCS(header)
coordinate = this_wcs.xy2sky(mpc_obs.comment.x, mpc_obs.comment.y, usepv=True)
mpc_obs.coordinate = coordinate[0].to('degree').value, coordinate[1].to('degree').value
sep = mpc_in.coordinate.separation(mpc_obs.coordinate)
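# some MegaCam CCDs (below 18, plus 36/37) are E/N flipped; a huge offset on a
# discovery frame suggests the recorded X/Y came from the flipped orientation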
if sep > TOLERANCE*20 and mpc_in.discovery and _flipped_ccd(ccd):
logging.warn("Large ({}) offset using X/Y in comment line to compute RA/DEC".format(sep))
if reset_pixel_coordinates:
logging.info("flipping/flopping the discvoery x/y position recorded.")
x = header['NAXIS1'] - mpc_obs.comment.x + 1
y = header['NAXIS2'] - mpc_obs.comment.y + 1
new_coordinate = this_wcs.xy2sky(x, y, usepv=True)
new_sep = mpc_in.coordinate.separation(new_coordinate)
if new_sep < TOLERANCE*2:
mpc_obs.coordinate = new_coordinate
mpc_obs.comment.x = x
mpc_obs.comment.y = y
sep = new_sep
if sep > TOLERANCE:
# use the old header RA/DEC to predict the X/Y and then use that X/Y to get new RA/DEC
logging.warn("sep: {} --> large offset when using comment line X/Y to compute RA/DEC")
if reset_pixel_coordinates:
logging.warn("Using RA/DEC and original WCS to compute X/Y and replacing X/Y in comment.".format(sep))
header2 = _connection_error_wrapper(storage.get_astheader, expnum, ccd)
image_wcs = wcs.WCS(header2)
(x, y) = image_wcs.sky2xy(mpc_in.coordinate.ra.degree, mpc_in.coordinate.dec.degree, usepv=False)
mpc_obs.coordinate = this_wcs.xy2sky(x, y, usepv=True)
mpc_obs.comment.x = x
mpc_obs.comment.y = y
logging.info("Coordinate changed: ({:5.2f},{:5.2f}) --> ({:5.2f},{:5.2f})".format(mpc_obs.comment.x,
mpc_obs.comment.y,
x, y))
if mpc_obs.comment.mag_uncertainty is not None:
try:
merr = float(mpc_obs.comment.mag_uncertainty)
fwhm = float(_connection_error_wrapper(storage.get_fwhm, expnum, ccd))
centroid_err = merr * fwhm * header['PIXSCAL1']
logging.debug("Centroid uncertainty: {} {} => {}".format(merr, fwhm, centroid_err))
except Exception as err:
logging.error(str(err))
logging.error("Failed to compute centroid_err for observation:\n"
"{}\nUsing default of 0.2".format(mpc_obs.to_string()))
centroid_err = 0.2
else:
centroid_err = 0.2
mpc_obs.comment.astrometric_level = header.get('ASTLEVEL', "0")
try:
asterr = float(header['ASTERR'])
residuals = (asterr ** 2 + centroid_err ** 2) ** 0.5
logging.debug("Residuals: {} {} => {}".format(asterr, centroid_err, residuals))
except Exception as err:
logging.error(str(err))
logging.error("Failed while trying to compute plate uncertainty for\n{}".format(mpc_obs.to_string()))
logging.error('Using default of 0.25')
residuals = 0.25
mpc_obs.comment.plate_uncertainty = residuals
logging.debug("sending back: {}".format(mpc_obs.to_string()))
return mpc_obs
def _connection_error_wrapper(func, *args, **kwargs):
"""
Wrap a call to func in a try/except that repeats on ConnectionError
@param func:
@param args:
@param kwargs:
@return:
"""
counter = 0
while counter < 5:
try:
result = func(*args, **kwargs)
return result
except Exception as ex:
time.sleep(5)
counter += 1
logging.warning(str(ex))
def recompute_mag(mpc_in, skip_centroids=False):
"""
Get the mag of the object given the mp_ephem.ephem.Observation
"""
# TODO this really shouldn't need to build a 'reading' to get the cutout...
from ossos.downloads.cutouts import downloader
dlm = downloader.ImageCutoutDownloader()
mpc_obs = deepcopy(mpc_in)
assert isinstance(mpc_obs, mp_ephem.ephem.Observation)
assert isinstance(mpc_obs.comment, mp_ephem.ephem.OSSOSComment)
if mpc_obs.null_observation:
return mpc_obs
parts = re.search('(?P<expnum>\d{7})(?P<type>\S)(?P<ccd>\d\d)', mpc_obs.comment.frame)
if parts is None:
return mpc_obs
expnum = parts.group('expnum')
ccd = parts.group('ccd')
file_type = parts.group('type')
observation = astrom.Observation(expnum, file_type, ccd)
assert isinstance(observation, astrom.Observation)
ast_header = _connection_error_wrapper(storage._get_sghead, int(expnum))[int(ccd)+1]
filter_value = None
for keyword in ['FILTER', 'FILT1 NAME']:
filter_value = ast_header.get(keyword, None)
if filter_value is not None:
if filter_value.startswith('gri'):
filter_value = 'w'
else:
filter_value = filter_value[0]
break
# The ZP for the current astrometric lines is the pipeline one. The new ZP is in the astheader file.
new_zp = ast_header.get('PHOTZP')
# The .zeropoint.used value is likely the one used for the original photometry.
old_zp = _connection_error_wrapper(storage.get_zeropoint, int(expnum), int(ccd))
reading = astrom.SourceReading(float(mpc_obs.comment.x), float(mpc_obs.comment.y), float(mpc_obs.comment.x),
float(mpc_obs.comment.y), mpc_obs.coordinate.ra.degree,
mpc_obs.coordinate.dec.degree, float(mpc_obs.comment.x), float(mpc_obs.comment.y),
observation, ssos=True, from_input_file=True, null_observation=False,
discovery=mpc_obs.discovery)
cutout = _connection_error_wrapper(dlm.download_cutout, reading, needs_apcor=True)
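# apply the recalibrated zeropoint (PHOTZP from the astrometric header) instead of the pipeline value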
cutout._zmag = new_zp
if math.fabs(cutout.zmag - old_zp) > 0.3:
logging.warn("Large change in zeropoint detected: {} -> {}".format(old_zp, cutout.zmag))
try:
PHOT = cutout.get_observed_magnitude(centroid=not skip_centroids and mpc_obs.note1 != "H")
x = PHOT['XCENTER']
y = PHOT['YCENTER']
mag = PHOT['MAG']
merr = PHOT['MERR']
cutout.update_pixel_location((x, y), hdu_index=cutout.extno)
x, y = cutout.observed_source_point
except Exception as ex:
logging.error("ERROR: {}".format(str(ex)))
return mpc_obs
try:
if mpc_obs.comment.mag_uncertainty is not None and mpc_obs.comment.mag is not None and math.fabs(mpc_obs.comment.mag - mag) > 3.5 * mpc_obs.comment.mag_uncertainty:
logging.warn("recomputed magnitude shift large: {} --> {}".format(mpc_obs.mag, mag[0]))
if math.sqrt((x.value - mpc_obs.comment.x) ** 2 + (y.value - mpc_obs.comment.y) ** 2) > 1.9:
logging.warn("Centroid shifted ({},{}) -> ({},{})".format(mpc_obs.comment.x,
mpc_obs.comment.y,
x.value,
y.value))
except Exception as ex:
logging.error(str(ex))
# Don't use the new X/Y for Hand measured entries. (although call to get_observed_magnitude should have changed)
if str(mpc_obs.note1) != "H" and not skip_centroids:
mpc_obs.comment.x = x.value
mpc_obs.comment.y = y.value
try:
mag = float(mag)
except (TypeError, ValueError):
return mpc_obs
if math.isnan(mag):
return mpc_obs
if mag > 10:
mpc_obs._band = filter_value
mpc_obs.comment.mag = mag
mpc_obs.comment.mag_uncertainty = merr
# Update the mpc record magnitude if previous value existed here.
if (mpc_obs.mag is not None or (mpc_obs.mag is None and mpc_in.comment.photometry_note[0] == "Z")) and mag > 10:
mpc_obs.mag = mag
return mpc_obs
def run(mpc_file, cor_file,
skip_discovery=True, skip_mags=False,
skip_centroids=False, do_compare_orbits=False):  # 'do_' prefix avoids shadowing the compare_orbits function below
"""
:param mpc_file: A file containing the astrometric lines to be updated.
:param cor_file: The base name for the updated astrometry and diagnostic files.
:param skip_mags: Should we skip recomputing the magnitude of sources?
:param do_compare_orbits: Fit and compare the orbits before and after the update?
:return: :raise ValueError: If actions on the mpc_obs indicate these are not valid OSSOS observations
"""
observations = mp_ephem.EphemerisReader().read(mpc_file)
logging.debug("Read in Observations: {}".format(observations))
original_obs = []
modified_obs = []
logging.info("ASTROMETRY FILE: {} --> {}.tlf".format(mpc_file, cor_file))
for mpc_in in observations:
try:
if not isinstance(mpc_in.comment, mp_ephem.ephem.OSSOSComment):
logging.info(type(mpc_in.comment))
logging.info("Skipping: {}".format(mpc_in.to_string()))
continue
if ((skip_discovery and mpc_in.discovery) or
(not skip_discovery and not mpc_in.discovery)):
logging.info("Discovery mis-match")
logging.info("Skipping: {}".format(mpc_in.to_string()))
continue
logging.info("="*220)
logging.info(" orig: {}".format(mpc_in.to_string()))
if mpc_in.comment.astrometric_level == 4:
logging.info("Already at maximum AstLevel, skipping.")
continue
if mpc_in.null_observation:
logging.info("Skipping NULL observation.")
continue
mpc_obs = remeasure(mpc_in)
logging.info("new wcs: {}".format(mpc_obs.to_string()))
if not skip_mags:
# and not mpc_obs.comment.photometry_note[0] == "Z":
mpc_mag = remeasure(recompute_mag(mpc_obs,
skip_centroids=skip_centroids),
reset_pixel_coordinates=not skip_centroids)
else:
mpc_mag = mpc_obs
sep = mpc_in.coordinate.separation(mpc_mag.coordinate)
if sep > TOLERANCE:
logging.error("Large offset: {} arc-sec".format(sep))
logging.error("orig: {}".format(mpc_in.to_string()))
logging.error(" new: {}".format(mpc_mag.to_string()))
new_comment = "BIG SHIFT HERE"
mpc_mag.comment.comment = mpc_mag.comment.comment + " " + new_comment
logging.info("new cen: {}".format(mpc_mag.to_string()))
original_obs.append(mpc_in)
modified_obs.append(mpc_mag)
logging.info("="*220)
except Exception as ex:
logging.error(str(ex))
logging.error("Skipping: {}".format(mpc_in))
optr = open(cor_file + ".tlf", 'w')
for idx in range(len(modified_obs)):
inp = original_obs[idx]
out = modified_obs[idx]
if inp != out:
optr.write(out.to_tnodb()+"\n")
optr.close()
if not do_compare_orbits:
return True
try:
compare_orbits(original_obs, modified_obs, cor_file)
except Exception as ex:
logging.error("Orbit comparison failed: {}".format(ex))
logging.info("="*220)
return True
def compare_orbits(original_obs, modified_obs, cor_file):
"""Compare the orbit fit given the oringal and modified astrometry."""
origin = orbfit.Orbfit(original_obs)
modified = orbfit.Orbfit(modified_obs)
orbpt = open(cor_file+".orb", 'w')
# Dump summaries of the orbits
orbpt.write("#"*80+"\n")
orbpt.write("# ORIGINAL ORBIT\n")
orbpt.write(origin.summarize()+"\n")
orbpt.write("#"*80+"\n")
orbpt.write("# MODIFIED ORBIT\n")
orbpt.write(modified.summarize()+"\n")
orbpt.write("#"*80+"\n")
# Create a report of the change in orbit parameter uncertainty
for element in ['a', 'e', 'inc', 'om', 'Node', 'T']:
oval = getattr(origin, element).value
doval = getattr(origin, "d"+element).value
mval = getattr(modified, element).value
dmval = getattr(modified, "d"+element).value
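# pick enough decimal digits to resolve the smaller of the two uncertainties, then pad so the columns line up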
precision = max(int(-1*math.floor(math.log10(dmval))), int(-1*math.floor(math.log10(doval)))) + 1
precision = max(0, precision)
vdigits = 12
ddigits = 6
vpadding = " "*int(vdigits-precision)
dpadding = " "*int(ddigits-precision)
orbpt.write("{element:>5s}: "
"{oval[0]:>{vdigits}.{vdigits}}.{oval[1]:<{precision}.{precision}} {vpadding} +/- "
"{doval[0]:>{ddigits}.{ddigits}}.{doval[1]:<{precision}.{precision}} {dpadding} ==> "
"{mval[0]:>{vdigits}.{vdigits}}.{mval[1]:<{precision}.{precision}} {vpadding} +/- "
"{dmval[0]:>{ddigits}.{ddigits}}.{dmval[1]:<{precision}.{precision}}\n".format(
element=element,
dpadding=dpadding,
vpadding=vpadding,
vdigits=vdigits,
ddigits=ddigits,
oval="{:12.12f}".format(oval).split("."),
doval="{:12.12f}".format(doval).split("."),
mval="{:12.12f}".format(mval).split("."),
dmval="{:12.12f}".format(dmval).split("."),
precision=precision)
)
delta = math.fabs(oval - mval)
if delta > 3.5 * doval:
logging.warn("large delta for element {}: {} --> {}".format(element, oval, mval))
# Compute the stdev of the residuals and report the change given the new observations
orbpt.write("*"*80+"\n")
orbpt.write("Change in orbital parameters \n")
sep = "Change in scatter between initial and recalibrated obseravtions. \n"
for orb in [origin, modified]:
orbpt.write(sep)
sep = "\n ==> becomes ==> \n"
residuals = orb.residuals
dra = []
ddec = []
mags = {}
for observation in orb.observations:
if not observation.null_observation:
dra.append(observation.ra_residual)
ddec.append(observation.dec_residual)
filter = observation.band
if filter is not None:
if filter not in mags:
mags[filter] = []
try:
mags[filter].append(float(observation.mag))
except (TypeError, ValueError):
pass
if observation.comment.plate_uncertainty * 5.0 < \
((observation.ra_residual ** 2 + observation.dec_residual ** 2) ** 0.5):
logging.warn("LARGE RESIDUAL ON: {}".format(observation.to_string()))
logging.warn("Fit residual unreasonably large.")
dra = numpy.array(dra)
ddec = numpy.array(ddec)
merr_str = ""
for filter in mags:
mag = numpy.percentile(numpy.array(mags[filter]), (50))
mags[filter] = numpy.percentile(numpy.array(mags[filter]), (5,95))
merr = (mags[filter][1] - mags[filter][0])/6.0
merr_str += " {}: {:8.2f} +/- {:8.2f}".format(filter, mag, merr)
orbpt.write("ra_std:{:8.4} dec_std:{:8.4} mag: {}".format(dra.std(), ddec.std(), merr_str))
orbpt.write("\n")
orbpt.close()
def main():
description = """This program takes as input a set of OSSOS measurements and adjusts the astrometric and photometric
entries to be consistent with the current best estimate for the astrometric and photometric calibrations.
"""
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ast_file', help="An MPC file to update.")
parser.add_argument('--discovery', help="Only process the discovery images.", action='store_true', default=False)
parser.add_argument('--result_base_name', help="base name for remeasurement results (defaults to basename of input)",
default=None)
parser.add_argument('--skip-mags', action="store_true", help="Skip recomputing magnitudes.", default=False)
parser.add_argument('--skip-centroids', action="store_true", help="Skip recomputing centroids.", default=False)
parser.add_argument('--compare-orbits', action='store_true', help="Compute/Compare pre and post remeasure orbits?", default=False)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
level = logging.INFO
if args.debug:
level = logging.DEBUG
import coloredlogs
logger = logging.getLogger('update_astrom')
coloredlogs.install(level=level)
if args.result_base_name is None:
base_name = os.path.splitext(os.path.basename(args.ast_file))[0]
else:
base_name = args.result_base_name
run(args.ast_file, base_name,
skip_discovery=not args.discovery,
skip_mags=args.skip_mags,
skip_centroids=args.skip_centroids,
do_compare_orbits=args.compare_orbits)
if __name__ == '__main__':
sys.exit(main())
|
OSSOS/MOP
|
src/ossos/core/ossos/pipeline/update_astrometry.py
|
Python
|
gpl-3.0
| 19,113 | 0.006017 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import table
import genetics
def main(argv):
int_mass = table.integer_mass(argv[0])
lines = files.read_lines(argv[1])
leaderboard = [([int_mass[p] for p in peptide], peptide) for peptide in lines[0].split()]
spectrum = [int(m) for m in lines[1].split()]
N = int(lines[2])
print ' '.join(leader[1] for leader in genetics.trim_leaderboard(leaderboard, spectrum, N))
if __name__ == "__main__":
main(sys.argv[1:])
|
cowboysmall/rosalind
|
src/textbook/rosalind_ba4l.py
|
Python
|
mit
| 569 | 0.015817 |
from .mdl_user import *
from .mdl_club import *
from .mdl_event import *
from .mdl_receipt import *
from .mdl_budget import *
from .mdl_division import *
from .mdl_eventsignin import *
from .mdl_joinrequest import *
|
fsxfreak/club-suite
|
clubsuite/suite/models/__init__.py
|
Python
|
mit
| 216 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
'''
This is the development documentation of zim.
B{NOTE:} There is also some generic development documentation in the
"HACKING" folder in the source distribution. Please also have a look
at that if you want to help with zim development.
In this API documentation many of the methods with names starting with
C{do_} and C{on_} are not documented. The reason is that these are
signal handlers that are not part of the external API. They act upon
a signal but should never be called directly by other objects.
Overview
========
The script C{zim.py} is a thin wrapper around the C{main()} function
defined in L{zim.main}. This main function constructs a C{Command}
object that implements a specific commandline command. The C{Command}
object then either connects to a running instance of zim, or executes
the application.
To execute the application, the command typically constructs a
C{Notebook} object, a C{PluginManager} and a C{ConfigManager}. Then
depending on the command the graphical interface is constructed, a
webserver is started or some other action is executed on the notebook.
The C{Notebook} object is found in L{zim.notebook} and implements the
API for accessing and storing pages, attachments and other data in
the notebook folder.
The notebook works together with an C{Index} object which keeps a
SQLite database of all the pages to speed up notebook access and allows
to e.g. show a list of pages in the side pane of the user interface.
Another aspect of the notebook is the parsing of the wiki text in the
pages such that it can be shown in the interface or exported to another
format. See L{zim.formats} for implementations of different parsers.
All classes related to configuration are located in L{zim.config}.
The C{ConfigManager} handles looking up config files and provides them
for all components.
Plugins are defined as sub-modules of L{zim.plugins}. The
C{PluginManager} manages the plugins that are loaded and objects that
can be extended by plugins.
The graphical user interface is implemented in the L{zim.gui} module
and its sub-modules. The web interface is implemented in L{zim.www}.
The graphical interface uses a background process to coordinate
between running instances, this is implemented in L{zim.ipc}.
Functionality for exporting content is implemented in L{zim.exporter}.
And search functionality can be found in L{zim.search}.
Many classes in zim have signals which allow other objects to connect
to and listen for specific events. This allows for an event driven chain
of control, which is mainly used in the graphical interface, but is
also used elsewhere. If you are not familiar with event driven programs
please refer to a Gtk manual.
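For example, attaching a handler is usually a matter of connecting a
callback by signal name (the object and signal names here are
illustrative only, not a specific zim API)::
    def on_page_changed(notebook, page):
        print 'page changed to', page
    notebook.connect('page-changed', on_page_changed)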
Infrastructure classes
----------------------
All functions and objects to interact with the file system can be
found in L{zim.fs}.
For executing external applications see L{zim.applications} or
L{zim.gui.applications}.
Some generic base classes and functions can be found in L{zim.utils}
@newfield signal: Signal, Signals
@newfield emits: Emits, Emits
@newfield implementation: Implementation
'''
# New epydoc fields defined above are intended as follows:
# @signal: signal-name (param1, param2): description
# @emits: signal
# @implementation: must implement / optional for sub-classes
# Bunch of meta data, used at least in the about dialog
__version__ = '0.62'
__url__='http://www.zim-wiki.org'
__author__ = 'Jaap Karssenberg <jaap.karssenberg@gmail.com>'
__copyright__ = 'Copyright 2008 - 2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>'
__license__='''\
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
'''
import os
import sys
import gettext
import logging
import locale
logger = logging.getLogger('zim')
#: This parameter can be set by ./setup.py, can be e.g. "maemo"
PLATFORM = None
########################################################################
## Note: all init here must happen before importing any other zim
## modules, so can not use zim.fs utilities etc.
## therefore ZIM_EXECUTABLE is a string, not an object
## Check executable and relative data dir
## (sys.argv[0] should always be correct, even for compiled exe)
if os.name == "nt":
# See notes in zim/fs.py about encoding expected by abspath
ZIM_EXECUTABLE = os.path.abspath(
unicode(sys.argv[0], sys.getfilesystemencoding())
)
else:
ZIM_EXECUTABLE = unicode(
os.path.abspath(sys.argv[0]),
sys.getfilesystemencoding()
)
## Initialize locale (needed e.g. for natural_sort)
locale.setlocale(locale.LC_ALL, '')
## Initialize gettext (maybe make this optional later for module use ?)
if os.name == "nt" and not os.environ.get('LANG'):
# Set locale config for gettext (other platforms have this by default)
# Using LANG because it is lowest prio - do not override other params
lang, enc = locale.getlocale()
os.environ['LANG'] = lang + '.' + enc
logging.info('Locale set to: %s', os.environ['LANG'])
_localedir = os.path.join(os.path.dirname(ZIM_EXECUTABLE), 'locale')
if not os.name == "nt":
_localedir = _localedir.encode(sys.getfilesystemencoding())
if os.path.isdir(_localedir):
# We are running from a source dir - use the locale data included there
gettext.install('zim', _localedir, unicode=True, names=('_', 'gettext', 'ngettext'))
else:
# Hope the system knows where to find the data
gettext.install('zim', None, unicode=True, names=('_', 'gettext', 'ngettext'))
########################################################################
## Now we are allowed to import sub modules
import zim.environ # initializes environment parameters
import zim.config
# Check if we can find our own data files
_file = zim.config.data_file('zim.png')
if not (_file and _file.exists()): #pragma: no cover
raise AssertionError(
'ERROR: Could not find data files in path: \n'
'%s\n'
'Try setting XDG_DATA_DIRS'
% map(str, zim.config.data_dirs())
)
def get_zim_revision():
'''Returns multiline string with bazaar revision info, if any.
Otherwise a string saying no info was found. Intended for debug
logging.
'''
try:
from zim._version import version_info
return '''\
Zim revision is:
branch: %(branch_nick)s
revision: %(revno)s %(revision_id)s
date: %(date)s''' % version_info
except ImportError:
return 'No bzr version-info found'
|
fabricehong/zim-desktop
|
zim/__init__.py
|
Python
|
gpl-2.0
| 7,256 | 0.008131 |
# -*- coding: utf-8 -*-
##
## This file is part of intbitset.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013, 2014 CERN.
##
## intbitset is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## intbitset is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with intbitset; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the intbitset data structure."""
__revision__ = "$Id$"
import sys
import zlib
import six
import re
import pkg_resources
import unittest
class IntbitsetTest(unittest.TestCase):
"""Test functions related to intbitset data structure."""
if sys.version_info < (2, 7):
def assertIn(self, test_value, expected_set, msg=None):
if msg is None:
msg = "%s did not occur in %s" % (test_value, expected_set)
self.assert_(test_value in expected_set, msg)
def setUp(self):
from intbitset import intbitset
self.intbitset = intbitset
with open('tests/intbitset_example.int', 'rb') as f:
CFG_INTBITSET_BIG_EXAMPLE = f.read()
self.sets = [
[1024],
[10, 20],
[10, 40],
[60, 70],
[60, 80],
[10, 20, 60, 70],
[10, 40, 60, 80],
[1000],
[10000],
[23, 45, 67, 89, 110, 130, 174, 1002, 2132, 23434],
[700, 2000],
list(range(1000, 1100)),
[30], [31], [32], [33],
[62], [63], [64], [65],
[126], [127], [128], [129]
]
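# each tuple: (intbitset operator, reference set operator, int operator combining the trailing-bit flags, operates in place?)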
self.fncs_list = [
(intbitset.__and__, set.__and__, int.__and__, False),
(intbitset.__or__, set.__or__, int.__or__, False),
(intbitset.__xor__, set.__xor__, int.__xor__, False),
(intbitset.__sub__, set.__sub__, int.__sub__, False),
(intbitset.__iand__, set.__iand__, int.__and__, True),
(intbitset.__ior__, set.__ior__, int.__or__, True),
(intbitset.__ixor__, set.__ixor__, int.__xor__, True),
(intbitset.__isub__, set.__isub__, int.__sub__, True),
]
self.cmp_list = [
(intbitset.__eq__, set.__eq__, lambda x, y: cmp(x, y) == 0),
(intbitset.__ge__, set.__ge__, lambda x, y: cmp(x, y) >= 0),
(intbitset.__gt__, set.__gt__, lambda x, y: cmp(x, y) > 0),
(intbitset.__le__, set.__le__, lambda x, y: cmp(x, y) <= 0),
(intbitset.__lt__, set.__lt__, lambda x, y: cmp(x, y) < 0),
(intbitset.__ne__, set.__ne__, lambda x, y: cmp(x, y) != 0),
]
self.big_examples = [list(self.intbitset(CFG_INTBITSET_BIG_EXAMPLE))]
self.corrupted_strdumps = [
six.b("ciao"),
six.b(self.intbitset([2, 6000000]).strbits()),
six.b("djflsdkfjsdljfsldkfjsldjlfk"),
]
def tearDown(self):
del self.big_examples
del self.corrupted_strdumps
def _helper_sanity_test(self, intbitset1, msg=''):
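# invariant: the largest stored element fits within the used words, and the used
# size is strictly below the allocation; checked again after a fastdump round trip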
wordbitsize = intbitset1.get_wordbitsize()
size1 = intbitset1.get_size()
allocated1 = intbitset1.get_allocated()
creator_list = intbitset1.extract_finite_list()
up_to1 = creator_list and max(creator_list) or -1
self.assertTrue(up_to1 <= size1 * wordbitsize < allocated1 * wordbitsize, "up_to1=%s, size1=%s, allocated1=%s while testing %s during %s" % (up_to1, size1 * wordbitsize, allocated1 * wordbitsize, intbitset1, msg))
tmp = self.intbitset(intbitset1.fastdump())
size2 = tmp.get_size()
allocated2 = tmp.get_allocated()
creator_list = tmp.extract_finite_list()
up_to2 = creator_list and max(creator_list) or -1
self.assertTrue(up_to2 <= size2 * wordbitsize < allocated2 * wordbitsize, "After serialization up_to2=%s, size2=%s, allocated2=%s while testing %s during %s" % (up_to2, size2 * wordbitsize, allocated2 * wordbitsize, intbitset1, msg))
def _helper_test_via_fncs_list(self, fncs, intbitset1, intbitset2):
orig1 = self.intbitset(intbitset1)
orig2 = self.intbitset(intbitset2)
msg = "Testing %s(%s, %s)" % (fncs[0].__name__, repr(intbitset1), repr(intbitset2))
trailing1 = intbitset1.is_infinite()
trailing2 = intbitset2.is_infinite()
if fncs[3]:
fncs[0](intbitset1, intbitset2)
trailing1 = fncs[2](trailing1, trailing2) > 0
up_to = intbitset1.extract_finite_list() and max(intbitset1.extract_finite_list()) or -1
else:
intbitset3 = fncs[0](intbitset1, intbitset2)
trailing3 = fncs[2](trailing1, trailing2) > 0
up_to = intbitset3.extract_finite_list() and max(intbitset3.extract_finite_list()) or -1
set1 = set(orig1.extract_finite_list(up_to))
set2 = set(orig2.extract_finite_list(up_to))
if fncs[3]:
fncs[1](set1, set2)
else:
set3 = fncs[1](set1, set2)
self._helper_sanity_test(intbitset1, msg)
self._helper_sanity_test(intbitset2, msg)
if fncs[3]:
self.assertEqual(set1 & set(intbitset1.extract_finite_list(up_to)), set(intbitset1.extract_finite_list(up_to)), "%s not equal to %s after executing %s(%s, %s)" % (set1, set(intbitset1.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(set1 | set(intbitset1.extract_finite_list(up_to)), set1, "%s not equal to %s after executing %s(%s, %s)" % (set1, set(intbitset1.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(trailing1, intbitset1.is_infinite(), "%s is not %s as it is supposed to be after executing %s(%s, %s)" % (intbitset1, trailing1 and 'infinite' or 'finite', fncs[0].__name__, repr(orig1), repr(orig2)))
else:
self._helper_sanity_test(intbitset3, msg)
self.assertEqual(set3 & set(intbitset3.extract_finite_list(up_to)), set(intbitset3.extract_finite_list(up_to)), "%s not equal to %s after executing %s(%s, %s)" % (set3, set(intbitset3.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(set3 | set(intbitset3.extract_finite_list(up_to)), set3, "%s not equal to %s after executing %s(%s, %s)" % (set3, set(intbitset3.extract_finite_list(up_to)), fncs[0].__name__, repr(orig1), repr(orig2)))
self.assertEqual(trailing3, intbitset3.is_infinite(), "%s is not %s as it is supposed to be after executing %s(%s, %s)" % (intbitset3, trailing3 and 'infinite' or 'finite', fncs[0].__name__, repr(orig1), repr(orig2)))
def _helper_test_normal_set(self, fncs):
for set1 in self.sets:
for set2 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset(set2))
def _helper_test_empty_set(self, fncs):
for set1 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset([]))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset(set1))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset([]))
    def _helper_test_infinite_set(self, fncs):
for set1 in self.sets:
for set2 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1), self.intbitset(set2, trailing_bits=True))
self._helper_test_via_fncs_list(fncs, self.intbitset(set1, trailing_bits=True), self.intbitset(set2))
self._helper_test_via_fncs_list(fncs, self.intbitset(set1, trailing_bits=True), self.intbitset(set2, trailing_bits=True))
def _helper_test_infinite_vs_empty(self, fncs):
for set1 in self.sets:
self._helper_test_via_fncs_list(fncs, self.intbitset(set1, trailing_bits=True), self.intbitset([]))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset(set1, trailing_bits=True))
self._helper_test_via_fncs_list(fncs, self.intbitset([]), self.intbitset(trailing_bits=True))
self._helper_test_via_fncs_list(fncs, self.intbitset(trailing_bits=True), self.intbitset([]))
def test_no_segmentation_fault(self):
"""intbitset - test no segmentation fault with foreign data types"""
for intbitset_fnc, set_fnc, dummy, dummy in self.fncs_list:
self.assertRaises(TypeError, intbitset_fnc, (self.intbitset([1,2,3]), set([1,2,3])))
self.assertRaises(TypeError, set_fnc, (set([1,2,3]), self.intbitset([1,2,3])))
self.assertRaises(TypeError, intbitset_fnc, (None, self.intbitset([1,2,3])))
def test_set_intersection(self):
"""intbitset - set intersection, normal set"""
self._helper_test_normal_set(self.fncs_list[0])
def test_set_intersection_empty(self):
"""intbitset - set intersection, empty set"""
self._helper_test_empty_set(self.fncs_list[0])
def test_set_intersection_infinite(self):
"""intbitset - set intersection, infinite set"""
        self._helper_test_infinite_set(self.fncs_list[0])
def test_set_intersection_infinite_empty(self):
"""intbitset - set intersection, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[0])
def test_set_union(self):
"""intbitset - set union, normal set"""
self._helper_test_normal_set(self.fncs_list[1])
def test_set_union_empty(self):
"""intbitset - set union, empty set"""
self._helper_test_empty_set(self.fncs_list[1])
def test_set_union_infinite(self):
"""intbitset - set union, infinite set"""
        self._helper_test_infinite_set(self.fncs_list[1])
def test_set_union_infinite_empty(self):
"""intbitset - set union, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[1])
def test_set_symmetric_difference(self):
"""intbitset - set symmetric difference, normal set"""
self._helper_test_normal_set(self.fncs_list[2])
def test_set_symmetric_difference_empty(self):
"""intbitset - set symmetric difference, empty set"""
self._helper_test_empty_set(self.fncs_list[2])
def test_set_symmetric_difference_infinite(self):
"""intbitset - set symmetric difference, infinite set"""
        self._helper_test_infinite_set(self.fncs_list[2])
def test_set_symmetric_difference_infinite_empty(self):
"""intbitset - set symmetric difference, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[2])
def test_set_difference(self):
"""intbitset - set difference, normal set"""
self._helper_test_normal_set(self.fncs_list[3])
def test_set_difference_empty(self):
"""intbitset - set difference, empty set"""
self._helper_test_empty_set(self.fncs_list[3])
def test_set_difference_infinite(self):
"""intbitset - set difference, infinite set"""
        self._helper_test_infinite_set(self.fncs_list[3])
def test_set_difference_infinite_empty(self):
"""intbitset - set difference, infinite vs empty"""
self._helper_test_infinite_vs_empty(self.fncs_list[3])
def test_set_intersection_in_place(self):
"""intbitset - set intersection, normal set in place"""
self._helper_test_normal_set(self.fncs_list[4])
def test_set_intersection_empty_in_place(self):
"""intbitset - set intersection, empty set in place"""
self._helper_test_empty_set(self.fncs_list[4])
def test_set_intersection_infinite_in_place(self):
"""intbitset - set intersection, infinite set in place"""
        self._helper_test_infinite_set(self.fncs_list[4])
def test_set_intersection_infinite_empty_in_place(self):
"""intbitset - set intersection, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[4])
def test_set_union_in_place(self):
"""intbitset - set union, normal set in place"""
self._helper_test_normal_set(self.fncs_list[5])
def test_set_union_empty_in_place(self):
"""intbitset - set union, empty set in place"""
self._helper_test_empty_set(self.fncs_list[5])
def test_set_union_infinite_in_place(self):
"""intbitset - set union, infinite set in place"""
        self._helper_test_infinite_set(self.fncs_list[5])
def test_set_union_infinite_empty_in_place(self):
"""intbitset - set union, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[5])
def test_set_symmetric_difference_in_place(self):
"""intbitset - set symmetric difference, normal set in place"""
self._helper_test_normal_set(self.fncs_list[6])
def test_set_symmetric_difference_empty_in_place(self):
"""intbitset - set symmetric difference, empty set in place"""
self._helper_test_empty_set(self.fncs_list[6])
def test_set_symmetric_difference_infinite_in_place(self):
"""intbitset - set symmetric difference, infinite set in place"""
        self._helper_test_infinite_set(self.fncs_list[6])
def test_set_symmetric_difference_infinite_empty_in_place(self):
"""intbitset - set symmetric difference, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[6])
def test_set_difference_in_place(self):
"""intbitset - set difference, normal set in place"""
self._helper_test_normal_set(self.fncs_list[7])
def test_set_difference_empty_in_place(self):
"""intbitset - set difference, empty set in place"""
self._helper_test_empty_set(self.fncs_list[7])
def test_set_difference_infinite_in_place(self):
"""intbitset - set difference, infinite set in place"""
        self._helper_test_infinite_set(self.fncs_list[7])
def test_set_difference_infinite_empty_in_place(self):
"""intbitset - set difference, infinite vs empty in place"""
self._helper_test_infinite_vs_empty(self.fncs_list[7])
def test_list_dump(self):
"""intbitset - list dump"""
for set1 in self.sets + [[]]:
self.assertEqual(list(self.intbitset(set1)), set1)
def test_ascii_bit_dump(self):
"""intbitset - ascii bit dump"""
for set1 in self.sets + [[]]:
tot = 0
count = 0
for bit in self.intbitset(set1).strbits():
if bit == '0':
                    self.assertNotIn(count, set1)
                elif bit == '1':
                    self.assertIn(count, set1)
tot += 1
else:
self.fail()
count += 1
self.assertEqual(tot, len(set1))
def test_tuple_of_tuples(self):
"""intbitset - support tuple of tuples"""
for set1 in self.sets + [[]]:
tmp_tuple = tuple([(elem, ) for elem in set1])
self.assertEqual(list(self.intbitset(set1)), list(self.intbitset(tmp_tuple)))
for set1 in self.sets + [[]]:
tmp_tuple = tuple([(elem, ) for elem in set1])
self.assertEqual(self.intbitset(set1, trailing_bits=True), self.intbitset(tmp_tuple, trailing_bits=True))
def test_marshalling(self):
"""intbitset - marshalling"""
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1), self.intbitset(self.intbitset(set1).fastdump()))
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1, trailing_bits=True), self.intbitset(self.intbitset(set1, trailing_bits=True).fastdump()))
def test_pickling(self):
"""intbitset - pickling"""
from six.moves import cPickle
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1), cPickle.loads(cPickle.dumps(self.intbitset(set1), -1)))
for set1 in self.sets + [[]]:
self.assertEqual(self.intbitset(set1, trailing_bits=True), cPickle.loads(cPickle.dumps(self.intbitset(set1, trailing_bits=True), -1)))
def test_set_emptiness(self):
"""intbitset - tests for emptiness"""
for set1 in self.sets + [[]]:
self.assertEqual(not set(set1), not self.intbitset(set1))
def test_set_len(self):
"""intbitset - tests len()"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
pythonset1 = set(set1)
self.assertEqual(len(pythonset1), len(intbitset1))
intbitset1.add(76543)
pythonset1.add(76543)
self.assertEqual(len(pythonset1), len(intbitset1))
intbitset1.remove(76543)
pythonset1.remove(76543)
self.assertEqual(len(pythonset1), len(intbitset1))
def test_set_clear(self):
"""intbitset - clearing"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
intbitset1.clear()
self.assertEqual(list(intbitset1), [])
intbitset1 = self.intbitset(set1, trailing_bits=True)
intbitset1.clear()
self.assertEqual(list(intbitset1), [])
def test_set_repr(self):
"""intbitset - Pythonic representation"""
if False:
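            # Flip the constant above to True to exercise the big examples
            # as well (slow).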
big_examples = self.big_examples
else:
big_examples = []
for set1 in self.sets + [[]] + big_examples:
intbitset1 = self.intbitset(set1)
intbitset = self.intbitset
self.assertEqual(intbitset1, eval(repr(intbitset1)))
for set1 in self.sets + [[]] + big_examples:
intbitset1 = self.intbitset(set1, trailing_bits=True)
self.assertEqual(intbitset1, eval(repr(intbitset1)))
def test_set_cmp(self):
"""intbitset - (non infinite) set comparison"""
for set1 in self.sets + [[]]:
for set2 in self.sets + [[]]:
for op in self.cmp_list:
self.assertEqual(op[0](self.intbitset(set1), self.intbitset(set2)), op[1](set(set1), set(set2)), "Error in comparing %s %s with comparing function %s" % (set1, set2, op[0].__name__))
def test_set_update_with_signs(self):
"""intbitset - set update with signs"""
dict1 = {10 : -1, 20 : 1, 23 : -1, 27 : 1, 33 : -1, 56 : 1, 70 : -1, 74 : 1}
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
intbitset1.update_with_signs(dict1)
up_to = max(list(dict1.keys()) + set1)
for i in range(up_to + 1):
if dict1.get(i, i in set1 and 1 or -1) == 1:
self.assertIn(i, intbitset1, "%s was not correctly updated from %s by %s" % (repr(intbitset1), repr(set1), repr(dict1)))
else:
self.assertFalse(i in intbitset1, "%s was not correctly updated from %s by %s" % (repr(intbitset1), repr(set1), repr(dict1)))
def test_set_cloning(self):
"""intbitset - set cloning"""
import copy
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
intbitset2 = self.intbitset(intbitset1)
intbitset3 = copy.deepcopy(intbitset2)
self._helper_sanity_test(intbitset1)
self._helper_sanity_test(intbitset2)
self._helper_sanity_test(intbitset3)
self.assertEqual(intbitset1, intbitset2)
self.assertEqual(intbitset1, intbitset3)
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1, trailing_bits=True)
intbitset2 = self.intbitset(intbitset1)
intbitset3 = copy.deepcopy(intbitset2)
self._helper_sanity_test(intbitset1)
self._helper_sanity_test(intbitset2)
self._helper_sanity_test(intbitset3)
self.assertEqual(intbitset1, intbitset2)
self.assertEqual(intbitset1, intbitset3)
def test_set_pop(self):
"""intbitset - set pop"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
pythonlist1 = list(set1)
while True:
try:
res1 = pythonlist1.pop()
except IndexError:
self.assertRaises(KeyError, intbitset1.pop)
self._helper_sanity_test(intbitset1)
break
res2 = intbitset1.pop()
self._helper_sanity_test(intbitset1)
self.assertEqual(res1, res2)
def test_set_getitem(self):
"""intbitset - __getitem__"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
pythonlist1 = list(set1)
for i in range(-2 * len(set1) - 2, 2 * len(set1) + 2):
try:
res1 = pythonlist1[i]
except IndexError:
self.assertRaises(IndexError, intbitset1.__getitem__, i)
continue
res2 = intbitset1[i]
self.assertEqual(res1, res2)
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
pythonlist1 = list(set1)
for start in range(-2 * len(set1) - 2, 2 * len(set1) + 2):
for stop in range(-2 * len(set1) - 2, 2 * len(set1) + 2):
for step in range(1, 3):
res1 = pythonlist1[start:stop:step]
res2 = intbitset1[start:stop:step]
self.assertEqual(res1, list(res2), "Failure with set %s, start %s, stop %s, step %s, found %s, expected %s, indices: %s" % (set1, start, stop, step, list(res2), res1, slice(start, stop, step).indices(len(pythonlist1))))
def test_set_iterator(self):
"""intbitset - set iterator"""
for set1 in self.sets + [[]]:
intbitset1 = self.intbitset(set1)
self._helper_sanity_test(intbitset1)
tmp_set1 = []
for recid in intbitset1:
self._helper_sanity_test(intbitset1)
tmp_set1.append(recid)
self._helper_sanity_test(intbitset1)
self.assertEqual(set1, tmp_set1)
for set1 in self.sets + [[]]:
tmp_set1 = []
for recid in self.intbitset(set1):
tmp_set1.append(recid)
self.assertEqual(set1, tmp_set1)
def test_set_corruption(self):
"""intbitset - set corruption"""
set1 = self.intbitset()
for strdump in self.corrupted_strdumps:
## These should fail because they are not compressed
self.assertRaises(ValueError, self.intbitset, strdump)
self.assertRaises(ValueError, set1.fastload, strdump)
strdump = zlib.compress(strdump)
## These should fail because they are not of the good
## length
self.assertRaises(ValueError, self.intbitset, strdump)
self.assertRaises(ValueError, set1.fastload, strdump)
def test_set_consistence(self):
"""intbitset - set consistence"""
tests = (
(
(20, 30, 1000, 40),
six.b('x\x9cc`\x10p``d\x18\x18\x80d/\x00*\xb6\x00S'),
six.b('x\x9cc`\x10p`\x18(\xf0\x1f\x01\x00k\xe6\x0bF')
),
(
(20, 30, 1000, 41),
six.b('x\x9cc`\x10p``b\x18\x18\xc0\x88`\x02\x00+9\x00T'),
six.b('x\x9cc`\x10p`\x18(\xf0\x1f\x01\x00k\xe6\x0bF')
),
(
(20, 30, 1001, 41),
six.b('x\x9cc`\x10p``b\x18\x18\x80d/\x00+D\x00U'),
six.b('x\x9cc`\x10p`\x18(\xf0\xef?\x1c\x00\x00k\xdb\x0bE')
)
)
for original, dumped, dumped_trails in tests:
intbitset1 = self.intbitset(original)
intbitset2 = self.intbitset(original, trailing_bits=True)
intbitset3 = self.intbitset(dumped)
intbitset4 = self.intbitset(dumped_trails)
self._helper_sanity_test(intbitset1)
self._helper_sanity_test(intbitset2)
self._helper_sanity_test(intbitset3)
self._helper_sanity_test(intbitset4)
self.assertEqual(intbitset1.fastdump(), dumped)
self.assertEqual(intbitset1, intbitset3)
self.assertEqual(intbitset2.fastdump(), dumped_trails)
self.assertEqual(intbitset2, intbitset4)
|
crepererum/intbitset
|
tests/test_intbitset.py
|
Python
|
gpl-2.0
| 24,902 | 0.002972 |
import logging
import winreg
from nativeconfig.configs.base_config import BaseConfig
LOG = logging.getLogger('nativeconfig')
ERROR_NO_MORE_ITEMS = 259
ERROR_NO_MORE_FILES = 18
def traverse_registry_key(key, sub_key):
"""
Traverse registry key and yield one by one.
@raise WindowsError: If key cannot be opened (e.g. does not exist).
"""
current_key = winreg.OpenKey(key, sub_key, 0, winreg.KEY_ALL_ACCESS)
try:
i = 0
while True:
next_key = winreg.EnumKey(current_key, i)
for k in traverse_registry_key(key, r'{}\{}'.format(sub_key, next_key)):
yield k
i += 1
except OSError:
yield sub_key
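# Example (illustrative; the registry path is hypothetical): because child
# keys are yielded before their parent, a whole key tree can be deleted in
# one pass -- the pattern del_value_cache_free below relies on.
#
#   for sub_key in traverse_registry_key(winreg.HKEY_CURRENT_USER,
#                                        r'Software\ExampleApp'):
#       winreg.DeleteKey(winreg.HKEY_CURRENT_USER, sub_key)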
class RegistryConfig(BaseConfig):
"""
Store config in Windows Registry.
@cvar REGISTRY_KEY: Key in the registry where config will be stored.
@cvar REGISTRY_PATH: Path relative to REGISTRY_KEY that points to the config.
"""
LOG = LOG.getChild('RegistryConfig')
REGISTRY_KEY = winreg.HKEY_CURRENT_USER
def __init__(self):
k = winreg.CreateKey(self.REGISTRY_KEY, self.REGISTRY_PATH)
winreg.CloseKey(k)
super(RegistryConfig, self).__init__()
#{ BaseConfig
def get_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
try:
value, value_type = winreg.QueryValueEx(app_key, name)
if not value_type == winreg.REG_SZ:
raise ValueError("value must be a REG_SZ")
return value
except OSError:
pass
except:
self.LOG.exception("Unable to get \"%s\" from the registry:", name)
return None
def set_value_cache_free(self, name, raw_value):
try:
if raw_value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_SZ, raw_value)
else:
self.del_value_cache_free(name)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def del_value_cache_free(self, name):
try:
try:
for k in traverse_registry_key(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)):
winreg.DeleteKey(self.REGISTRY_KEY, k)
except OSError:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_ALL_ACCESS) as app_key:
winreg.DeleteValue(app_key, name)
except:
self.LOG.info("Unable to delete \"%s\" from the registry:", name)
def get_array_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH) as app_key:
value, value_type = winreg.QueryValueEx(app_key, name)
if not value_type == winreg.REG_MULTI_SZ:
raise ValueError("value must be a REG_MULTI_SZ")
return value
except:
self.LOG.info("Unable to get array \"%s\" from the registry:", name, exc_info=True)
return None
def set_array_value_cache_free(self, name, value):
try:
if value is not None:
with winreg.OpenKey(self.REGISTRY_KEY, self.REGISTRY_PATH, 0, winreg.KEY_WRITE) as app_key:
winreg.SetValueEx(app_key, name, 0, winreg.REG_MULTI_SZ, value)
else:
self.del_value_cache_free(name)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
def get_dict_value_cache_free(self, name):
try:
with winreg.OpenKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name), 0, winreg.KEY_ALL_ACCESS) as app_key:
v = {}
try:
i = 0
while True:
name, value, value_type = winreg.EnumValue(app_key, i)
if value_type != winreg.REG_SZ:
raise ValueError("value must be a REG_SZ")
if value is not None:
v[name] = value
i += 1
except OSError as e:
if e.winerror != ERROR_NO_MORE_ITEMS and e.winerror != ERROR_NO_MORE_FILES:
raise
else:
pass # end of keys
return v
except:
self.LOG.info("Unable to get dict '%s' from the registry:", name, exc_info=True)
return None
def set_dict_value_cache_free(self, name, value):
try:
self.del_value_cache_free(name)
if value is not None:
with winreg.CreateKey(self.REGISTRY_KEY, r'{}\{}'.format(self.REGISTRY_PATH, name)) as app_key:
for k, v in value.items():
winreg.SetValueEx(app_key, k, 0, winreg.REG_SZ, v)
except:
self.LOG.exception("Unable to set \"%s\" in the registry:", name)
#}
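# --- Usage sketch (illustrative only) ---
# A minimal concrete config, assuming BaseConfig needs no setup beyond what
# RegistryConfig.__init__ performs; the subclass name and registry path are
# hypothetical.
if __name__ == '__main__':
    class ExampleConfig(RegistryConfig):
        REGISTRY_PATH = r'Software\NativeConfigExample'

    cfg = ExampleConfig()
    cfg.set_value_cache_free('greeting', 'hello')
    print(cfg.get_value_cache_free('greeting'))
    cfg.del_value_cache_free('greeting')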
|
GreatFruitOmsk/nativeconfig
|
nativeconfig/configs/registry_config.py
|
Python
|
mit
| 5,235 | 0.004585 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = ['argparser', 'run_flow', 'run', 'message_if_missing']
import logging
import socket
import sys
import webbrowser
from six.moves import BaseHTTPServer
from six.moves import urllib
from oauth2client import client
from oauth2client import util
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument('--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
"""A server to handle OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into query_params and then stops serving.
"""
query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler for OAuth 2.0 redirects back to localhost.
Waits for a single request and parses the query parameters
into the servers query_params and then stops serving.
"""
def do_GET(self):
"""Handle a GET request.
Parses the query parameters and prints a message
if the flow has completed. Note that we can't detect
if an error occurred.
"""
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
query = self.path.split('?', 1)[-1]
query = dict(urllib.parse.parse_qsl(query))
self.server.query_params = query
self.wfile.write("<html><head><title>Authentication Status</title></head>")
self.wfile.write("<body><p>The authentication flow has completed.</p>")
self.wfile.write("</body></html>")
def log_message(self, format, *args):
"""Do not log messages to stdout while running as command line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
"""Core code for a command-line application.
The run() function is called from your application and runs through all the
steps to obtain credentials. It takes a Flow argument and attempts to open an
authorization server page in the user's default web browser. The server asks
the user to grant your application access to the user's data. If the user
grants access, the run() function returns new credentials. The new credentials
are also stored in the Storage argument, which updates the file associated
with the Storage object.
It presumes it is run from a command-line application and supports the
following flags:
--auth_host_name: Host name to use when running a local web server
to handle redirects during OAuth authorization.
(default: 'localhost')
--auth_host_port: Port to use when running a local web server to handle
redirects during OAuth authorization.;
repeat this option to specify a list of values
(default: '[8080, 8090]')
(an integer)
--[no]auth_local_webserver: Run a local web server to handle redirects
during OAuth authorization.
(default: 'true')
  The tools module defines an ArgumentParser that already contains the flag
definitions that run() requires. You can pass that ArgumentParser to your
ArgumentParser constructor:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args(argv)
Args:
flow: Flow, an OAuth 2.0 Flow to step through.
storage: Storage, a Storage to store the credential in.
flags: argparse.ArgumentParser, the command-line flags.
http: An instance of httplib2.Http.request
or something that acts like it.
Returns:
Credentials, the obtained credential.
"""
logging.getLogger().setLevel(getattr(logging, flags.logging_level))
if not flags.noauth_local_webserver:
success = False
port_number = 0
for port in flags.auth_host_port:
port_number = port
try:
httpd = ClientRedirectServer((flags.auth_host_name, port),
ClientRedirectHandler)
except socket.error:
pass
else:
success = True
break
flags.noauth_local_webserver = not success
if not success:
print('Failed to start a local webserver listening on either port 8080')
      print('or port 8090. Please check your firewall settings and locally')
print('running programs that may be blocking or using those ports.')
print()
print('Falling back to --noauth_local_webserver and continuing with')
print('authorization.')
print()
if not flags.noauth_local_webserver:
oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
else:
oauth_callback = client.OOB_CALLBACK_URN
flow.redirect_uri = oauth_callback
authorize_url = flow.step1_get_authorize_url()
if not flags.noauth_local_webserver:
webbrowser.open(authorize_url, new=1, autoraise=True)
print('Your browser has been opened to visit:')
print()
print(' ' + authorize_url)
print()
print('If your browser is on a different machine then exit and re-run this')
print('application with the command-line parameter ')
print()
print(' --noauth_local_webserver')
print()
else:
print('Go to the following link in your browser:')
print()
print(' ' + authorize_url)
print()
code = None
if not flags.noauth_local_webserver:
httpd.handle_request()
if 'error' in httpd.query_params:
sys.exit('Authentication request was rejected.')
if 'code' in httpd.query_params:
code = httpd.query_params['code']
else:
print('Failed to find "code" in the query parameters of the redirect.')
sys.exit('Try running with --noauth_local_webserver.')
else:
code = raw_input('Enter verification code: ').strip()
try:
credential = flow.step2_exchange(code, http=http)
except client.FlowExchangeError as e:
sys.exit('Authentication has failed: %s' % e)
storage.put(credential)
credential.set_store(storage)
print('Authentication successful.')
return credential
def message_if_missing(filename):
"""Helpful message to display if the CLIENT_SECRETS file is missing."""
return _CLIENT_SECRETS_MESSAGE % filename
try:
from oauth2client.old_run import run
from oauth2client.old_run import FLAGS
except ImportError:
def run(*args, **kwargs):
raise NotImplementedError(
'The gflags library must be installed to use tools.run(). '
        'Please install gflags or preferably switch to using '
'tools.run_flow().')
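# --- Usage sketch (illustrative only) ---
# Wires together the pieces documented in run_flow(). The secrets file,
# credential store and scope below are placeholders, not values shipped
# with this module.
if __name__ == '__main__':
    import argparse
    from oauth2client.file import Storage
    flags = argparse.ArgumentParser(parents=[argparser]).parse_args()
    flow = client.flow_from_clientsecrets(
        'client_secrets.json',
        scope='https://www.googleapis.com/auth/userinfo.email',
        message=message_if_missing('client_secrets.json'))
    run_flow(flow, Storage('credentials.dat'), flags)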
|
ychen820/microblog
|
y/google-cloud-sdk/platform/gsutil/third_party/oauth2client/oauth2client/tools.py
|
Python
|
bsd-3-clause
| 8,468 | 0.006731 |
import os
import subprocess
from pymongo import MongoClient
from flask import Flask, redirect, url_for, request, flash
from flask_bootstrap import Bootstrap
from flask_mongoengine import MongoEngine
from flask_modular_auth import AuthManager, current_authenticated_entity, SessionBasedAuthProvider, KeyBasedAuthProvider
from .reverse_proxy import ReverseProxied
# Initialize app
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Generate or load secret key
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
app.secret_key = open(SECRET_FILE).read().strip()
except IOError:
try:
import random
app.secret_key = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
secret = open(SECRET_FILE, 'w')
secret.write(app.secret_key)
secret.close()
except IOError:
        raise Exception('Please create a %s file with random characters '
                        'to generate your secret key!' % SECRET_FILE)
# Load config
config_path = os.getenv('CONFIG_PATH', 'mass_flask_config.config_development.DevelopmentConfig')
app.config.from_object(config_path)
# Init db
db = MongoEngine(app)
# Init flask-bootstrap
Bootstrap(app)
# Init auth system
def setup_session_auth(user_loader):
app.session_provider = SessionBasedAuthProvider(user_loader)
auth_manager.register_auth_provider(app.session_provider)
def setup_key_based_auth(key_loader):
app.key_based_provider = KeyBasedAuthProvider(key_loader)
auth_manager.register_auth_provider(app.key_based_provider)
def unauthorized_callback():
if current_authenticated_entity.is_authenticated:
flash('You are not authorized to access this resource!', 'warning')
return redirect(url_for('mass_flask_webui.index'))
else:
return redirect(url_for('mass_flask_webui.login', next=request.url))
auth_manager = AuthManager(app, unauthorized_callback=unauthorized_callback)
# Set the version number. For the future we should probably read it from a file.
app.version = '1.0-alpha1'
|
mass-project/mass_server
|
mass_flask_config/app.py
|
Python
|
mit
| 2,133 | 0.002344 |
# -*- coding: utf-8 -*-
#
# iago documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 28 18:57:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx']
autodoc_mock_imports = ['pint',]
intersphinx_mapping = {
'python': ('https://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
'mdanalysis': ('http://pythonhosted.org/MDAnalysis', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'iago'
copyright = u'2016, Guido Falk von Rudorff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'iagodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'iago.tex', u'iago Documentation',
u'Guido Falk von Rudorff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iago', u'iago Documentation',
[u'Guido Falk von Rudorff'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'iago', u'iago Documentation',
u'Guido Falk von Rudorff', 'iago', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
ferchault/iago
|
docs/conf.py
|
Python
|
mit
| 8,700 | 0.006897 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import requests_mock
from airflow.operators.http_operator import SimpleHttpOperator
try:
from unittest import mock
except ImportError:
import mock
class SimpleHttpOpTests(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HTTP_EXAMPLE'] = 'http://www.example.com'
@requests_mock.mock()
def test_response_in_logs(self, m):
"""
Test that when using SimpleHttpOperator with 'GET',
        the log contains 'Example.com fake response' in it
"""
m.get('http://www.example.com', text='Example.com fake response')
operator = SimpleHttpOperator(
task_id='test_HTTP_op',
method='GET',
endpoint='/',
http_conn_id='HTTP_EXAMPLE',
log_response=True,
)
with mock.patch.object(operator.log, 'info') as mock_info:
operator.execute(None)
mock_info.assert_called_with('Example.com fake response')
|
malmiron/incubator-airflow
|
tests/operators/test_http_operator.py
|
Python
|
apache-2.0
| 1,793 | 0 |
# -*- coding:utf-8 -*-
# This code is automatically transpiled by Saklient Translator
import six
from ..client import Client
from .model import Model
from ..resources.resource import Resource
from ..resources.routerplan import RouterPlan
from ...util import Util
import saklient
str = six.text_type
# module saklient.cloud.models.model_routerplan
class Model_RouterPlan(Model):
    ## A class that provides functionality to search router bandwidth plans.
## @private
# @return {str}
def _api_path(self):
return "/product/internet"
## @private
# @return {str}
def _root_key(self):
return "InternetPlan"
## @private
# @return {str}
def _root_key_m(self):
return "InternetPlans"
## @private
# @return {str}
def _class_name(self):
return "RouterPlan"
## @private
# @param {any} obj
# @param {bool} wrapped=False
# @return {saklient.cloud.resources.resource.Resource}
def _create_resource_impl(self, obj, wrapped=False):
Util.validate_type(wrapped, "bool")
return RouterPlan(self._client, obj, wrapped)
    ## Specifies the starting offset for the next list fetch.
    #
    # @param {int} offset The offset
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def offset(self, offset):
Util.validate_type(offset, "int")
return self._offset(offset)
    ## Specifies the maximum number of records for the next list fetch.
    #
    # @param {int} count The maximum number of records
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def limit(self, count):
Util.validate_type(count, "int")
return self._limit(count)
    ## Directly specifies the Web API filtering settings.
    #
    # @param {str} key The key
    # @param {any} value The value
    # @param {bool} multiple=False Set to true to pass an array as value and perform an exact-match search with OR conditions. Normally value is a scalar and a fuzzy search is performed.
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan}
def filter_by(self, key, value, multiple=False):
Util.validate_type(key, "str")
Util.validate_type(multiple, "bool")
return self._filter_by(key, value, multiple)
    ## Discards all state configured for the next request.
#
# @return {saklient.cloud.models.model_routerplan.Model_RouterPlan} this
def reset(self):
return self._reset()
    ## Fetches the single resource that has the specified ID.
    #
    # @param {str} id
    # @return {saklient.cloud.resources.routerplan.RouterPlan} The resource object
def get_by_id(self, id):
Util.validate_type(id, "str")
return self._get_by_id(id)
    ## Executes a resource search request and returns the results as a list.
    #
    # @return {saklient.cloud.resources.routerplan.RouterPlan[]} Array of resource objects
def find(self):
return self._find()
## @ignore
# @param {saklient.cloud.client.Client} client
def __init__(self, client):
super(Model_RouterPlan, self).__init__(client)
Util.validate_type(client, "saklient.cloud.client.Client")
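# Usage sketch (illustrative only, as comments): ``api`` stands in for an
# authorized saklient API object exposing a Client instance; it is not
# created here, and the query below is a placeholder.
#
#   model = Model_RouterPlan(api.client)
#   for plan in model.offset(0).limit(10).find():
#       print(plan)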
|
sakura-internet/saklient.python
|
saklient/cloud/models/model_routerplan.py
|
Python
|
mit
| 3,430 | 0.0117 |
from tile_view import *
from table import *
from game_state import *
from circ_button import *
class TableView:
GREY = (100,100,100)
BLACK = (0,0,0)
WHITE = (255,255,255)
BGCOLOR = (60,60,100)
TILE_COLOR = (90, 255, 90)
TILE_RADIUS = 30
TABLE_POS = (245, 90)
# table : Table
# pl1, pl2: Player
# game_board: list(TileView)
    # pl1_view, pl2_view: PlayerView
# marble_stack: the available marbles
def __init__(self, state: State, surface: pygame.Surface):
pl1 = state.pl1
pl2 = state.pl2
table = state.t
self.game_board = []
r = TableView.TILE_RADIUS
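        # Axial hex coordinates: pairs (i, j) with |i + j| <= 3 enumerate
        # the 37 tiles of a hexagonal board of radius 3.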
for i in range(-3,4):
for j in range(-3,4):
if math.fabs(i+j)<=3:
self.game_board.append(TileView(table.get(i,j),\
TableView.TABLE_POS[0], TableView.TABLE_POS[1],\
r, TableView.TILE_COLOR))#, lambda btn : print(btn.col, btn.row)))
W = surface.get_width()
H = surface.get_height()
self.pl1_view = PlayerView(pl1, (0,0))
self.pl2_view = PlayerView(pl2, (W-TableView.TILE_RADIUS*2,0))
self.marble_stack = []
self.marble_stack.append(CircleButton(int(W/2-r*3), H-r*2, r, \
TableView.WHITE, str(table.marbles[0])))
self.marble_stack.append(CircleButton(int(W/2), H-r*2, r, \
TableView.GREY, str(table.marbles[1])))
self.marble_stack.append(CircleButton(int(W/2+r*3), H-r*2, r, \
TableView.BLACK, str(table.marbles[2])))
def draw(self, surface: pygame.Surface, state: State):
surface.fill(TableView.BGCOLOR)
for tile in self.game_board:
tile.draw_button(surface,state.t.get(tile.col, tile.row))
self.pl1_view.draw(surface, state.pl1)
self.pl2_view.draw(surface, state.pl2)
for i in range(len(state.t.marbles)):
btn = self.marble_stack[i]
btn.text = str(state.t.marbles[i])
btn.draw_button(surface)
def get_pressed_tile(self, pos):
for tile in self.game_board:
if tile.pressed(pos):
return (tile.col, tile.row)
return None
def get_pressed_marble(self, pos):
for (i,marble) in enumerate(self.marble_stack):
if marble.pressed(pos):
return i
return None
class PlayerView:
def __init__(self, pl: Player, pos: (int, int)):
self.pos = pos
r = int(TableView.TILE_RADIUS/2)
self.buttons = [CircleButton(pos[0]+r*2, \
r*3, r, \
TableView.WHITE, str(pl.marbles[0]))]
self.buttons.append(CircleButton(pos[0]+r*2, \
r*6, r, \
TableView.GREY, str(pl.marbles[1])))
self.buttons.append(CircleButton(pos[0]+r*2, \
r*9, r, \
TableView.BLACK, str(pl.marbles[2])))
def draw(self, surface: pygame.Surface, player: Player):
for i in range(len(self.buttons)):
btn = self.buttons[i]
btn.text = str(player.marbles[i])
btn.draw_button(surface)
font_size = int(TableView.TILE_RADIUS*3//len(player.name))
myFont = pygame.font.SysFont("Calibri", font_size)
myText = myFont.render(player.name, 1, (0,0,0))
surface.blit(myText, (self.pos[0]+TableView.TILE_RADIUS/2,\
self.pos[1] + TableView.TILE_RADIUS/2))
|
lipk/pyzertz
|
pyzertz/table_view.py
|
Python
|
apache-2.0
| 3,483 | 0.014643 |
import flask
import MemeRepo.db as db
import MemeRepo.funcs as fnc
from MemeRepo.config import config
def handle(code, uri):
result = db.get_file(uri)
    if result is None:
return flask.render_template("error.html", msg="That file does not exist", code="400"), 400
else:
if result['owner'] == code:
db.delete_file(uri)
return 'deleted'
else:
return flask.render_template("error.html", msg="You do not own that file", code="403"), 403
|
carcinogoy/MemeRepo
|
MemeRepo/delete.py
|
Python
|
mit
| 517 | 0.009671 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do square matrix multiplication on Android Phone."""
import tvm
from tvm import te
import os
from tvm import rpc
from tvm.contrib import util, ndk
import numpy as np
# Set to be address of tvm proxy.
proxy_host = os.environ["TVM_ANDROID_RPC_PROXY_HOST"]
proxy_port = 9090
key = "android"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target = "llvm -mtriple=%s-linux-android" % arch
def ngflops(N):
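    # A square N x N matmul performs N^3 multiply-add pairs, i.e. 2*N^3 FLOPs.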
return 2.0 * float(N * N * N) / (10 ** 9)
dtype = "float32"
def evaluate(func, ctx, N, times):
a_np = np.random.uniform(size=(N, N)).astype(dtype)
b_np = np.random.uniform(size=(N, N)).astype(dtype)
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros((N, N), dtype=dtype), ctx)
time_f = func.time_evaluator(func.entry_name, ctx, number=times)
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.asnumpy(), a_np.dot(b_np), decimal=2)
def test_gemm_gpu(N, times, bn, num_block, num_thread):
assert bn <= N
assert num_thread * num_thread * 16 <= N
assert num_block * num_block * 2 <= N
A = te.placeholder((N, N), name="A")
B = te.placeholder((N, N), name="Btmp")
k = te.reduce_axis((0, N), name="k")
packedB = te.compute((N, N / bn, bn), lambda x, y, z: B[x, y * bn + z], name="B")
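    # packedB tiles B into shape (N, N/bn, bn) so that the innermost axis
    # holds bn consecutive floats -- the layout the vectorize() calls below
    # rely on for contiguous loads.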
C = te.compute(
(N, N), lambda ii, jj: te.sum(A[ii, k] * packedB[k, jj / bn, jj % bn], axis=k), name="C"
)
s = te.create_schedule(C.op)
CC = s.cache_write(C, "local")
block_x = te.thread_axis("blockIdx.x")
block_y = te.thread_axis("blockIdx.y")
thread_x = te.thread_axis("threadIdx.x")
thread_y = te.thread_axis("threadIdx.y")
thread_xz = te.thread_axis((0, 2), "vthread", name="vx")
thread_yz = te.thread_axis((0, 2), "vthread", name="vy")
pby, pbi = s[packedB].split(packedB.op.axis[0], nparts=num_thread)
pbx, pbj = s[packedB].split(packedB.op.axis[1], nparts=num_thread)
s[packedB].bind(pby, thread_y)
s[packedB].bind(pbx, thread_x)
pbz, pbk = s[packedB].split(packedB.op.axis[2], factor=8)
s[packedB].vectorize(pbk)
by, yi = s[C].split(C.op.axis[0], nparts=num_block)
bx, xi = s[C].split(C.op.axis[1], nparts=num_thread)
s[C].bind(by, block_y)
s[C].bind(bx, thread_y)
s[C].reorder(by, bx, yi, xi)
tyz, yi = s[C].split(yi, nparts=2)
ty, yi = s[C].split(yi, nparts=num_block)
txz, xi = s[C].split(xi, nparts=2)
tx, xi = s[C].split(xi, nparts=num_thread)
s[C].reorder(tyz, txz, ty, tx, yi, xi)
s[C].bind(tyz, thread_yz)
s[C].bind(txz, thread_xz)
s[C].bind(ty, block_x)
s[C].bind(tx, thread_x)
xyi, xxi = s[C].split(xi, factor=8)
s[C].reorder(tyz, txz, ty, tx, yi, xyi, xxi)
s[C].vectorize(xxi)
s[CC].compute_at(s[C], yi)
yo, xo = CC.op.axis
s[CC].reorder(k, yo, xo)
xo, xi = s[CC].split(xo, factor=8)
s[CC].vectorize(xi)
ko, ki = s[CC].split(k, factor=2)
s[CC].unroll(ki)
print(tvm.lower(s, [A, B, C], simple_mode=True))
f = tvm.build(s, [A, B, C], "opencl", target_host=target, name="gemm_gpu")
temp = util.tempdir()
path_dso = temp.relpath("gemm_gpu.so")
f.export_library(path_dso, ndk.create_shared)
# connect to the proxy
remote = rpc.connect(proxy_host, proxy_port, key=key)
ctx = remote.cl(0)
remote.upload(path_dso)
f = remote.load_module("gemm_gpu.so")
evaluate(f, ctx, N, times)
if __name__ == "__main__":
test_gemm_gpu(1024, times=5, bn=8, num_block=2, num_thread=8)
|
sxjscience/tvm
|
apps/topi_recipe/gemm/android_gemm_square.py
|
Python
|
apache-2.0
| 4,440 | 0.00045 |
import arcpy, os, json, csv
from portal import additem, shareItem, generateToken, getUserContent, updateItem, getGroupID, deleteItem, getGroupContent
from metadata import metadata
from ESRImapservice import ESRImapservice
class csvportal(object):
def __init__(self, user, password, portal, worksspace, groups=[]):
"""Connect to portal with username and pasword, also set the local workspace"""
self.user = user
self.password = password
self.portal = portal
self.groups = groups
self.token = generateToken(self.user, self.password, self.portal)
self.groupIDs = [getGroupID(g, self.token, self.portal) for g in self.groups]
if len(self.groupIDs) == 0:
self.userContent = getUserContent(user, '', self.token, self.portal )
else:
self.userContent = getGroupContent(self.groups[0], self.token, self.portal)
self.existingIDs = { n['title'] : n['id'] for n in self.userContent["items"]}
self.LayersFoundinMXD = []
self.ws = worksspace
if worksspace: arcpy.env.workspace = worksspace
def updateToken(self):
"""refresh the token, might be necessary if becomes invalid"""
self.token = generateToken(self.user, self.password, self.portal)
return self.token
def uploadCsv(self, csvpath, sep=";", headerlines=1, nameCol=0, pathCol=1, urlCol=2):
"""upload every row in a csv"""
with open( csvpath , 'rb') as csvfile:
nr = 0
csv_reader = csv.reader(csvfile, dialect=csv.excel, delimiter=sep)
for n in range(headerlines): csv_reader.next()
for row in csv_reader:
line = [unicode(cell, 'latin-1') for cell in row]
name, ds, url = (line[nameCol], line[pathCol], line[urlCol])
if self.ws and os.path.dirname(ds).endswith('.sde'):
ds = os.path.join(self.ws , os.path.basename(ds) )
self.addLyr(ds, name, url, self.groupIDs)
#generate new token every 50 uses
if not nr%50 : self.token = generateToken(self.user, self.password, self.portal)
nr += 1
##TODO: DELETE layers in group and not in csv
def addLyr(self, dataSource, name, serviceUrl, groupIDs=[]):
"""Add *dataSource* to *portal* for *user* , as a item with *name*
representing a layer in *service* """
meta = metadata.metadataFromArcgis( dataSource )
author = meta.credits if len( meta.credits ) else "Stad Antwerpen"
descrip = ( "<strong>"+ meta.title +"</strong> <div><em>"+
meta.orgname + "</em></div> " + meta.description +
"\n<br/> Creatiedatum: " + meta.createDate +
"\n<br/> Publicatiedatum: " + meta.pubDate +
"\n<br/> Revisiedatum: " + meta.reviseDate +
"\n<br/> Beheer: " + meta.contacts +
"\n<br/> Contact: " + meta.eMails )
if name in self.existingIDs.keys():
self.LayersFoundinMXD.append(name)
arcpy.AddMessage( "updating " + name )
item = updateItem(self.user, self.token, self.portal, self.existingIDs[name], serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags))
else:
arcpy.AddMessage( "adding " + name )
item = additem(self.user, self.token, self.portal, serviceUrl,
title=name, summary=meta.purpose, description=descrip, author=author, tags=",".join(meta.tags) )
if "success" in item.keys() and item["success"]:
id = item["id"]
arcpy.AddMessage( shareItem(id, self.token, self.portal, True, True, groupIDs) )
elif "success" in item.keys() and not item["success"]:
            raise Exception( "Error uploading "+ name +" "+ json.dumps(item))
else:
            arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(item))
def delLyr(self, name):
if name in self.existingIDs.keys():
result = deleteItem(self.existingIDs[name] , self.token, self.portal, self.user)
if "success" in result.keys() and result["success"]:
arcpy.AddMessage("Deleted layer: " + name )
elif "success" in result.keys() and not result["success"]:
raise Exception( "Error deleting "+ name +" "+ json.dumps(result))
else:
arcpy.AddMessage("unsure of success for layer "+ name +" "+ json.dumps(result))
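# --- Usage sketch (illustrative only) ---
# Credentials, portal URL, workspace and CSV path below are placeholders,
# not values from this repository.
if __name__ == '__main__':
    uploader = csvportal('user', 'password', 'https://example.maps.arcgis.com',
                         r'C:\data\connections', groups=['Open Data'])
    uploader.uploadCsv(r'C:\data\services.csv', sep=';')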
|
warrieka/portal4argis_tools
|
portal/csvportal.py
|
Python
|
mit
| 4,761 | 0.018694 |
# -*- coding: mbcs -*-
# Created by makepy.py version 0.5.01
# By python version 2.7.9 (default, Dec 10 2014, 12:28:03) [MSC v.1500 64 bit (AMD64)]
# From type library '{C866CA3A-32F7-11D2-9602-00C04F8EE628}'
# On Fri Jun 30 10:48:37 2017
'Microsoft Speech Object Library'
makepy_version = '0.5.01'
python_version = 0x20709f0
import win32com.client.CLSIDToClass, pythoncom, pywintypes
import win32com.client.util
from pywintypes import IID
from win32com.client import Dispatch
# The following 3 lines may need tweaking for the particular server
# Candidates are pythoncom.Missing, .Empty and .ArgNotFound
defaultNamedOptArg=pythoncom.Empty
defaultNamedNotOptArg=pythoncom.Empty
defaultUnnamedArg=pythoncom.Empty
CLSID = IID('{C866CA3A-32F7-11D2-9602-00C04F8EE628}')
MajorVersion = 5
MinorVersion = 4
LibraryFlags = 8
LCID = 0x0
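# The constants class below holds the DISPIDs (COM dispatch identifiers)
# that makepy extracted from the Microsoft Speech Object Library; each value
# is the member id used when invoking the corresponding property or method.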
class constants:
DISPID_SRGCmdLoadFromFile =7 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromMemory =10 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromObject =8 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromProprietaryGrammar=11 # from enum DISPIDSPRG
DISPID_SRGCmdLoadFromResource =9 # from enum DISPIDSPRG
DISPID_SRGCmdSetRuleIdState =13 # from enum DISPIDSPRG
DISPID_SRGCmdSetRuleState =12 # from enum DISPIDSPRG
DISPID_SRGCommit =6 # from enum DISPIDSPRG
DISPID_SRGDictationLoad =14 # from enum DISPIDSPRG
DISPID_SRGDictationSetState =16 # from enum DISPIDSPRG
DISPID_SRGDictationUnload =15 # from enum DISPIDSPRG
DISPID_SRGId =1 # from enum DISPIDSPRG
DISPID_SRGIsPronounceable =19 # from enum DISPIDSPRG
DISPID_SRGRecoContext =2 # from enum DISPIDSPRG
DISPID_SRGReset =5 # from enum DISPIDSPRG
DISPID_SRGRules =4 # from enum DISPIDSPRG
DISPID_SRGSetTextSelection =18 # from enum DISPIDSPRG
DISPID_SRGSetWordSequenceData =17 # from enum DISPIDSPRG
DISPID_SRGState =3 # from enum DISPIDSPRG
DISPIDSPTSI_ActiveLength =2 # from enum DISPIDSPTSI
DISPIDSPTSI_ActiveOffset =1 # from enum DISPIDSPTSI
DISPIDSPTSI_SelectionLength =4 # from enum DISPIDSPTSI
DISPIDSPTSI_SelectionOffset =3 # from enum DISPIDSPTSI
DISPID_SABufferInfo =201 # from enum DISPID_SpeechAudio
DISPID_SABufferNotifySize =204 # from enum DISPID_SpeechAudio
DISPID_SADefaultFormat =202 # from enum DISPID_SpeechAudio
DISPID_SAEventHandle =205 # from enum DISPID_SpeechAudio
DISPID_SASetState =206 # from enum DISPID_SpeechAudio
DISPID_SAStatus =200 # from enum DISPID_SpeechAudio
DISPID_SAVolume =203 # from enum DISPID_SpeechAudio
DISPID_SABIBufferSize =2 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SABIEventBias =3 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SABIMinNotification =1 # from enum DISPID_SpeechAudioBufferInfo
DISPID_SAFGetWaveFormatEx =3 # from enum DISPID_SpeechAudioFormat
DISPID_SAFGuid =2 # from enum DISPID_SpeechAudioFormat
DISPID_SAFSetWaveFormatEx =4 # from enum DISPID_SpeechAudioFormat
DISPID_SAFType =1 # from enum DISPID_SpeechAudioFormat
DISPID_SASCurrentDevicePosition=5 # from enum DISPID_SpeechAudioStatus
DISPID_SASCurrentSeekPosition =4 # from enum DISPID_SpeechAudioStatus
DISPID_SASFreeBufferSpace =1 # from enum DISPID_SpeechAudioStatus
DISPID_SASNonBlockingIO =2 # from enum DISPID_SpeechAudioStatus
DISPID_SASState =3 # from enum DISPID_SpeechAudioStatus
DISPID_SBSFormat =1 # from enum DISPID_SpeechBaseStream
DISPID_SBSRead =2 # from enum DISPID_SpeechBaseStream
DISPID_SBSSeek =4 # from enum DISPID_SpeechBaseStream
DISPID_SBSWrite =3 # from enum DISPID_SpeechBaseStream
DISPID_SCSBaseStream =100 # from enum DISPID_SpeechCustomStream
DISPID_SDKCreateKey =8 # from enum DISPID_SpeechDataKey
DISPID_SDKDeleteKey =9 # from enum DISPID_SpeechDataKey
DISPID_SDKDeleteValue =10 # from enum DISPID_SpeechDataKey
DISPID_SDKEnumKeys =11 # from enum DISPID_SpeechDataKey
DISPID_SDKEnumValues =12 # from enum DISPID_SpeechDataKey
DISPID_SDKGetBinaryValue =2 # from enum DISPID_SpeechDataKey
DISPID_SDKGetStringValue =4 # from enum DISPID_SpeechDataKey
DISPID_SDKGetlongValue =6 # from enum DISPID_SpeechDataKey
DISPID_SDKOpenKey =7 # from enum DISPID_SpeechDataKey
DISPID_SDKSetBinaryValue =1 # from enum DISPID_SpeechDataKey
DISPID_SDKSetLongValue =5 # from enum DISPID_SpeechDataKey
DISPID_SDKSetStringValue =3 # from enum DISPID_SpeechDataKey
DISPID_SFSClose =101 # from enum DISPID_SpeechFileStream
DISPID_SFSOpen =100 # from enum DISPID_SpeechFileStream
DISPID_SGRAddResource =6 # from enum DISPID_SpeechGrammarRule
DISPID_SGRAddState =7 # from enum DISPID_SpeechGrammarRule
DISPID_SGRAttributes =1 # from enum DISPID_SpeechGrammarRule
DISPID_SGRClear =5 # from enum DISPID_SpeechGrammarRule
DISPID_SGRId =4 # from enum DISPID_SpeechGrammarRule
DISPID_SGRInitialState =2 # from enum DISPID_SpeechGrammarRule
DISPID_SGRName =3 # from enum DISPID_SpeechGrammarRule
DISPID_SGRSAddRuleTransition =4 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSAddSpecialTransition=5 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSAddWordTransition =3 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSRule =1 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSTransitions =2 # from enum DISPID_SpeechGrammarRuleState
DISPID_SGRSTNextState =8 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyId =6 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyName =5 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTPropertyValue =7 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTRule =3 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTText =2 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTType =1 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTWeight =4 # from enum DISPID_SpeechGrammarRuleStateTransition
DISPID_SGRSTsCount =1 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRSTsItem =0 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRSTs_NewEnum =-4 # from enum DISPID_SpeechGrammarRuleStateTransitions
DISPID_SGRsAdd =3 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCommit =4 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCommitAndSave =5 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsCount =1 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsDynamic =2 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsFindRule =6 # from enum DISPID_SpeechGrammarRules
DISPID_SGRsItem =0 # from enum DISPID_SpeechGrammarRules
DISPID_SGRs_NewEnum =-4 # from enum DISPID_SpeechGrammarRules
DISPID_SLAddPronunciation =3 # from enum DISPID_SpeechLexicon
DISPID_SLAddPronunciationByPhoneIds=4 # from enum DISPID_SpeechLexicon
DISPID_SLGenerationId =1 # from enum DISPID_SpeechLexicon
DISPID_SLGetGenerationChange =8 # from enum DISPID_SpeechLexicon
DISPID_SLGetPronunciations =7 # from enum DISPID_SpeechLexicon
DISPID_SLGetWords =2 # from enum DISPID_SpeechLexicon
DISPID_SLRemovePronunciation =5 # from enum DISPID_SpeechLexicon
DISPID_SLRemovePronunciationByPhoneIds=6 # from enum DISPID_SpeechLexicon
DISPID_SLPsCount =1 # from enum DISPID_SpeechLexiconProns
DISPID_SLPsItem =0 # from enum DISPID_SpeechLexiconProns
DISPID_SLPs_NewEnum =-4 # from enum DISPID_SpeechLexiconProns
DISPID_SLPLangId =2 # from enum DISPID_SpeechLexiconPronunciation
DISPID_SLPPartOfSpeech =3 # from enum DISPID_SpeechLexiconPronunciation
DISPID_SLPPhoneIds =4 # from enum DISPID_SpeechLexiconPronunciation
DISPID_SLPSymbolic =5 # from enum DISPID_SpeechLexiconPronunciation
DISPID_SLPType =1 # from enum DISPID_SpeechLexiconPronunciation
DISPID_SLWLangId =1 # from enum DISPID_SpeechLexiconWord
DISPID_SLWPronunciations =4 # from enum DISPID_SpeechLexiconWord
DISPID_SLWType =2 # from enum DISPID_SpeechLexiconWord
DISPID_SLWWord =3 # from enum DISPID_SpeechLexiconWord
DISPID_SLWsCount =1 # from enum DISPID_SpeechLexiconWords
DISPID_SLWsItem =0 # from enum DISPID_SpeechLexiconWords
DISPID_SLWs_NewEnum =-4 # from enum DISPID_SpeechLexiconWords
DISPID_SMSADeviceId =300 # from enum DISPID_SpeechMMSysAudio
DISPID_SMSALineId =301 # from enum DISPID_SpeechMMSysAudio
DISPID_SMSAMMHandle =302 # from enum DISPID_SpeechMMSysAudio
DISPID_SMSGetData =101 # from enum DISPID_SpeechMemoryStream
DISPID_SMSSetData =100 # from enum DISPID_SpeechMemoryStream
DISPID_SOTCategory =3 # from enum DISPID_SpeechObjectToken
DISPID_SOTCreateInstance =7 # from enum DISPID_SpeechObjectToken
DISPID_SOTDataKey =2 # from enum DISPID_SpeechObjectToken
DISPID_SOTDisplayUI =12 # from enum DISPID_SpeechObjectToken
DISPID_SOTGetAttribute =6 # from enum DISPID_SpeechObjectToken
DISPID_SOTGetDescription =4 # from enum DISPID_SpeechObjectToken
DISPID_SOTGetStorageFileName =9 # from enum DISPID_SpeechObjectToken
DISPID_SOTId =1 # from enum DISPID_SpeechObjectToken
DISPID_SOTIsUISupported =11 # from enum DISPID_SpeechObjectToken
DISPID_SOTMatchesAttributes =13 # from enum DISPID_SpeechObjectToken
DISPID_SOTRemove =8 # from enum DISPID_SpeechObjectToken
DISPID_SOTRemoveStorageFileName=10 # from enum DISPID_SpeechObjectToken
DISPID_SOTSetId =5 # from enum DISPID_SpeechObjectToken
DISPID_SOTCDefault =2 # from enum DISPID_SpeechObjectTokenCategory
DISPID_SOTCEnumerateTokens =5 # from enum DISPID_SpeechObjectTokenCategory
DISPID_SOTCGetDataKey =4 # from enum DISPID_SpeechObjectTokenCategory
DISPID_SOTCId =1 # from enum DISPID_SpeechObjectTokenCategory
DISPID_SOTCSetId =3 # from enum DISPID_SpeechObjectTokenCategory
DISPID_SOTsCount =1 # from enum DISPID_SpeechObjectTokens
DISPID_SOTsItem =0 # from enum DISPID_SpeechObjectTokens
DISPID_SOTs_NewEnum =-4 # from enum DISPID_SpeechObjectTokens
DISPID_SPCIdToPhone =3 # from enum DISPID_SpeechPhoneConverter
DISPID_SPCLangId =1 # from enum DISPID_SpeechPhoneConverter
DISPID_SPCPhoneToId =2 # from enum DISPID_SpeechPhoneConverter
DISPID_SPACommit =5 # from enum DISPID_SpeechPhraseAlternate
DISPID_SPANumberOfElementsInResult=3 # from enum DISPID_SpeechPhraseAlternate
DISPID_SPAPhraseInfo =4 # from enum DISPID_SpeechPhraseAlternate
DISPID_SPARecoResult =1 # from enum DISPID_SpeechPhraseAlternate
DISPID_SPAStartElementInResult=2 # from enum DISPID_SpeechPhraseAlternate
DISPID_SPAsCount =1 # from enum DISPID_SpeechPhraseAlternates
DISPID_SPAsItem =0 # from enum DISPID_SpeechPhraseAlternates
DISPID_SPAs_NewEnum =-4 # from enum DISPID_SpeechPhraseAlternates
DISPID_SPPBRestorePhraseFromMemory=1 # from enum DISPID_SpeechPhraseBuilder
DISPID_SPEActualConfidence =12 # from enum DISPID_SpeechPhraseElement
DISPID_SPEAudioSizeBytes =4 # from enum DISPID_SpeechPhraseElement
DISPID_SPEAudioSizeTime =2 # from enum DISPID_SpeechPhraseElement
DISPID_SPEAudioStreamOffset =3 # from enum DISPID_SpeechPhraseElement
DISPID_SPEAudioTimeOffset =1 # from enum DISPID_SpeechPhraseElement
DISPID_SPEDisplayAttributes =10 # from enum DISPID_SpeechPhraseElement
DISPID_SPEDisplayText =7 # from enum DISPID_SpeechPhraseElement
DISPID_SPEEngineConfidence =13 # from enum DISPID_SpeechPhraseElement
DISPID_SPELexicalForm =8 # from enum DISPID_SpeechPhraseElement
DISPID_SPEPronunciation =9 # from enum DISPID_SpeechPhraseElement
DISPID_SPERequiredConfidence =11 # from enum DISPID_SpeechPhraseElement
DISPID_SPERetainedSizeBytes =6 # from enum DISPID_SpeechPhraseElement
DISPID_SPERetainedStreamOffset=5 # from enum DISPID_SpeechPhraseElement
DISPID_SPEsCount =1 # from enum DISPID_SpeechPhraseElements
DISPID_SPEsItem =0 # from enum DISPID_SpeechPhraseElements
DISPID_SPEs_NewEnum =-4 # from enum DISPID_SpeechPhraseElements
DISPID_SPIAudioSizeBytes =5 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIAudioSizeTime =7 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIAudioStreamPosition =4 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIElements =10 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIEngineId =12 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIEnginePrivateData =13 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIGetDisplayAttributes=16 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIGetText =15 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIGrammarId =2 # from enum DISPID_SpeechPhraseInfo
DISPID_SPILanguageId =1 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIProperties =9 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIReplacements =11 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIRetainedSizeBytes =6 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIRule =8 # from enum DISPID_SpeechPhraseInfo
DISPID_SPISaveToMemory =14 # from enum DISPID_SpeechPhraseInfo
DISPID_SPIStartTime =3 # from enum DISPID_SpeechPhraseInfo
DISPID_SPPsCount =1 # from enum DISPID_SpeechPhraseProperties
DISPID_SPPsItem =0 # from enum DISPID_SpeechPhraseProperties
DISPID_SPPs_NewEnum =-4 # from enum DISPID_SpeechPhraseProperties
DISPID_SPPChildren =9 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPConfidence =7 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPEngineConfidence =6 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPFirstElement =4 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPId =2 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPName =1 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPNumberOfElements =5 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPParent =8 # from enum DISPID_SpeechPhraseProperty
DISPID_SPPValue =3 # from enum DISPID_SpeechPhraseProperty
DISPID_SPRDisplayAttributes =1 # from enum DISPID_SpeechPhraseReplacement
DISPID_SPRFirstElement =3 # from enum DISPID_SpeechPhraseReplacement
DISPID_SPRNumberOfElements =4 # from enum DISPID_SpeechPhraseReplacement
DISPID_SPRText =2 # from enum DISPID_SpeechPhraseReplacement
DISPID_SPRsCount =1 # from enum DISPID_SpeechPhraseReplacements
DISPID_SPRsItem =0 # from enum DISPID_SpeechPhraseReplacements
DISPID_SPRs_NewEnum =-4 # from enum DISPID_SpeechPhraseReplacements
DISPID_SPRuleChildren =6 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleConfidence =7 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleEngineConfidence =8 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleFirstElement =3 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleId =2 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleName =1 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleNumberOfElements =4 # from enum DISPID_SpeechPhraseRule
DISPID_SPRuleParent =5 # from enum DISPID_SpeechPhraseRule
DISPID_SPRulesCount =1 # from enum DISPID_SpeechPhraseRules
DISPID_SPRulesItem =0 # from enum DISPID_SpeechPhraseRules
DISPID_SPRules_NewEnum =-4 # from enum DISPID_SpeechPhraseRules
DISPID_SRAllowVoiceFormatMatchingOnNextSet=5 # from enum DISPID_SpeechRecoContext
DISPID_SRCAudioInInterferenceStatus=2 # from enum DISPID_SpeechRecoContext
DISPID_SRCBookmark =16 # from enum DISPID_SpeechRecoContext
DISPID_SRCCmdMaxAlternates =8 # from enum DISPID_SpeechRecoContext
DISPID_SRCCreateGrammar =14 # from enum DISPID_SpeechRecoContext
DISPID_SRCCreateResultFromMemory=15 # from enum DISPID_SpeechRecoContext
DISPID_SRCEventInterests =7 # from enum DISPID_SpeechRecoContext
DISPID_SRCPause =12 # from enum DISPID_SpeechRecoContext
DISPID_SRCRecognizer =1 # from enum DISPID_SpeechRecoContext
DISPID_SRCRequestedUIType =3 # from enum DISPID_SpeechRecoContext
DISPID_SRCResume =13 # from enum DISPID_SpeechRecoContext
DISPID_SRCRetainedAudio =10 # from enum DISPID_SpeechRecoContext
DISPID_SRCRetainedAudioFormat =11 # from enum DISPID_SpeechRecoContext
DISPID_SRCSetAdaptationData =17 # from enum DISPID_SpeechRecoContext
DISPID_SRCState =9 # from enum DISPID_SpeechRecoContext
DISPID_SRCVoice =4 # from enum DISPID_SpeechRecoContext
DISPID_SRCVoicePurgeEvent =6 # from enum DISPID_SpeechRecoContext
DISPID_SRCEAdaptation =15 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEAudioLevel =17 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEBookmark =3 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEEndStream =2 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEEnginePrivate =18 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEFalseRecognition =11 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEHypothesis =8 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEInterference =12 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEPhraseStart =6 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEPropertyNumberChange=9 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEPropertyStringChange=10 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCERecognition =7 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCERecognitionForOtherContext=16 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCERecognizerStateChange=14 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCERequestUI =13 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCESoundEnd =5 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCESoundStart =4 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRCEStartStream =1 # from enum DISPID_SpeechRecoContextEvents
DISPID_SRRAlternates =5 # from enum DISPID_SpeechRecoResult
DISPID_SRRAudio =6 # from enum DISPID_SpeechRecoResult
DISPID_SRRAudioFormat =3 # from enum DISPID_SpeechRecoResult
DISPID_SRRDiscardResultInfo =9 # from enum DISPID_SpeechRecoResult
DISPID_SRRPhraseInfo =4 # from enum DISPID_SpeechRecoResult
DISPID_SRRRecoContext =1 # from enum DISPID_SpeechRecoResult
DISPID_SRRSaveToMemory =8 # from enum DISPID_SpeechRecoResult
DISPID_SRRSpeakAudio =7 # from enum DISPID_SpeechRecoResult
DISPID_SRRTimes =2 # from enum DISPID_SpeechRecoResult
DISPID_SRRSetTextFeedback =12 # from enum DISPID_SpeechRecoResult2
DISPID_SRRTLength =2 # from enum DISPID_SpeechRecoResultTimes
DISPID_SRRTOffsetFromStart =4 # from enum DISPID_SpeechRecoResultTimes
DISPID_SRRTStreamTime =1 # from enum DISPID_SpeechRecoResultTimes
DISPID_SRRTTickCount =3 # from enum DISPID_SpeechRecoResultTimes
DISPID_SRAllowAudioInputFormatChangesOnNextSet=2 # from enum DISPID_SpeechRecognizer
DISPID_SRAudioInput =3 # from enum DISPID_SpeechRecognizer
DISPID_SRAudioInputStream =4 # from enum DISPID_SpeechRecognizer
DISPID_SRCreateRecoContext =10 # from enum DISPID_SpeechRecognizer
DISPID_SRDisplayUI =17 # from enum DISPID_SpeechRecognizer
DISPID_SREmulateRecognition =9 # from enum DISPID_SpeechRecognizer
DISPID_SRGetFormat =11 # from enum DISPID_SpeechRecognizer
DISPID_SRGetPropertyNumber =13 # from enum DISPID_SpeechRecognizer
DISPID_SRGetPropertyString =15 # from enum DISPID_SpeechRecognizer
DISPID_SRGetRecognizers =18 # from enum DISPID_SpeechRecognizer
DISPID_SRIsShared =5 # from enum DISPID_SpeechRecognizer
DISPID_SRIsUISupported =16 # from enum DISPID_SpeechRecognizer
DISPID_SRProfile =8 # from enum DISPID_SpeechRecognizer
DISPID_SRRecognizer =1 # from enum DISPID_SpeechRecognizer
DISPID_SRSetPropertyNumber =12 # from enum DISPID_SpeechRecognizer
DISPID_SRSetPropertyString =14 # from enum DISPID_SpeechRecognizer
DISPID_SRState =6 # from enum DISPID_SpeechRecognizer
DISPID_SRStatus =7 # from enum DISPID_SpeechRecognizer
DISPID_SVGetAudioInputs =19 # from enum DISPID_SpeechRecognizer
DISPID_SVGetProfiles =20 # from enum DISPID_SpeechRecognizer
DISPID_SRSAudioStatus =1 # from enum DISPID_SpeechRecognizerStatus
DISPID_SRSClsidEngine =5 # from enum DISPID_SpeechRecognizerStatus
DISPID_SRSCurrentStreamNumber =3 # from enum DISPID_SpeechRecognizerStatus
DISPID_SRSCurrentStreamPosition=2 # from enum DISPID_SpeechRecognizerStatus
DISPID_SRSNumberOfActiveRules =4 # from enum DISPID_SpeechRecognizerStatus
DISPID_SRSSupportedLanguages =6 # from enum DISPID_SpeechRecognizerStatus
DISPID_SVAlertBoundary =10 # from enum DISPID_SpeechVoice
DISPID_SVAllowAudioOuputFormatChangesOnNextSet=7 # from enum DISPID_SpeechVoice
DISPID_SVAudioOutput =3 # from enum DISPID_SpeechVoice
DISPID_SVAudioOutputStream =4 # from enum DISPID_SpeechVoice
DISPID_SVDisplayUI =22 # from enum DISPID_SpeechVoice
DISPID_SVEventInterests =8 # from enum DISPID_SpeechVoice
DISPID_SVGetAudioOutputs =18 # from enum DISPID_SpeechVoice
DISPID_SVGetVoices =17 # from enum DISPID_SpeechVoice
DISPID_SVIsUISupported =21 # from enum DISPID_SpeechVoice
DISPID_SVPause =14 # from enum DISPID_SpeechVoice
DISPID_SVPriority =9 # from enum DISPID_SpeechVoice
DISPID_SVRate =5 # from enum DISPID_SpeechVoice
DISPID_SVResume =15 # from enum DISPID_SpeechVoice
DISPID_SVSkip =16 # from enum DISPID_SpeechVoice
DISPID_SVSpeak =12 # from enum DISPID_SpeechVoice
DISPID_SVSpeakCompleteEvent =20 # from enum DISPID_SpeechVoice
DISPID_SVSpeakStream =13 # from enum DISPID_SpeechVoice
DISPID_SVStatus =1 # from enum DISPID_SpeechVoice
DISPID_SVSyncronousSpeakTimeout=11 # from enum DISPID_SpeechVoice
DISPID_SVVoice =2 # from enum DISPID_SpeechVoice
DISPID_SVVolume =6 # from enum DISPID_SpeechVoice
DISPID_SVWaitUntilDone =19 # from enum DISPID_SpeechVoice
DISPID_SVEAudioLevel =9 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEBookmark =4 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEEnginePrivate =10 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEPhoneme =6 # from enum DISPID_SpeechVoiceEvent
DISPID_SVESentenceBoundary =7 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEStreamEnd =2 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEStreamStart =1 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEViseme =8 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEVoiceChange =3 # from enum DISPID_SpeechVoiceEvent
DISPID_SVEWord =5 # from enum DISPID_SpeechVoiceEvent
DISPID_SVSCurrentStreamNumber =1 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSInputSentenceLength =8 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSInputSentencePosition=7 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSInputWordLength =6 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSInputWordPosition =5 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSLastBookmark =9 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSLastBookmarkId =10 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSLastResult =3 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSLastStreamNumberQueued=2 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSPhonemeId =11 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSRunningState =4 # from enum DISPID_SpeechVoiceStatus
DISPID_SVSVisemeId =12 # from enum DISPID_SpeechVoiceStatus
DISPID_SWFEAvgBytesPerSec =4 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFEBitsPerSample =6 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFEBlockAlign =5 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFEChannels =2 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFEExtraData =7 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFEFormatTag =1 # from enum DISPID_SpeechWaveFormatEx
DISPID_SWFESamplesPerSec =3 # from enum DISPID_SpeechWaveFormatEx
DISPID_SRRGetXMLErrorInfo =11 # from enum DISPID_SpeechXMLRecoResult
DISPID_SRRGetXMLResult =10 # from enum DISPID_SpeechXMLRecoResult
SPAR_High =3 # from enum SPADAPTATIONRELEVANCE
SPAR_Low =1 # from enum SPADAPTATIONRELEVANCE
SPAR_Medium =2 # from enum SPADAPTATIONRELEVANCE
SPAR_Unknown =0 # from enum SPADAPTATIONRELEVANCE
SPAO_NONE =0 # from enum SPAUDIOOPTIONS
SPAO_RETAIN_AUDIO =1 # from enum SPAUDIOOPTIONS
SPBO_AHEAD =2 # from enum SPBOOKMARKOPTIONS
SPBO_NONE =0 # from enum SPBOOKMARKOPTIONS
SPBO_PAUSE =1 # from enum SPBOOKMARKOPTIONS
SPBO_TIME_UNITS =4 # from enum SPBOOKMARKOPTIONS
SPCT_COMMAND =0 # from enum SPCATEGORYTYPE
SPCT_DICTATION =1 # from enum SPCATEGORYTYPE
SPCT_SLEEP =2 # from enum SPCATEGORYTYPE
SPCT_SUB_COMMAND =3 # from enum SPCATEGORYTYPE
SPCT_SUB_DICTATION =4 # from enum SPCATEGORYTYPE
SPCS_DISABLED =0 # from enum SPCONTEXTSTATE
SPCS_ENABLED =1 # from enum SPCONTEXTSTATE
SPDKL_CurrentConfig =5 # from enum SPDATAKEYLOCATION
SPDKL_CurrentUser =1 # from enum SPDATAKEYLOCATION
SPDKL_DefaultLocation =0 # from enum SPDATAKEYLOCATION
SPDKL_LocalMachine =2 # from enum SPDATAKEYLOCATION
SPEI_ACTIVE_CATEGORY_CHANGED =53 # from enum SPEVENTENUM
SPEI_ADAPTATION =47 # from enum SPEVENTENUM
SPEI_END_INPUT_STREAM =2 # from enum SPEVENTENUM
SPEI_END_SR_STREAM =34 # from enum SPEVENTENUM
SPEI_FALSE_RECOGNITION =43 # from enum SPEVENTENUM
SPEI_HYPOTHESIS =39 # from enum SPEVENTENUM
SPEI_INTERFERENCE =44 # from enum SPEVENTENUM
SPEI_MAX_SR =55 # from enum SPEVENTENUM
SPEI_MAX_TTS =15 # from enum SPEVENTENUM
SPEI_MIN_SR =34 # from enum SPEVENTENUM
SPEI_MIN_TTS =1 # from enum SPEVENTENUM
SPEI_PHONEME =6 # from enum SPEVENTENUM
SPEI_PHRASE_START =37 # from enum SPEVENTENUM
SPEI_PROPERTY_NUM_CHANGE =41 # from enum SPEVENTENUM
SPEI_PROPERTY_STRING_CHANGE =42 # from enum SPEVENTENUM
SPEI_RECOGNITION =38 # from enum SPEVENTENUM
SPEI_RECO_OTHER_CONTEXT =49 # from enum SPEVENTENUM
SPEI_RECO_STATE_CHANGE =46 # from enum SPEVENTENUM
SPEI_REQUEST_UI =45 # from enum SPEVENTENUM
SPEI_RESERVED1 =30 # from enum SPEVENTENUM
SPEI_RESERVED2 =33 # from enum SPEVENTENUM
SPEI_RESERVED3 =63 # from enum SPEVENTENUM
SPEI_RESERVED5 =54 # from enum SPEVENTENUM
SPEI_RESERVED6 =55 # from enum SPEVENTENUM
SPEI_SENTENCE_BOUNDARY =7 # from enum SPEVENTENUM
SPEI_SOUND_END =36 # from enum SPEVENTENUM
SPEI_SOUND_START =35 # from enum SPEVENTENUM
SPEI_SR_AUDIO_LEVEL =50 # from enum SPEVENTENUM
SPEI_SR_BOOKMARK =40 # from enum SPEVENTENUM
SPEI_SR_PRIVATE =52 # from enum SPEVENTENUM
SPEI_SR_RETAINEDAUDIO =51 # from enum SPEVENTENUM
SPEI_START_INPUT_STREAM =1 # from enum SPEVENTENUM
SPEI_START_SR_STREAM =48 # from enum SPEVENTENUM
SPEI_TTS_AUDIO_LEVEL =9 # from enum SPEVENTENUM
SPEI_TTS_BOOKMARK =4 # from enum SPEVENTENUM
SPEI_TTS_PRIVATE =15 # from enum SPEVENTENUM
SPEI_UNDEFINED =0 # from enum SPEVENTENUM
SPEI_VISEME =8 # from enum SPEVENTENUM
SPEI_VOICE_CHANGE =3 # from enum SPEVENTENUM
SPEI_WORD_BOUNDARY =5 # from enum SPEVENTENUM
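	# Observable from the values in this module: the SpeechVoiceEvents and
	# SpeechRecoEvents bit flags further below are derived from these
	# SPEVENTENUM ids -- for TTS events the flag is 1 << id
	# (SPEI_TTS_BOOKMARK=4 -> SVEBookmark=16), and for SR events it is
	# 1 << (id - 34) (SPEI_HYPOTHESIS=39 -> SREHypothesis=32).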
SPFM_CREATE =2 # from enum SPFILEMODE
SPFM_CREATE_ALWAYS =3 # from enum SPFILEMODE
SPFM_NUM_MODES =4 # from enum SPFILEMODE
SPFM_OPEN_READONLY =0 # from enum SPFILEMODE
SPFM_OPEN_READWRITE =1 # from enum SPFILEMODE
SPGS_DISABLED =0 # from enum SPGRAMMARSTATE
SPGS_ENABLED =1 # from enum SPGRAMMARSTATE
SPGS_EXCLUSIVE =3 # from enum SPGRAMMARSTATE
SPWT_DISPLAY =0 # from enum SPGRAMMARWORDTYPE
SPWT_LEXICAL =1 # from enum SPGRAMMARWORDTYPE
SPWT_LEXICAL_NO_SPECIAL_CHARS =3 # from enum SPGRAMMARWORDTYPE
SPWT_PRONUNCIATION =2 # from enum SPGRAMMARWORDTYPE
SPINTERFERENCE_LATENCY_TRUNCATE_BEGIN=8 # from enum SPINTERFERENCE
SPINTERFERENCE_LATENCY_TRUNCATE_END=9 # from enum SPINTERFERENCE
SPINTERFERENCE_LATENCY_WARNING=7 # from enum SPINTERFERENCE
SPINTERFERENCE_NOISE =1 # from enum SPINTERFERENCE
SPINTERFERENCE_NONE =0 # from enum SPINTERFERENCE
SPINTERFERENCE_NOSIGNAL =2 # from enum SPINTERFERENCE
SPINTERFERENCE_TOOFAST =5 # from enum SPINTERFERENCE
SPINTERFERENCE_TOOLOUD =3 # from enum SPINTERFERENCE
SPINTERFERENCE_TOOQUIET =4 # from enum SPINTERFERENCE
SPINTERFERENCE_TOOSLOW =6 # from enum SPINTERFERENCE
eLEXTYPE_APP =2 # from enum SPLEXICONTYPE
eLEXTYPE_LETTERTOSOUND =8 # from enum SPLEXICONTYPE
eLEXTYPE_MORPHOLOGY =16 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE1 =4096 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE10 =2097152 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE11 =4194304 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE12 =8388608 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE13 =16777216 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE14 =33554432 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE15 =67108864 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE16 =134217728 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE17 =268435456 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE18 =536870912 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE19 =1073741824 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE2 =8192 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE20 =-2147483648 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE3 =16384 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE4 =32768 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE5 =65536 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE6 =131072 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE7 =262144 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE8 =524288 # from enum SPLEXICONTYPE
eLEXTYPE_PRIVATE9 =1048576 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED10 =2048 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED4 =32 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED6 =128 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED7 =256 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED8 =512 # from enum SPLEXICONTYPE
eLEXTYPE_RESERVED9 =1024 # from enum SPLEXICONTYPE
eLEXTYPE_USER =1 # from enum SPLEXICONTYPE
eLEXTYPE_USER_SHORTCUT =64 # from enum SPLEXICONTYPE
eLEXTYPE_VENDORLEXICON =4 # from enum SPLEXICONTYPE
SPLO_DYNAMIC =1 # from enum SPLOADOPTIONS
SPLO_STATIC =0 # from enum SPLOADOPTIONS
SPPS_Function =16384 # from enum SPPARTOFSPEECH
SPPS_Interjection =20480 # from enum SPPARTOFSPEECH
SPPS_LMA =28672 # from enum SPPARTOFSPEECH
SPPS_Modifier =12288 # from enum SPPARTOFSPEECH
SPPS_Noncontent =24576 # from enum SPPARTOFSPEECH
SPPS_NotOverriden =-1 # from enum SPPARTOFSPEECH
SPPS_Noun =4096 # from enum SPPARTOFSPEECH
SPPS_SuppressWord =61440 # from enum SPPARTOFSPEECH
SPPS_Unknown =0 # from enum SPPARTOFSPEECH
SPPS_Verb =8192 # from enum SPPARTOFSPEECH
SPRST_ACTIVE =1 # from enum SPRECOSTATE
SPRST_ACTIVE_ALWAYS =2 # from enum SPRECOSTATE
SPRST_INACTIVE =0 # from enum SPRECOSTATE
SPRST_INACTIVE_WITH_PURGE =3 # from enum SPRECOSTATE
SPRST_NUM_STATES =4 # from enum SPRECOSTATE
SPRS_ACTIVE =1 # from enum SPRULESTATE
SPRS_ACTIVE_USER_DELIMITED =4 # from enum SPRULESTATE
SPRS_ACTIVE_WITH_AUTO_PAUSE =3 # from enum SPRULESTATE
SPRS_INACTIVE =0 # from enum SPRULESTATE
SPSMF_SAPI_PROPERTIES =0 # from enum SPSEMANTICFORMAT
SPSMF_SRGS_SAPIPROPERTIES =2 # from enum SPSEMANTICFORMAT
SPSMF_SRGS_SEMANTICINTERPRETATION_MS=1 # from enum SPSEMANTICFORMAT
SPSMF_SRGS_SEMANTICINTERPRETATION_W3C=8 # from enum SPSEMANTICFORMAT
SPSMF_UPS =4 # from enum SPSEMANTICFORMAT
SPPS_RESERVED1 =12288 # from enum SPSHORTCUTTYPE
SPPS_RESERVED2 =16384 # from enum SPSHORTCUTTYPE
SPPS_RESERVED3 =20480 # from enum SPSHORTCUTTYPE
SPPS_RESERVED4 =61440 # from enum SPSHORTCUTTYPE
SPSHT_EMAIL =4096 # from enum SPSHORTCUTTYPE
SPSHT_NotOverriden =-1 # from enum SPSHORTCUTTYPE
SPSHT_OTHER =8192 # from enum SPSHORTCUTTYPE
SPSHT_Unknown =0 # from enum SPSHORTCUTTYPE
SP_VISEME_0 =0 # from enum SPVISEMES
SP_VISEME_1 =1 # from enum SPVISEMES
SP_VISEME_10 =10 # from enum SPVISEMES
SP_VISEME_11 =11 # from enum SPVISEMES
SP_VISEME_12 =12 # from enum SPVISEMES
SP_VISEME_13 =13 # from enum SPVISEMES
SP_VISEME_14 =14 # from enum SPVISEMES
SP_VISEME_15 =15 # from enum SPVISEMES
SP_VISEME_16 =16 # from enum SPVISEMES
SP_VISEME_17 =17 # from enum SPVISEMES
SP_VISEME_18 =18 # from enum SPVISEMES
SP_VISEME_19 =19 # from enum SPVISEMES
SP_VISEME_2 =2 # from enum SPVISEMES
SP_VISEME_20 =20 # from enum SPVISEMES
SP_VISEME_21 =21 # from enum SPVISEMES
SP_VISEME_3 =3 # from enum SPVISEMES
SP_VISEME_4 =4 # from enum SPVISEMES
SP_VISEME_5 =5 # from enum SPVISEMES
SP_VISEME_6 =6 # from enum SPVISEMES
SP_VISEME_7 =7 # from enum SPVISEMES
SP_VISEME_8 =8 # from enum SPVISEMES
SP_VISEME_9 =9 # from enum SPVISEMES
SPVPRI_ALERT =1 # from enum SPVPRIORITY
SPVPRI_NORMAL =0 # from enum SPVPRIORITY
SPVPRI_OVER =2 # from enum SPVPRIORITY
SPWF_INPUT =0 # from enum SPWAVEFORMATTYPE
SPWF_SRENGINE =1 # from enum SPWAVEFORMATTYPE
SPWP_KNOWN_WORD_PRONOUNCEABLE =2 # from enum SPWORDPRONOUNCEABLE
SPWP_UNKNOWN_WORD_PRONOUNCEABLE=1 # from enum SPWORDPRONOUNCEABLE
SPWP_UNKNOWN_WORD_UNPRONOUNCEABLE=0 # from enum SPWORDPRONOUNCEABLE
eWORDTYPE_ADDED =1 # from enum SPWORDTYPE
eWORDTYPE_DELETED =2 # from enum SPWORDTYPE
SPXRO_Alternates_SML =1 # from enum SPXMLRESULTOPTIONS
SPXRO_SML =0 # from enum SPXMLRESULTOPTIONS
SAFT11kHz16BitMono =10 # from enum SpeechAudioFormatType
SAFT11kHz16BitStereo =11 # from enum SpeechAudioFormatType
SAFT11kHz8BitMono =8 # from enum SpeechAudioFormatType
SAFT11kHz8BitStereo =9 # from enum SpeechAudioFormatType
SAFT12kHz16BitMono =14 # from enum SpeechAudioFormatType
SAFT12kHz16BitStereo =15 # from enum SpeechAudioFormatType
SAFT12kHz8BitMono =12 # from enum SpeechAudioFormatType
SAFT12kHz8BitStereo =13 # from enum SpeechAudioFormatType
SAFT16kHz16BitMono =18 # from enum SpeechAudioFormatType
SAFT16kHz16BitStereo =19 # from enum SpeechAudioFormatType
SAFT16kHz8BitMono =16 # from enum SpeechAudioFormatType
SAFT16kHz8BitStereo =17 # from enum SpeechAudioFormatType
SAFT22kHz16BitMono =22 # from enum SpeechAudioFormatType
SAFT22kHz16BitStereo =23 # from enum SpeechAudioFormatType
SAFT22kHz8BitMono =20 # from enum SpeechAudioFormatType
SAFT22kHz8BitStereo =21 # from enum SpeechAudioFormatType
SAFT24kHz16BitMono =26 # from enum SpeechAudioFormatType
SAFT24kHz16BitStereo =27 # from enum SpeechAudioFormatType
SAFT24kHz8BitMono =24 # from enum SpeechAudioFormatType
SAFT24kHz8BitStereo =25 # from enum SpeechAudioFormatType
SAFT32kHz16BitMono =30 # from enum SpeechAudioFormatType
SAFT32kHz16BitStereo =31 # from enum SpeechAudioFormatType
SAFT32kHz8BitMono =28 # from enum SpeechAudioFormatType
SAFT32kHz8BitStereo =29 # from enum SpeechAudioFormatType
SAFT44kHz16BitMono =34 # from enum SpeechAudioFormatType
SAFT44kHz16BitStereo =35 # from enum SpeechAudioFormatType
SAFT44kHz8BitMono =32 # from enum SpeechAudioFormatType
SAFT44kHz8BitStereo =33 # from enum SpeechAudioFormatType
SAFT48kHz16BitMono =38 # from enum SpeechAudioFormatType
SAFT48kHz16BitStereo =39 # from enum SpeechAudioFormatType
SAFT48kHz8BitMono =36 # from enum SpeechAudioFormatType
SAFT48kHz8BitStereo =37 # from enum SpeechAudioFormatType
SAFT8kHz16BitMono =6 # from enum SpeechAudioFormatType
SAFT8kHz16BitStereo =7 # from enum SpeechAudioFormatType
SAFT8kHz8BitMono =4 # from enum SpeechAudioFormatType
SAFT8kHz8BitStereo =5 # from enum SpeechAudioFormatType
SAFTADPCM_11kHzMono =59 # from enum SpeechAudioFormatType
SAFTADPCM_11kHzStereo =60 # from enum SpeechAudioFormatType
SAFTADPCM_22kHzMono =61 # from enum SpeechAudioFormatType
SAFTADPCM_22kHzStereo =62 # from enum SpeechAudioFormatType
SAFTADPCM_44kHzMono =63 # from enum SpeechAudioFormatType
SAFTADPCM_44kHzStereo =64 # from enum SpeechAudioFormatType
SAFTADPCM_8kHzMono =57 # from enum SpeechAudioFormatType
SAFTADPCM_8kHzStereo =58 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_11kHzMono =43 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_11kHzStereo =44 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_22kHzMono =45 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_22kHzStereo =46 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_44kHzMono =47 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_44kHzStereo =48 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_8kHzMono =41 # from enum SpeechAudioFormatType
SAFTCCITT_ALaw_8kHzStereo =42 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_11kHzMono =51 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_11kHzStereo =52 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_22kHzMono =53 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_22kHzStereo =54 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_44kHzMono =55 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_44kHzStereo =56 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_8kHzMono =49 # from enum SpeechAudioFormatType
SAFTCCITT_uLaw_8kHzStereo =50 # from enum SpeechAudioFormatType
SAFTDefault =-1 # from enum SpeechAudioFormatType
SAFTExtendedAudioFormat =3 # from enum SpeechAudioFormatType
SAFTGSM610_11kHzMono =66 # from enum SpeechAudioFormatType
SAFTGSM610_22kHzMono =67 # from enum SpeechAudioFormatType
SAFTGSM610_44kHzMono =68 # from enum SpeechAudioFormatType
SAFTGSM610_8kHzMono =65 # from enum SpeechAudioFormatType
SAFTNoAssignedFormat =0 # from enum SpeechAudioFormatType
SAFTNonStandardFormat =2 # from enum SpeechAudioFormatType
SAFTText =1 # from enum SpeechAudioFormatType
SAFTTrueSpeech_8kHz1BitMono =40 # from enum SpeechAudioFormatType
SASClosed =0 # from enum SpeechAudioState
SASPause =2 # from enum SpeechAudioState
SASRun =3 # from enum SpeechAudioState
SASStop =1 # from enum SpeechAudioState
SBONone =0 # from enum SpeechBookmarkOptions
SBOPause =1 # from enum SpeechBookmarkOptions
SpeechAllElements =-1 # from enum SpeechConstants
Speech_Default_Weight =1.0 # from enum SpeechConstants
Speech_Max_Pron_Length =384 # from enum SpeechConstants
Speech_Max_Word_Length =128 # from enum SpeechConstants
Speech_StreamPos_Asap =0 # from enum SpeechConstants
Speech_StreamPos_RealTime =-1 # from enum SpeechConstants
SDKLCurrentConfig =5 # from enum SpeechDataKeyLocation
SDKLCurrentUser =1 # from enum SpeechDataKeyLocation
SDKLDefaultLocation =0 # from enum SpeechDataKeyLocation
SDKLLocalMachine =2 # from enum SpeechDataKeyLocation
SDTAll =255 # from enum SpeechDiscardType
SDTAlternates =128 # from enum SpeechDiscardType
SDTAudio =64 # from enum SpeechDiscardType
SDTDisplayText =8 # from enum SpeechDiscardType
SDTLexicalForm =16 # from enum SpeechDiscardType
SDTPronunciation =32 # from enum SpeechDiscardType
SDTProperty =1 # from enum SpeechDiscardType
SDTReplacement =2 # from enum SpeechDiscardType
SDTRule =4 # from enum SpeechDiscardType
SDA_Consume_Leading_Spaces =8 # from enum SpeechDisplayAttributes
SDA_No_Trailing_Space =0 # from enum SpeechDisplayAttributes
SDA_One_Trailing_Space =2 # from enum SpeechDisplayAttributes
SDA_Two_Trailing_Spaces =4 # from enum SpeechDisplayAttributes
SECFDefault =196609 # from enum SpeechEmulationCompareFlags
SECFEmulateResult =1073741824 # from enum SpeechEmulationCompareFlags
SECFIgnoreCase =1 # from enum SpeechEmulationCompareFlags
SECFIgnoreKanaType =65536 # from enum SpeechEmulationCompareFlags
SECFIgnoreWidth =131072 # from enum SpeechEmulationCompareFlags
SECFNoSpecialChars =536870912 # from enum SpeechEmulationCompareFlags
SECHighConfidence =1 # from enum SpeechEngineConfidence
SECLowConfidence =-1 # from enum SpeechEngineConfidence
SECNormalConfidence =0 # from enum SpeechEngineConfidence
SFTInput =0 # from enum SpeechFormatType
SFTSREngine =1 # from enum SpeechFormatType
SGRSTTDictation =3 # from enum SpeechGrammarRuleStateTransitionType
SGRSTTEpsilon =0 # from enum SpeechGrammarRuleStateTransitionType
SGRSTTRule =2 # from enum SpeechGrammarRuleStateTransitionType
SGRSTTTextBuffer =5 # from enum SpeechGrammarRuleStateTransitionType
SGRSTTWildcard =4 # from enum SpeechGrammarRuleStateTransitionType
SGRSTTWord =1 # from enum SpeechGrammarRuleStateTransitionType
SGSDisabled =0 # from enum SpeechGrammarState
SGSEnabled =1 # from enum SpeechGrammarState
SGSExclusive =3 # from enum SpeechGrammarState
SGDisplay =0 # from enum SpeechGrammarWordType
SGLexical =1 # from enum SpeechGrammarWordType
SGLexicalNoSpecialChars =3 # from enum SpeechGrammarWordType
SGPronounciation =2 # from enum SpeechGrammarWordType
SINoSignal =2 # from enum SpeechInterference
SINoise =1 # from enum SpeechInterference
SINone =0 # from enum SpeechInterference
SITooFast =5 # from enum SpeechInterference
SITooLoud =3 # from enum SpeechInterference
SITooQuiet =4 # from enum SpeechInterference
SITooSlow =6 # from enum SpeechInterference
SLTApp =2 # from enum SpeechLexiconType
SLTUser =1 # from enum SpeechLexiconType
SLODynamic =1 # from enum SpeechLoadOption
SLOStatic =0 # from enum SpeechLoadOption
SPSFunction =16384 # from enum SpeechPartOfSpeech
SPSInterjection =20480 # from enum SpeechPartOfSpeech
SPSLMA =28672 # from enum SpeechPartOfSpeech
SPSModifier =12288 # from enum SpeechPartOfSpeech
SPSNotOverriden =-1 # from enum SpeechPartOfSpeech
SPSNoun =4096 # from enum SpeechPartOfSpeech
SPSSuppressWord =61440 # from enum SpeechPartOfSpeech
SPSUnknown =0 # from enum SpeechPartOfSpeech
SPSVerb =8192 # from enum SpeechPartOfSpeech
SRCS_Disabled =0 # from enum SpeechRecoContextState
SRCS_Enabled =1 # from enum SpeechRecoContextState
SREAdaptation =8192 # from enum SpeechRecoEvents
SREAllEvents =393215 # from enum SpeechRecoEvents
SREAudioLevel =65536 # from enum SpeechRecoEvents
SREBookmark =64 # from enum SpeechRecoEvents
SREFalseRecognition =512 # from enum SpeechRecoEvents
SREHypothesis =32 # from enum SpeechRecoEvents
SREInterference =1024 # from enum SpeechRecoEvents
SREPhraseStart =8 # from enum SpeechRecoEvents
SREPrivate =262144 # from enum SpeechRecoEvents
SREPropertyNumChange =128 # from enum SpeechRecoEvents
SREPropertyStringChange =256 # from enum SpeechRecoEvents
SRERecoOtherContext =32768 # from enum SpeechRecoEvents
SRERecognition =16 # from enum SpeechRecoEvents
SRERequestUI =2048 # from enum SpeechRecoEvents
SRESoundEnd =4 # from enum SpeechRecoEvents
SRESoundStart =2 # from enum SpeechRecoEvents
SREStateChange =4096 # from enum SpeechRecoEvents
SREStreamEnd =1 # from enum SpeechRecoEvents
SREStreamStart =16384 # from enum SpeechRecoEvents
SRTAutopause =1 # from enum SpeechRecognitionType
SRTEmulated =2 # from enum SpeechRecognitionType
SRTExtendableParse =8 # from enum SpeechRecognitionType
SRTReSent =16 # from enum SpeechRecognitionType
SRTSMLTimeout =4 # from enum SpeechRecognitionType
SRTStandard =0 # from enum SpeechRecognitionType
SRSActive =1 # from enum SpeechRecognizerState
SRSActiveAlways =2 # from enum SpeechRecognizerState
SRSInactive =0 # from enum SpeechRecognizerState
SRSInactiveWithPurge =3 # from enum SpeechRecognizerState
SRAONone =0 # from enum SpeechRetainedAudioOptions
SRAORetainAudio =1 # from enum SpeechRetainedAudioOptions
SRADefaultToActive =2 # from enum SpeechRuleAttributes
SRADynamic =32 # from enum SpeechRuleAttributes
SRAExport =4 # from enum SpeechRuleAttributes
SRAImport =8 # from enum SpeechRuleAttributes
SRAInterpreter =16 # from enum SpeechRuleAttributes
SRARoot =64 # from enum SpeechRuleAttributes
SRATopLevel =1 # from enum SpeechRuleAttributes
SGDSActive =1 # from enum SpeechRuleState
SGDSActiveUserDelimited =4 # from enum SpeechRuleState
SGDSActiveWithAutoPause =3 # from enum SpeechRuleState
SGDSInactive =0 # from enum SpeechRuleState
SRSEDone =1 # from enum SpeechRunState
SRSEIsSpeaking =2 # from enum SpeechRunState
SSTTDictation =2 # from enum SpeechSpecialTransitionType
SSTTTextBuffer =3 # from enum SpeechSpecialTransitionType
SSTTWildcard =1 # from enum SpeechSpecialTransitionType
SSFMCreate =2 # from enum SpeechStreamFileMode
SSFMCreateForWrite =3 # from enum SpeechStreamFileMode
SSFMOpenForRead =0 # from enum SpeechStreamFileMode
SSFMOpenReadWrite =1 # from enum SpeechStreamFileMode
SSSPTRelativeToCurrentPosition=1 # from enum SpeechStreamSeekPositionType
SSSPTRelativeToEnd =2 # from enum SpeechStreamSeekPositionType
SSSPTRelativeToStart =0 # from enum SpeechStreamSeekPositionType
SpeechAddRemoveWord =u'AddRemoveWord' # from enum SpeechStringConstants
SpeechAudioFormatGUIDText =u'{7CEEF9F9-3D13-11d2-9EE7-00C04F797396}' # from enum SpeechStringConstants
SpeechAudioFormatGUIDWave =u'{C31ADBAE-527F-4ff5-A230-F62BB61FF70C}' # from enum SpeechStringConstants
SpeechAudioProperties =u'AudioProperties' # from enum SpeechStringConstants
SpeechAudioVolume =u'AudioVolume' # from enum SpeechStringConstants
SpeechCategoryAppLexicons =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AppLexicons' # from enum SpeechStringConstants
SpeechCategoryAudioIn =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioInput' # from enum SpeechStringConstants
SpeechCategoryAudioOut =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\AudioOutput' # from enum SpeechStringConstants
SpeechCategoryPhoneConverters =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\PhoneConverters' # from enum SpeechStringConstants
SpeechCategoryRecoProfiles =u'HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech\\RecoProfiles' # from enum SpeechStringConstants
SpeechCategoryRecognizers =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Recognizers' # from enum SpeechStringConstants
SpeechCategoryVoices =u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices' # from enum SpeechStringConstants
SpeechDictationTopicSpelling =u'Spelling' # from enum SpeechStringConstants
SpeechEngineProperties =u'EngineProperties' # from enum SpeechStringConstants
SpeechGrammarTagDictation =u'*' # from enum SpeechStringConstants
SpeechGrammarTagUnlimitedDictation=u'*+' # from enum SpeechStringConstants
SpeechGrammarTagWildcard =u'...' # from enum SpeechStringConstants
SpeechMicTraining =u'MicTraining' # from enum SpeechStringConstants
SpeechPropertyAdaptationOn =u'AdaptationOn' # from enum SpeechStringConstants
SpeechPropertyComplexResponseSpeed=u'ComplexResponseSpeed' # from enum SpeechStringConstants
SpeechPropertyHighConfidenceThreshold=u'HighConfidenceThreshold' # from enum SpeechStringConstants
SpeechPropertyLowConfidenceThreshold=u'LowConfidenceThreshold' # from enum SpeechStringConstants
SpeechPropertyNormalConfidenceThreshold=u'NormalConfidenceThreshold' # from enum SpeechStringConstants
SpeechPropertyResourceUsage =u'ResourceUsage' # from enum SpeechStringConstants
SpeechPropertyResponseSpeed =u'ResponseSpeed' # from enum SpeechStringConstants
SpeechRecoProfileProperties =u'RecoProfileProperties' # from enum SpeechStringConstants
SpeechRegistryLocalMachineRoot=u'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech' # from enum SpeechStringConstants
SpeechRegistryUserRoot =u'HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech' # from enum SpeechStringConstants
SpeechTokenIdUserLexicon =u'HKEY_CURRENT_USER\\SOFTWARE\\Microsoft\\Speech\\CurrentUserLexicon' # from enum SpeechStringConstants
SpeechTokenKeyAttributes =u'Attributes' # from enum SpeechStringConstants
SpeechTokenKeyFiles =u'Files' # from enum SpeechStringConstants
SpeechTokenKeyUI =u'UI' # from enum SpeechStringConstants
SpeechTokenValueCLSID =u'CLSID' # from enum SpeechStringConstants
SpeechUserTraining =u'UserTraining' # from enum SpeechStringConstants
SpeechVoiceCategoryTTSRate =u'DefaultTTSRate' # from enum SpeechStringConstants
SpeechVoiceSkipTypeSentence =u'Sentence' # from enum SpeechStringConstants
STCAll =23 # from enum SpeechTokenContext
STCInprocHandler =2 # from enum SpeechTokenContext
STCInprocServer =1 # from enum SpeechTokenContext
STCLocalServer =4 # from enum SpeechTokenContext
STCRemoteServer =16 # from enum SpeechTokenContext
STSF_AppData =26 # from enum SpeechTokenShellFolder
STSF_CommonAppData =35 # from enum SpeechTokenShellFolder
STSF_FlagCreate =32768 # from enum SpeechTokenShellFolder
STSF_LocalAppData =28 # from enum SpeechTokenShellFolder
SVF_Emphasis =2 # from enum SpeechVisemeFeature
SVF_None =0 # from enum SpeechVisemeFeature
SVF_Stressed =1 # from enum SpeechVisemeFeature
SVP_0 =0 # from enum SpeechVisemeType
SVP_1 =1 # from enum SpeechVisemeType
SVP_10 =10 # from enum SpeechVisemeType
SVP_11 =11 # from enum SpeechVisemeType
SVP_12 =12 # from enum SpeechVisemeType
SVP_13 =13 # from enum SpeechVisemeType
SVP_14 =14 # from enum SpeechVisemeType
SVP_15 =15 # from enum SpeechVisemeType
SVP_16 =16 # from enum SpeechVisemeType
SVP_17 =17 # from enum SpeechVisemeType
SVP_18 =18 # from enum SpeechVisemeType
SVP_19 =19 # from enum SpeechVisemeType
SVP_2 =2 # from enum SpeechVisemeType
SVP_20 =20 # from enum SpeechVisemeType
SVP_21 =21 # from enum SpeechVisemeType
SVP_3 =3 # from enum SpeechVisemeType
SVP_4 =4 # from enum SpeechVisemeType
SVP_5 =5 # from enum SpeechVisemeType
SVP_6 =6 # from enum SpeechVisemeType
SVP_7 =7 # from enum SpeechVisemeType
SVP_8 =8 # from enum SpeechVisemeType
SVP_9 =9 # from enum SpeechVisemeType
SVEAllEvents =33790 # from enum SpeechVoiceEvents
SVEAudioLevel =512 # from enum SpeechVoiceEvents
SVEBookmark =16 # from enum SpeechVoiceEvents
SVEEndInputStream =4 # from enum SpeechVoiceEvents
SVEPhoneme =64 # from enum SpeechVoiceEvents
SVEPrivate =32768 # from enum SpeechVoiceEvents
SVESentenceBoundary =128 # from enum SpeechVoiceEvents
SVEStartInputStream =2 # from enum SpeechVoiceEvents
SVEViseme =256 # from enum SpeechVoiceEvents
SVEVoiceChange =8 # from enum SpeechVoiceEvents
SVEWordBoundary =32 # from enum SpeechVoiceEvents
SVPAlert =1 # from enum SpeechVoicePriority
SVPNormal =0 # from enum SpeechVoicePriority
SVPOver =2 # from enum SpeechVoicePriority
SVSFDefault =0 # from enum SpeechVoiceSpeakFlags
SVSFIsFilename =4 # from enum SpeechVoiceSpeakFlags
SVSFIsNotXML =16 # from enum SpeechVoiceSpeakFlags
SVSFIsXML =8 # from enum SpeechVoiceSpeakFlags
SVSFNLPMask =64 # from enum SpeechVoiceSpeakFlags
SVSFNLPSpeakPunc =64 # from enum SpeechVoiceSpeakFlags
SVSFParseAutodetect =0 # from enum SpeechVoiceSpeakFlags
SVSFParseMask =384 # from enum SpeechVoiceSpeakFlags
SVSFParseSapi =128 # from enum SpeechVoiceSpeakFlags
SVSFParseSsml =256 # from enum SpeechVoiceSpeakFlags
SVSFPersistXML =32 # from enum SpeechVoiceSpeakFlags
SVSFPurgeBeforeSpeak =2 # from enum SpeechVoiceSpeakFlags
SVSFUnusedFlags =-512 # from enum SpeechVoiceSpeakFlags
SVSFVoiceMask =511 # from enum SpeechVoiceSpeakFlags
SVSFlagsAsync =1 # from enum SpeechVoiceSpeakFlags
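	# A common combination of the speak flags above (illustrative): interrupt
	# anything already queued and return immediately --
	#   voice.Speak('New text', SVSFlagsAsync | SVSFPurgeBeforeSpeak)  # == 3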
SWPKnownWordPronounceable =2 # from enum SpeechWordPronounceable
SWPUnknownWordPronounceable =1 # from enum SpeechWordPronounceable
SWPUnknownWordUnpronounceable =0 # from enum SpeechWordPronounceable
SWTAdded =1 # from enum SpeechWordType
SWTDeleted =2 # from enum SpeechWordType
SPAS_CLOSED =0 # from enum _SPAUDIOSTATE
SPAS_PAUSE =2 # from enum _SPAUDIOSTATE
SPAS_RUN =3 # from enum _SPAUDIOSTATE
SPAS_STOP =1 # from enum _SPAUDIOSTATE
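# Everything below is the early-bound dispatch layer: one wrapper class per
# SAPI automation interface, each driving IDispatch::Invoke with the DISPIDs
# listed above. A minimal end-to-end sketch (assumes a default TTS voice is
# installed; the spoken text is a placeholder):
#
#   import win32com.client
#   voice = win32com.client.Dispatch('SAPI.SpVoice')
#   for token in voice.GetVoices():          # ISpeechObjectTokens is iterable
#       print token.GetDescription()
#   voice.Speak('Testing one two three')     # synchronous by default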
from win32com.client import DispatchBaseClass
class ISpeechAudio(DispatchBaseClass):
'ISpeechAudio Interface'
CLSID = IID('{CFF8E175-019E-11D3-A08E-00C04F8EF9B5}')
coclass_clsid = None
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def SetState(self, State=defaultNamedNotOptArg):
'SetState'
return self._oleobj_.InvokeTypes(206, LCID, 1, (24, 0), ((3, 1),),State
)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'BufferInfo' returns object of type 'ISpeechAudioBufferInfo'
"BufferInfo": (201, 2, (9, 0), (), "BufferInfo", '{11B103D8-1142-4EDF-A093-82FB3915F8CC}'),
"BufferNotifySize": (204, 2, (3, 0), (), "BufferNotifySize", None),
# Method 'DefaultFormat' returns object of type 'ISpeechAudioFormat'
"DefaultFormat": (202, 2, (9, 0), (), "DefaultFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
"EventHandle": (205, 2, (3, 0), (), "EventHandle", None),
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
# Method 'Status' returns object of type 'ISpeechAudioStatus'
"Status": (200, 2, (9, 0), (), "Status", '{C62D9C91-7458-47F6-862D-1EF86FB0B278}'),
"Volume": (203, 2, (3, 0), (), "Volume", None),
}
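	# Layout of the property maps (win32com convention): each _prop_map_get_
	# entry is (dispid, DISPATCH_PROPERTYGET=2, (VARTYPE, flags), arg types,
	# name, result CLSID or None) -- e.g. "Volume" is VT_I4 (3) with no typed
	# result. _prop_map_put_ entries use invoke kind 4 (PROPERTYPUT) or
	# 8 (PROPERTYPUTREF, used for the object-valued "Format").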
_prop_map_put_ = {
"BufferNotifySize": ((204, LCID, 4, 0),()),
"Format": ((1, LCID, 8, 0),()),
"Volume": ((203, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
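# Every wrapper in this module exposes the same __iter__ pattern: DISPID -4
# is the standard DISPID_NEWENUM, so iterating asks the COM object for its
# enumerator and wraps it in win32com.client.util.Iterator; objects that are
# not collections raise TypeError instead.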
class ISpeechAudioBufferInfo(DispatchBaseClass):
'ISpeechAudioBufferInfo Interface'
CLSID = IID('{11B103D8-1142-4EDF-A093-82FB3915F8CC}')
coclass_clsid = None
_prop_map_get_ = {
"BufferSize": (2, 2, (3, 0), (), "BufferSize", None),
"EventBias": (3, 2, (3, 0), (), "EventBias", None),
"MinNotification": (1, 2, (3, 0), (), "MinNotification", None),
}
_prop_map_put_ = {
"BufferSize": ((2, LCID, 4, 0),()),
"EventBias": ((3, LCID, 4, 0),()),
"MinNotification": ((1, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechAudioFormat(DispatchBaseClass):
'ISpeechAudioFormat Interface'
CLSID = IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')
coclass_clsid = IID('{9EF96870-E160-4792-820D-48CF0649E4EC}')
# Result is of type ISpeechWaveFormatEx
def GetWaveFormatEx(self):
'GetWaveFormatEx'
ret = self._oleobj_.InvokeTypes(3, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, u'GetWaveFormatEx', '{7A1EF0D5-1581-4741-88E4-209A49F11A10}')
return ret
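	# Pattern used throughout: InvokeTypes returns a raw IDispatch pointer,
	# which Dispatch() re-wraps in the generated class registered for the
	# result CLSID (here ISpeechWaveFormatEx), keeping chained calls early-bound.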
def SetWaveFormatEx(self, SpeechWaveFormatEx=defaultNamedNotOptArg):
'SetWaveFormatEx'
return self._oleobj_.InvokeTypes(4, LCID, 1, (24, 0), ((9, 1),),SpeechWaveFormatEx
)
_prop_map_get_ = {
"Guid": (2, 2, (8, 0), (), "Guid", None),
"Type": (1, 2, (3, 0), (), "Type", None),
}
_prop_map_put_ = {
"Guid": ((2, LCID, 4, 0),()),
"Type": ((1, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechAudioStatus(DispatchBaseClass):
'ISpeechAudioStatus Interface'
CLSID = IID('{C62D9C91-7458-47F6-862D-1EF86FB0B278}')
coclass_clsid = None
_prop_map_get_ = {
"CurrentDevicePosition": (5, 2, (12, 0), (), "CurrentDevicePosition", None),
"CurrentSeekPosition": (4, 2, (12, 0), (), "CurrentSeekPosition", None),
"FreeBufferSpace": (1, 2, (3, 0), (), "FreeBufferSpace", None),
"NonBlockingIO": (2, 2, (3, 0), (), "NonBlockingIO", None),
"State": (3, 2, (3, 0), (), "State", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechBaseStream(DispatchBaseClass):
'ISpeechBaseStream Interface'
CLSID = IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')
coclass_clsid = None
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
}
_prop_map_put_ = {
"Format": ((1, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechCustomStream(DispatchBaseClass):
'ISpeechCustomStream Interface'
CLSID = IID('{1A9E9F4F-104F-4DB8-A115-EFD7FD0C97AE}')
coclass_clsid = IID('{8DBEF13F-1948-4AA8-8CF0-048EEBED95D8}')
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
"BaseStream": (100, 2, (13, 0), (), "BaseStream", None),
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
}
_prop_map_put_ = {
"BaseStream": ((100, LCID, 8, 0),()),
"Format": ((1, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechDataKey(DispatchBaseClass):
'ISpeechDataKey Interface'
CLSID = IID('{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')
coclass_clsid = None
# Result is of type ISpeechDataKey
def CreateKey(self, SubKeyName=defaultNamedNotOptArg):
'CreateKey'
ret = self._oleobj_.InvokeTypes(8, LCID, 1, (9, 0), ((8, 1),),SubKeyName
)
if ret is not None:
ret = Dispatch(ret, u'CreateKey', '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')
return ret
def DeleteKey(self, SubKeyName=defaultNamedNotOptArg):
'DeleteKey'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((8, 1),),SubKeyName
)
def DeleteValue(self, ValueName=defaultNamedNotOptArg):
'DeleteValue'
return self._oleobj_.InvokeTypes(10, LCID, 1, (24, 0), ((8, 1),),ValueName
)
def EnumKeys(self, Index=defaultNamedNotOptArg):
'EnumKeys'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(11, LCID, 1, (8, 0), ((3, 1),),Index
)
def EnumValues(self, Index=defaultNamedNotOptArg):
'EnumValues'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(12, LCID, 1, (8, 0), ((3, 1),),Index
)
def GetBinaryValue(self, ValueName=defaultNamedNotOptArg):
'GetBinaryValue'
return self._ApplyTypes_(2, 1, (12, 0), ((8, 1),), u'GetBinaryValue', None,ValueName
)
def GetLongValue(self, ValueName=defaultNamedNotOptArg):
'GetlongValue'
return self._oleobj_.InvokeTypes(6, LCID, 1, (3, 0), ((8, 1),),ValueName
)
def GetStringValue(self, ValueName=defaultNamedNotOptArg):
'GetStringValue'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(4, LCID, 1, (8, 0), ((8, 1),),ValueName
)
# Result is of type ISpeechDataKey
def OpenKey(self, SubKeyName=defaultNamedNotOptArg):
'OpenKey'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (9, 0), ((8, 1),),SubKeyName
)
if ret is not None:
ret = Dispatch(ret, u'OpenKey', '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')
return ret
def SetBinaryValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetBinaryValue'
return self._oleobj_.InvokeTypes(1, LCID, 1, (24, 0), ((8, 1), (12, 1)),ValueName
, Value)
def SetLongValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetLongValue'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), ((8, 1), (3, 1)),ValueName
, Value)
def SetStringValue(self, ValueName=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetStringValue'
return self._oleobj_.InvokeTypes(3, LCID, 1, (24, 0), ((8, 1), (8, 1)),ValueName
, Value)
_prop_map_get_ = {
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechFileStream(DispatchBaseClass):
'ISpeechFileStream Interface'
CLSID = IID('{AF67F125-AB39-4E93-B4A2-CC2E66E182A7}')
coclass_clsid = IID('{947812B3-2AE1-4644-BA86-9E90DED7EC91}')
def Close(self):
'Close'
return self._oleobj_.InvokeTypes(101, LCID, 1, (24, 0), (),)
def Open(self, FileName=defaultNamedNotOptArg, FileMode=0, DoEvents=False):
'Open'
return self._oleobj_.InvokeTypes(100, LCID, 1, (24, 0), ((8, 1), (3, 49), (11, 49)),FileName
, FileMode, DoEvents)
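	# Illustrative use of Open/Close (file name is a placeholder): render
	# speech to a WAV file by retargeting a voice's output stream --
	#   fs = win32com.client.Dispatch('SAPI.SpFileStream')
	#   fs.Open('out.wav', SSFMCreateForWrite)
	#   voice.AudioOutputStream = fs
	#   voice.Speak('saved to disk')
	#   fs.Close()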
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
}
_prop_map_put_ = {
"Format": ((1, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRule(DispatchBaseClass):
'ISpeechGrammarRule Interface'
CLSID = IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
coclass_clsid = None
def AddResource(self, ResourceName=defaultNamedNotOptArg, ResourceValue=defaultNamedNotOptArg):
'AddResource'
return self._oleobj_.InvokeTypes(6, LCID, 1, (24, 0), ((8, 1), (8, 1)),ResourceName
, ResourceValue)
# Result is of type ISpeechGrammarRuleState
def AddState(self):
'AddState'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, u'AddState', '{D4286F2C-EE67-45AE-B928-28D695362EDA}')
return ret
def Clear(self):
'Clear'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"Attributes": (1, 2, (3, 0), (), "Attributes", None),
"Id": (4, 2, (3, 0), (), "Id", None),
# Method 'InitialState' returns object of type 'ISpeechGrammarRuleState'
"InitialState": (2, 2, (9, 0), (), "InitialState", '{D4286F2C-EE67-45AE-B928-28D695362EDA}'),
"Name": (3, 2, (8, 0), (), "Name", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRuleState(DispatchBaseClass):
'ISpeechGrammarRuleState Interface'
CLSID = IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')
coclass_clsid = None
def AddRuleTransition(self, DestinationState=defaultNamedNotOptArg, Rule=defaultNamedNotOptArg, PropertyName=u'', PropertyId=0
, PropertyValue=u'', Weight=1.0):
'AddRuleTransition'
return self._ApplyTypes_(4, 1, (24, 32), ((9, 1), (9, 1), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddRuleTransition', None,DestinationState
, Rule, PropertyName, PropertyId, PropertyValue, Weight
)
def AddSpecialTransition(self, DestinationState=defaultNamedNotOptArg, Type=defaultNamedNotOptArg, PropertyName=u'', PropertyId=0
, PropertyValue=u'', Weight=1.0):
'AddSpecialTransition'
return self._ApplyTypes_(5, 1, (24, 32), ((9, 1), (3, 1), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddSpecialTransition', None,DestinationState
, Type, PropertyName, PropertyId, PropertyValue, Weight
)
def AddWordTransition(self, DestState=defaultNamedNotOptArg, Words=defaultNamedNotOptArg, Separators=u' ', Type=1
, PropertyName=u'', PropertyId=0, PropertyValue=u'', Weight=1.0):
'AddWordTransition'
return self._ApplyTypes_(3, 1, (24, 32), ((9, 1), (8, 1), (8, 49), (3, 49), (8, 49), (3, 49), (16396, 49), (4, 49)), u'AddWordTransition', None,DestState
, Words, Separators, Type, PropertyName, PropertyId
, PropertyValue, Weight)
_prop_map_get_ = {
# Method 'Rule' returns object of type 'ISpeechGrammarRule'
"Rule": (1, 2, (9, 0), (), "Rule", '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}'),
# Method 'Transitions' returns object of type 'ISpeechGrammarRuleStateTransitions'
"Transitions": (2, 2, (9, 0), (), "Transitions", '{EABCE657-75BC-44A2-AA7F-C56476742963}'),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRuleStateTransition(DispatchBaseClass):
'ISpeechGrammarRuleStateTransition Interface'
CLSID = IID('{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')
coclass_clsid = None
_prop_map_get_ = {
# Method 'NextState' returns object of type 'ISpeechGrammarRuleState'
"NextState": (8, 2, (9, 0), (), "NextState", '{D4286F2C-EE67-45AE-B928-28D695362EDA}'),
"PropertyId": (6, 2, (3, 0), (), "PropertyId", None),
"PropertyName": (5, 2, (8, 0), (), "PropertyName", None),
"PropertyValue": (7, 2, (12, 0), (), "PropertyValue", None),
# Method 'Rule' returns object of type 'ISpeechGrammarRule'
"Rule": (3, 2, (9, 0), (), "Rule", '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}'),
"Text": (2, 2, (8, 0), (), "Text", None),
"Type": (1, 2, (3, 0), (), "Type", None),
"Weight": (4, 2, (12, 0), (), "Weight", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechGrammarRuleStateTransitions(DispatchBaseClass):
'ISpeechGrammarRuleStateTransitions Interface'
CLSID = IID('{EABCE657-75BC-44A2-AA7F-C56476742963}')
coclass_clsid = None
# Result is of type ISpeechGrammarRuleStateTransition
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechGrammarRules(DispatchBaseClass):
'ISpeechGrammarRules Interface'
CLSID = IID('{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}')
coclass_clsid = None
# Result is of type ISpeechGrammarRule
def Add(self, RuleName=defaultNamedNotOptArg, Attributes=defaultNamedNotOptArg, RuleId=0):
'Add'
ret = self._oleobj_.InvokeTypes(3, LCID, 1, (9, 0), ((8, 1), (3, 1), (3, 49)),RuleName
, Attributes, RuleId)
if ret is not None:
ret = Dispatch(ret, u'Add', '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
return ret
def Commit(self):
'Commit'
return self._oleobj_.InvokeTypes(4, LCID, 1, (24, 0), (),)
def CommitAndSave(self, ErrorText=pythoncom.Missing):
'CommitAndSave'
return self._ApplyTypes_(5, 1, (12, 0), ((16392, 2),), u'CommitAndSave', None,ErrorText
)
# Result is of type ISpeechGrammarRule
def FindRule(self, RuleNameOrId=defaultNamedNotOptArg):
'FindRule'
ret = self._oleobj_.InvokeTypes(6, LCID, 1, (9, 0), ((12, 1),),RuleNameOrId
)
if ret is not None:
ret = Dispatch(ret, u'FindRule', '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
return ret
# Result is of type ISpeechGrammarRule
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
"Dynamic": (2, 2, (11, 0), (), "Dynamic", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
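# Usage sketch tying ISpeechGrammarRule, ISpeechGrammarRuleState and
# ISpeechGrammarRules together (comments only). Assumes a reco context
# `ctx` as created in the ISpeechRecoContext sketch further below, and the
# SAPI5 constants SRATopLevel == 1, SRADynamic == 2 and SGDSActive == 1.
#
#   grammar = ctx.CreateGrammar()
#   rule = grammar.Rules.Add("Greeting", 1 | 2)     # top-level, dynamic
#   rule.InitialState.AddWordTransition(None, "hello computer")
#   grammar.Rules.Commit()                          # compile the changes
#   grammar.CmdSetRuleState("Greeting", 1)          # activate the rule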
class ISpeechLexicon(DispatchBaseClass):
'ISpeechLexicon Interface'
CLSID = IID('{3DA7627A-C7AE-4B23-8708-638C50362C25}')
coclass_clsid = IID('{C9E37C15-DF92-4727-85D6-72E5EEB6995A}')
def AddPronunciation(self, bstrWord=defaultNamedNotOptArg, LangId=defaultNamedNotOptArg, PartOfSpeech=0, bstrPronunciation=u''):
'AddPronunciation'
return self._ApplyTypes_(3, 1, (24, 32), ((8, 1), (3, 1), (3, 49), (8, 49)), u'AddPronunciation', None,bstrWord
, LangId, PartOfSpeech, bstrPronunciation)
def AddPronunciationByPhoneIds(self, bstrWord=defaultNamedNotOptArg, LangId=defaultNamedNotOptArg, PartOfSpeech=0, PhoneIds=u''):
'AddPronunciationByPhoneIds'
return self._ApplyTypes_(4, 1, (24, 32), ((8, 1), (3, 1), (3, 49), (16396, 49)), u'AddPronunciationByPhoneIds', None,bstrWord
, LangId, PartOfSpeech, PhoneIds)
# Result is of type ISpeechLexiconWords
def GetGenerationChange(self, GenerationId=defaultNamedNotOptArg):
'GetGenerationChange'
return self._ApplyTypes_(8, 1, (9, 0), ((16387, 3),), u'GetGenerationChange', '{8D199862-415E-47D5-AC4F-FAA608B424E6}',GenerationId
)
# Result is of type ISpeechLexiconPronunciations
def GetPronunciations(self, bstrWord=defaultNamedNotOptArg, LangId=0, TypeFlags=3):
'GetPronunciations'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (9, 0), ((8, 1), (3, 49), (3, 49)),bstrWord
, LangId, TypeFlags)
if ret is not None:
ret = Dispatch(ret, u'GetPronunciations', '{72829128-5682-4704-A0D4-3E2BB6F2EAD3}')
return ret
# Result is of type ISpeechLexiconWords
def GetWords(self, Flags=3, GenerationId=0):
'GetWords'
return self._ApplyTypes_(2, 1, (9, 0), ((3, 49), (16387, 50)), u'GetWords', '{8D199862-415E-47D5-AC4F-FAA608B424E6}',Flags
, GenerationId)
def RemovePronunciation(self, bstrWord=defaultNamedNotOptArg, LangId=defaultNamedNotOptArg, PartOfSpeech=0, bstrPronunciation=u''):
'RemovePronunciation'
return self._ApplyTypes_(5, 1, (24, 32), ((8, 1), (3, 1), (3, 49), (8, 49)), u'RemovePronunciation', None,bstrWord
, LangId, PartOfSpeech, bstrPronunciation)
def RemovePronunciationByPhoneIds(self, bstrWord=defaultNamedNotOptArg, LangId=defaultNamedNotOptArg, PartOfSpeech=0, PhoneIds=u''):
'RemovePronunciationByPhoneIds'
return self._ApplyTypes_(6, 1, (24, 32), ((8, 1), (3, 1), (3, 49), (16396, 49)), u'RemovePronunciationByPhoneIds', None,bstrWord
, LangId, PartOfSpeech, PhoneIds)
_prop_map_get_ = {
"GenerationId": (1, 2, (3, 0), (), "GenerationId", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
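# Usage sketch for ISpeechLexicon (comments only). Assumes the standard
# "SAPI.SpLexicon" ProgID, LangId 1033 (en-US) and SpeechPartOfSpeech
# SPSNoun == 4096; the word and its symbolic phonemes are illustrative.
#
#   import win32com.client
#   lex = win32com.client.Dispatch("SAPI.SpLexicon")
#   lex.AddPronunciation("pywin", 1033, 4096, "p ay w ih n")
#   for pron in lex.GetPronunciations("pywin", 1033):
#       print(pron.Symbolic)                  # ISpeechLexiconPronunciation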
class ISpeechLexiconPronunciation(DispatchBaseClass):
'ISpeechLexiconPronunciation Interface'
CLSID = IID('{95252C5D-9E43-4F4A-9899-48EE73352F9F}')
coclass_clsid = None
_prop_map_get_ = {
"LangId": (2, 2, (3, 0), (), "LangId", None),
"PartOfSpeech": (3, 2, (3, 0), (), "PartOfSpeech", None),
"PhoneIds": (4, 2, (12, 0), (), "PhoneIds", None),
"Symbolic": (5, 2, (8, 0), (), "Symbolic", None),
"Type": (1, 2, (3, 0), (), "Type", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechLexiconPronunciations(DispatchBaseClass):
'ISpeechLexiconPronunciations Interface'
CLSID = IID('{72829128-5682-4704-A0D4-3E2BB6F2EAD3}')
coclass_clsid = None
# Result is of type ISpeechLexiconPronunciation
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{95252C5D-9E43-4F4A-9899-48EE73352F9F}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{95252C5D-9E43-4F4A-9899-48EE73352F9F}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{95252C5D-9E43-4F4A-9899-48EE73352F9F}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechLexiconWord(DispatchBaseClass):
'ISpeechLexiconWord Interface'
CLSID = IID('{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}')
coclass_clsid = None
_prop_map_get_ = {
"LangId": (1, 2, (3, 0), (), "LangId", None),
# Method 'Pronunciations' returns object of type 'ISpeechLexiconPronunciations'
"Pronunciations": (4, 2, (9, 0), (), "Pronunciations", '{72829128-5682-4704-A0D4-3E2BB6F2EAD3}'),
"Type": (2, 2, (3, 0), (), "Type", None),
"Word": (3, 2, (8, 0), (), "Word", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechLexiconWords(DispatchBaseClass):
'ISpeechLexiconWords Interface'
CLSID = IID('{8D199862-415E-47D5-AC4F-FAA608B424E6}')
coclass_clsid = None
# Result is of type ISpeechLexiconWord
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechMMSysAudio(DispatchBaseClass):
'ISpeechMMSysAudio Interface'
CLSID = IID('{3C76AF6D-1FD7-4831-81D1-3B71D5A13C44}')
coclass_clsid = IID('{A8C680EB-3D32-11D2-9EE7-00C04F797396}')
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def SetState(self, State=defaultNamedNotOptArg):
'SetState'
return self._oleobj_.InvokeTypes(206, LCID, 1, (24, 0), ((3, 1),),State
)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'BufferInfo' returns object of type 'ISpeechAudioBufferInfo'
"BufferInfo": (201, 2, (9, 0), (), "BufferInfo", '{11B103D8-1142-4EDF-A093-82FB3915F8CC}'),
"BufferNotifySize": (204, 2, (3, 0), (), "BufferNotifySize", None),
# Method 'DefaultFormat' returns object of type 'ISpeechAudioFormat'
"DefaultFormat": (202, 2, (9, 0), (), "DefaultFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
"DeviceId": (300, 2, (3, 0), (), "DeviceId", None),
"EventHandle": (205, 2, (3, 0), (), "EventHandle", None),
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
"LineId": (301, 2, (3, 0), (), "LineId", None),
"MMHandle": (302, 2, (3, 0), (), "MMHandle", None),
# Method 'Status' returns object of type 'ISpeechAudioStatus'
"Status": (200, 2, (9, 0), (), "Status", '{C62D9C91-7458-47F6-862D-1EF86FB0B278}'),
"Volume": (203, 2, (3, 0), (), "Volume", None),
}
_prop_map_put_ = {
"BufferNotifySize": ((204, LCID, 4, 0),()),
"DeviceId": ((300, LCID, 4, 0),()),
"Format": ((1, LCID, 8, 0),()),
"LineId": ((301, LCID, 4, 0),()),
"Volume": ((203, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
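# Usage sketch for ISpeechMMSysAudio (comments only). Assumes the standard
# "SAPI.SpMMAudioIn" and "SAPI.SpInprocRecognizer" ProgIDs, and that
# AudioInputStream is assignable on an in-process recognizer (shared
# recognizers manage their own audio input).
#
#   import win32com.client
#   reco = win32com.client.Dispatch("SAPI.SpInprocRecognizer")
#   audio = win32com.client.Dispatch("SAPI.SpMMAudioIn")
#   audio.DeviceId = 0                        # first wave-in device
#   reco.AudioInputStream = audio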
class ISpeechMemoryStream(DispatchBaseClass):
'ISpeechMemoryStream Interface'
CLSID = IID('{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')
coclass_clsid = IID('{5FB7EF7D-DFF4-468A-B6B7-2FCBD188F994}')
def GetData(self):
'GetData'
return self._ApplyTypes_(101, 1, (12, 0), (), u'GetData', None,)
def Read(self, Buffer=pythoncom.Missing, NumberOfBytes=defaultNamedNotOptArg):
'Read'
return self._ApplyTypes_(2, 1, (3, 0), ((16396, 2), (3, 1)), u'Read', None,Buffer
, NumberOfBytes)
def Seek(self, Position=defaultNamedNotOptArg, Origin=0):
'Seek'
return self._ApplyTypes_(4, 1, (12, 0), ((12, 1), (3, 49)), u'Seek', None,Position
, Origin)
def SetData(self, Data=defaultNamedNotOptArg):
'SetData'
return self._oleobj_.InvokeTypes(100, LCID, 1, (24, 0), ((12, 1),),Data
)
def Write(self, Buffer=defaultNamedNotOptArg):
'Write'
return self._oleobj_.InvokeTypes(3, LCID, 1, (3, 0), ((12, 1),),Buffer
)
_prop_map_get_ = {
# Method 'Format' returns object of type 'ISpeechAudioFormat'
"Format": (1, 2, (9, 0), (), "Format", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
}
_prop_map_put_ = {
"Format": ((1, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
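# Usage sketch for ISpeechMemoryStream (comments only). Assumes the
# standard "SAPI.SpMemoryStream" ProgID; GetData returns the rendered
# audio as a bytes-like VARIANT byte array.
#
#   import win32com.client
#   mem = win32com.client.Dispatch("SAPI.SpMemoryStream")
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   voice.AudioOutputStream = mem             # render TTS into memory
#   voice.Speak("hello")
#   data = mem.GetData()
#   print(len(data))                          # size of the raw audio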
class ISpeechObjectToken(DispatchBaseClass):
'ISpeechObjectToken Interface'
CLSID = IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')
coclass_clsid = IID('{EF411752-3736-4CB4-9C8C-8EF4CCB58EFE}')
def CreateInstance(self, pUnkOuter=None, ClsContext=23):
'CreateInstance'
ret = self._oleobj_.InvokeTypes(7, LCID, 1, (13, 0), ((13, 49), (3, 49)),pUnkOuter
, ClsContext)
if ret is not None:
# See if this IUnknown is really an IDispatch
try:
ret = ret.QueryInterface(pythoncom.IID_IDispatch)
except pythoncom.error:
return ret
ret = Dispatch(ret, u'CreateInstance', None)
return ret
def DisplayUI(self, hWnd=defaultNamedNotOptArg, Title=defaultNamedNotOptArg, TypeOfUI=defaultNamedNotOptArg, ExtraData=u''
, Object=None):
'DisplayUI'
return self._ApplyTypes_(12, 1, (24, 32), ((3, 1), (8, 1), (8, 1), (16396, 49), (13, 49)), u'DisplayUI', None,hWnd
, Title, TypeOfUI, ExtraData, Object)
def GetAttribute(self, AttributeName=defaultNamedNotOptArg):
'GetAttribute'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(6, LCID, 1, (8, 0), ((8, 1),),AttributeName
)
def GetDescription(self, Locale=0):
'GetDescription'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(4, LCID, 1, (8, 0), ((3, 49),),Locale
)
def GetStorageFileName(self, ObjectStorageCLSID=defaultNamedNotOptArg, KeyName=defaultNamedNotOptArg, FileName=defaultNamedNotOptArg, Folder=defaultNamedNotOptArg):
'GetStorageFileName'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(9, LCID, 1, (8, 0), ((8, 1), (8, 1), (8, 1), (3, 1)),ObjectStorageCLSID
, KeyName, FileName, Folder)
def IsUISupported(self, TypeOfUI=defaultNamedNotOptArg, ExtraData=u'', Object=None):
'IsUISupported'
return self._ApplyTypes_(11, 1, (11, 32), ((8, 1), (16396, 49), (13, 49)), u'IsUISupported', None,TypeOfUI
, ExtraData, Object)
def MatchesAttributes(self, Attributes=defaultNamedNotOptArg):
'MatchesAttributes'
return self._oleobj_.InvokeTypes(13, LCID, 1, (11, 0), ((8, 1),),Attributes
)
def Remove(self, ObjectStorageCLSID=defaultNamedNotOptArg):
'Remove'
return self._oleobj_.InvokeTypes(8, LCID, 1, (24, 0), ((8, 1),),ObjectStorageCLSID
)
def RemoveStorageFileName(self, ObjectStorageCLSID=defaultNamedNotOptArg, KeyName=defaultNamedNotOptArg, DeleteFile=defaultNamedNotOptArg):
'RemoveStorageFileName'
return self._oleobj_.InvokeTypes(10, LCID, 1, (24, 0), ((8, 1), (8, 1), (11, 1)),ObjectStorageCLSID
, KeyName, DeleteFile)
def SetId(self, Id=defaultNamedNotOptArg, CategoryID=u'', CreateIfNotExist=False):
'SetId'
return self._ApplyTypes_(5, 1, (24, 32), ((8, 1), (8, 49), (11, 49)), u'SetId', None,Id
, CategoryID, CreateIfNotExist)
_prop_map_get_ = {
# Method 'Category' returns object of type 'ISpeechObjectTokenCategory'
"Category": (3, 2, (9, 0), (), "Category", '{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}'),
# Method 'DataKey' returns object of type 'ISpeechDataKey'
"DataKey": (2, 2, (9, 0), (), "DataKey", '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}'),
"Id": (1, 2, (8, 0), (), "Id", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechObjectTokenCategory(DispatchBaseClass):
'ISpeechObjectTokenCategory Interface'
CLSID = IID('{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}')
coclass_clsid = IID('{A910187F-0C7A-45AC-92CC-59EDAFB77B53}')
# Result is of type ISpeechObjectTokens
def EnumerateTokens(self, RequiredAttributes=u'', OptionalAttributes=u''):
'EnumerateTokens'
return self._ApplyTypes_(5, 1, (9, 32), ((8, 49), (8, 49)), u'EnumerateTokens', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
# Result is of type ISpeechDataKey
def GetDataKey(self, Location=0):
'GetDataKey'
ret = self._oleobj_.InvokeTypes(4, LCID, 1, (9, 0), ((3, 49),),Location
)
if ret is not None:
ret = Dispatch(ret, u'GetDataKey', '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')
return ret
def SetId(self, Id=defaultNamedNotOptArg, CreateIfNotExist=False):
'SetId'
return self._oleobj_.InvokeTypes(3, LCID, 1, (24, 0), ((8, 1), (11, 49)),Id
, CreateIfNotExist)
_prop_map_get_ = {
"Default": (2, 2, (8, 0), (), "Default", None),
"Id": (1, 2, (8, 0), (), "Id", None),
}
_prop_map_put_ = {
"Default": ((2, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechObjectTokens(DispatchBaseClass):
'ISpeechObjectTokens Interface'
CLSID = IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')
coclass_clsid = None
# Result is of type ISpeechObjectToken
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{C74A3ADC-B727-4500-A84A-B526721C8B8C}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{C74A3ADC-B727-4500-A84A-B526721C8B8C}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{C74A3ADC-B727-4500-A84A-B526721C8B8C}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
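# Usage sketch for ISpeechObjectTokens (comments only): enumerate the
# installed TTS voices; SpVoice.GetVoices returns this collection type.
#
#   import win32com.client
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   tokens = voice.GetVoices()
#   for i in range(len(tokens)):              # __len__ maps to Count above
#       print(tokens.Item(i).GetDescription())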
class ISpeechPhoneConverter(DispatchBaseClass):
'ISpeechPhoneConverter Interface'
CLSID = IID('{C3E4F353-433F-43D6-89A1-6A62A7054C3D}')
coclass_clsid = IID('{9185F743-1143-4C28-86B5-BFF14F20E5C8}')
def IdToPhone(self, IdArray=defaultNamedNotOptArg):
'IdToPhone'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(3, LCID, 1, (8, 0), ((12, 1),),IdArray
)
def PhoneToId(self, Phonemes=defaultNamedNotOptArg):
'PhoneToId'
return self._ApplyTypes_(2, 1, (12, 0), ((8, 1),), u'PhoneToId', None,Phonemes
)
_prop_map_get_ = {
"LanguageId": (1, 2, (3, 0), (), "LanguageId", None),
}
_prop_map_put_ = {
"LanguageId": ((1, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
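# Usage sketch for ISpeechPhoneConverter (comments only). Assumes the
# standard "SAPI.SpPhoneConverter" ProgID and LanguageId 1033 for the
# English phone set; the symbolic phone string is illustrative.
#
#   import win32com.client
#   conv = win32com.client.Dispatch("SAPI.SpPhoneConverter")
#   conv.LanguageId = 1033
#   ids = conv.PhoneToId("h eh l ow")         # symbols -> phone id array
#   print(conv.IdToPhone(ids))                # round-trips the symbols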
class ISpeechPhraseAlternate(DispatchBaseClass):
'ISpeechPhraseAlternate Interface'
CLSID = IID('{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}')
coclass_clsid = None
def Commit(self):
'Commit'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), (),)
_prop_map_get_ = {
"NumberOfElementsInResult": (3, 2, (3, 0), (), "NumberOfElementsInResult", None),
# Method 'PhraseInfo' returns object of type 'ISpeechPhraseInfo'
"PhraseInfo": (4, 2, (9, 0), (), "PhraseInfo", '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}'),
# Method 'RecoResult' returns object of type 'ISpeechRecoResult'
"RecoResult": (1, 2, (9, 0), (), "RecoResult", '{ED2879CF-CED9-4EE6-A534-DE0191D5468D}'),
"StartElementInResult": (2, 2, (3, 0), (), "StartElementInResult", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechPhraseAlternates(DispatchBaseClass):
'ISpeechPhraseAlternates Interface'
CLSID = IID('{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')
coclass_clsid = None
# Result is of type ISpeechPhraseAlternate
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechPhraseElement(DispatchBaseClass):
'ISpeechPhraseElement Interface'
CLSID = IID('{E6176F96-E373-4801-B223-3B62C068C0B4}')
coclass_clsid = None
_prop_map_get_ = {
"ActualConfidence": (12, 2, (3, 0), (), "ActualConfidence", None),
"AudioSizeBytes": (4, 2, (3, 0), (), "AudioSizeBytes", None),
"AudioSizeTime": (2, 2, (3, 0), (), "AudioSizeTime", None),
"AudioStreamOffset": (3, 2, (3, 0), (), "AudioStreamOffset", None),
"AudioTimeOffset": (1, 2, (3, 0), (), "AudioTimeOffset", None),
"DisplayAttributes": (10, 2, (3, 0), (), "DisplayAttributes", None),
"DisplayText": (7, 2, (8, 0), (), "DisplayText", None),
"EngineConfidence": (13, 2, (4, 0), (), "EngineConfidence", None),
"LexicalForm": (8, 2, (8, 0), (), "LexicalForm", None),
"Pronunciation": (9, 2, (12, 0), (), "Pronunciation", None),
"RequiredConfidence": (11, 2, (3, 0), (), "RequiredConfidence", None),
"RetainedSizeBytes": (6, 2, (3, 0), (), "RetainedSizeBytes", None),
"RetainedStreamOffset": (5, 2, (3, 0), (), "RetainedStreamOffset", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechPhraseElements(DispatchBaseClass):
'ISpeechPhraseElements Interface'
CLSID = IID('{0626B328-3478-467D-A0B3-D0853B93DDA3}')
coclass_clsid = None
# Result is of type ISpeechPhraseElement
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{E6176F96-E373-4801-B223-3B62C068C0B4}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{E6176F96-E373-4801-B223-3B62C068C0B4}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{E6176F96-E373-4801-B223-3B62C068C0B4}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechPhraseInfo(DispatchBaseClass):
'ISpeechPhraseInfo Interface'
CLSID = IID('{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')
coclass_clsid = None
def GetDisplayAttributes(self, StartElement=0, Elements=-1, UseReplacements=True):
		'GetDisplayAttributes'
return self._oleobj_.InvokeTypes(16, LCID, 1, (3, 0), ((3, 49), (3, 49), (11, 49)),StartElement
, Elements, UseReplacements)
def GetText(self, StartElement=0, Elements=-1, UseReplacements=True):
'GetText'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(15, LCID, 1, (8, 0), ((3, 49), (3, 49), (11, 49)),StartElement
, Elements, UseReplacements)
def SaveToMemory(self):
'SaveToMemory'
return self._ApplyTypes_(14, 1, (12, 0), (), u'SaveToMemory', None,)
_prop_map_get_ = {
"AudioSizeBytes": (5, 2, (3, 0), (), "AudioSizeBytes", None),
"AudioSizeTime": (7, 2, (3, 0), (), "AudioSizeTime", None),
"AudioStreamPosition": (4, 2, (12, 0), (), "AudioStreamPosition", None),
# Method 'Elements' returns object of type 'ISpeechPhraseElements'
"Elements": (10, 2, (9, 0), (), "Elements", '{0626B328-3478-467D-A0B3-D0853B93DDA3}'),
"EngineId": (12, 2, (8, 0), (), "EngineId", None),
"EnginePrivateData": (13, 2, (12, 0), (), "EnginePrivateData", None),
"GrammarId": (2, 2, (12, 0), (), "GrammarId", None),
"LanguageId": (1, 2, (3, 0), (), "LanguageId", None),
# Method 'Properties' returns object of type 'ISpeechPhraseProperties'
"Properties": (9, 2, (9, 0), (), "Properties", '{08166B47-102E-4B23-A599-BDB98DBFD1F4}'),
# Method 'Replacements' returns object of type 'ISpeechPhraseReplacements'
"Replacements": (11, 2, (9, 0), (), "Replacements", '{38BC662F-2257-4525-959E-2069D2596C05}'),
"RetainedSizeBytes": (6, 2, (3, 0), (), "RetainedSizeBytes", None),
# Method 'Rule' returns object of type 'ISpeechPhraseRule'
"Rule": (8, 2, (9, 0), (), "Rule", '{A7BFE112-A4A0-48D9-B602-C313843F6964}'),
"StartTime": (3, 2, (12, 0), (), "StartTime", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
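# Usage sketch for ISpeechPhraseInfo (comments only): walk a recognition
# result's phrase, assuming `result` is an ISpeechRecoResult as delivered
# to the OnRecognition handler sketched further below.
#
#   info = result.PhraseInfo
#   print(info.GetText())                     # full text, with replacements
#   for elem in info.Elements:                # ISpeechPhraseElement entries
#       print(elem.DisplayText, elem.EngineConfidence)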
class ISpeechPhraseInfoBuilder(DispatchBaseClass):
'ISpeechPhraseInfoBuilder Interface'
CLSID = IID('{3B151836-DF3A-4E0A-846C-D2ADC9334333}')
coclass_clsid = IID('{C23FC28D-C55F-4720-8B32-91F73C2BD5D1}')
# Result is of type ISpeechPhraseInfo
def RestorePhraseFromMemory(self, PhraseInMemory=defaultNamedNotOptArg):
'RestorePhraseFromMemory'
ret = self._oleobj_.InvokeTypes(1, LCID, 1, (9, 0), ((16396, 1),),PhraseInMemory
)
if ret is not None:
ret = Dispatch(ret, u'RestorePhraseFromMemory', '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')
return ret
_prop_map_get_ = {
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
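# Usage sketch for ISpeechPhraseInfoBuilder (comments only). Assumes the
# standard "SAPI.SpPhraseInfoBuilder" ProgID; `info` is an
# ISpeechPhraseInfo such as the one in the sketch above.
#
#   import win32com.client
#   blob = info.SaveToMemory()                # serialize the phrase
#   builder = win32com.client.Dispatch("SAPI.SpPhraseInfoBuilder")
#   restored = builder.RestorePhraseFromMemory(blob)
#   print(restored.GetText())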
class ISpeechPhraseProperties(DispatchBaseClass):
'ISpeechPhraseProperties Interface'
CLSID = IID('{08166B47-102E-4B23-A599-BDB98DBFD1F4}')
coclass_clsid = None
# Result is of type ISpeechPhraseProperty
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{CE563D48-961E-4732-A2E1-378A42B430BE}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{CE563D48-961E-4732-A2E1-378A42B430BE}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{CE563D48-961E-4732-A2E1-378A42B430BE}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechPhraseProperty(DispatchBaseClass):
'ISpeechPhraseProperty Interface'
CLSID = IID('{CE563D48-961E-4732-A2E1-378A42B430BE}')
coclass_clsid = None
_prop_map_get_ = {
# Method 'Children' returns object of type 'ISpeechPhraseProperties'
"Children": (9, 2, (9, 0), (), "Children", '{08166B47-102E-4B23-A599-BDB98DBFD1F4}'),
"Confidence": (7, 2, (3, 0), (), "Confidence", None),
"EngineConfidence": (6, 2, (4, 0), (), "EngineConfidence", None),
"FirstElement": (4, 2, (3, 0), (), "FirstElement", None),
"Id": (2, 2, (3, 0), (), "Id", None),
"Name": (1, 2, (8, 0), (), "Name", None),
"NumberOfElements": (5, 2, (3, 0), (), "NumberOfElements", None),
# Method 'Parent' returns object of type 'ISpeechPhraseProperty'
"Parent": (8, 2, (9, 0), (), "Parent", '{CE563D48-961E-4732-A2E1-378A42B430BE}'),
"Value": (3, 2, (12, 0), (), "Value", None),
}
_prop_map_put_ = {
}
# Default property for this class is 'Value'
def __call__(self):
return self._ApplyTypes_(*(3, 2, (12, 0), (), "Value", None))
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechPhraseReplacement(DispatchBaseClass):
'ISpeechPhraseReplacement Interface'
CLSID = IID('{2890A410-53A7-4FB5-94EC-06D4998E3D02}')
coclass_clsid = None
_prop_map_get_ = {
"DisplayAttributes": (1, 2, (3, 0), (), "DisplayAttributes", None),
"FirstElement": (3, 2, (3, 0), (), "FirstElement", None),
"NumberOfElements": (4, 2, (3, 0), (), "NumberOfElements", None),
"Text": (2, 2, (8, 0), (), "Text", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechPhraseReplacements(DispatchBaseClass):
'ISpeechPhraseReplacements Interface'
CLSID = IID('{38BC662F-2257-4525-959E-2069D2596C05}')
coclass_clsid = None
# Result is of type ISpeechPhraseReplacement
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{2890A410-53A7-4FB5-94EC-06D4998E3D02}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{2890A410-53A7-4FB5-94EC-06D4998E3D02}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{2890A410-53A7-4FB5-94EC-06D4998E3D02}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechPhraseRule(DispatchBaseClass):
'ISpeechPhraseRule Interface'
CLSID = IID('{A7BFE112-A4A0-48D9-B602-C313843F6964}')
coclass_clsid = None
_prop_map_get_ = {
# Method 'Children' returns object of type 'ISpeechPhraseRules'
"Children": (6, 2, (9, 0), (), "Children", '{9047D593-01DD-4B72-81A3-E4A0CA69F407}'),
"Confidence": (7, 2, (3, 0), (), "Confidence", None),
"EngineConfidence": (8, 2, (4, 0), (), "EngineConfidence", None),
"FirstElement": (3, 2, (3, 0), (), "FirstElement", None),
"Id": (2, 2, (3, 0), (), "Id", None),
"Name": (1, 2, (8, 0), (), "Name", None),
"NumberOfElements": (4, 2, (3, 0), (), "NumberOfElements", None),
# Method 'Parent' returns object of type 'ISpeechPhraseRule'
"Parent": (5, 2, (9, 0), (), "Parent", '{A7BFE112-A4A0-48D9-B602-C313843F6964}'),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechPhraseRules(DispatchBaseClass):
'ISpeechPhraseRules Interface'
CLSID = IID('{9047D593-01DD-4B72-81A3-E4A0CA69F407}')
coclass_clsid = None
# Result is of type ISpeechPhraseRule
def Item(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, u'Item', '{A7BFE112-A4A0-48D9-B602-C313843F6964}')
return ret
_prop_map_get_ = {
"Count": (1, 2, (3, 0), (), "Count", None),
}
_prop_map_put_ = {
}
# Default method for this class is 'Item'
def __call__(self, Index=defaultNamedNotOptArg):
'Item'
ret = self._oleobj_.InvokeTypes(0, LCID, 1, (9, 0), ((3, 1),),Index
)
if ret is not None:
ret = Dispatch(ret, '__call__', '{A7BFE112-A4A0-48D9-B602-C313843F6964}')
return ret
def __unicode__(self, *args):
try:
return unicode(self.__call__(*args))
except pythoncom.com_error:
return repr(self)
def __str__(self, *args):
return str(self.__unicode__(*args))
def __int__(self, *args):
return int(self.__call__(*args))
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,2,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, '{A7BFE112-A4A0-48D9-B602-C313843F6964}')
#This class has Count() property - allow len(ob) to provide this
def __len__(self):
return self._ApplyTypes_(*(1, 2, (3, 0), (), "Count", None))
#This class has a __len__ - this is needed so 'if object:' always returns TRUE.
def __nonzero__(self):
return True
class ISpeechRecoContext(DispatchBaseClass):
'ISpeechRecoContext Interface'
CLSID = IID('{580AA49D-7E1E-4809-B8E2-57DA806104B8}')
coclass_clsid = IID('{73AD6842-ACE0-45E8-A4DD-8795881A2C2A}')
def Bookmark(self, Options=defaultNamedNotOptArg, StreamPos=defaultNamedNotOptArg, BookmarkId=defaultNamedNotOptArg):
'Bookmark'
return self._oleobj_.InvokeTypes(16, LCID, 1, (24, 0), ((3, 1), (12, 1), (12, 1)),Options
, StreamPos, BookmarkId)
# Result is of type ISpeechRecoGrammar
def CreateGrammar(self, GrammarId=0):
'CreateGrammar'
ret = self._oleobj_.InvokeTypes(14, LCID, 1, (9, 0), ((12, 49),),GrammarId
)
if ret is not None:
ret = Dispatch(ret, u'CreateGrammar', '{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}')
return ret
# Result is of type ISpeechRecoResult
def CreateResultFromMemory(self, ResultBlock=defaultNamedNotOptArg):
'CreateResultFromMemory'
ret = self._oleobj_.InvokeTypes(15, LCID, 1, (9, 0), ((16396, 1),),ResultBlock
)
if ret is not None:
ret = Dispatch(ret, u'CreateResultFromMemory', '{ED2879CF-CED9-4EE6-A534-DE0191D5468D}')
return ret
def Pause(self):
'Pause'
return self._oleobj_.InvokeTypes(12, LCID, 1, (24, 0), (),)
def Resume(self):
'Resume'
return self._oleobj_.InvokeTypes(13, LCID, 1, (24, 0), (),)
def SetAdaptationData(self, AdaptationString=defaultNamedNotOptArg):
'SetAdaptationData'
return self._oleobj_.InvokeTypes(17, LCID, 1, (24, 0), ((8, 1),),AdaptationString
)
_prop_map_get_ = {
"AllowVoiceFormatMatchingOnNextSet": (5, 2, (11, 0), (), "AllowVoiceFormatMatchingOnNextSet", None),
"AudioInputInterferenceStatus": (2, 2, (3, 0), (), "AudioInputInterferenceStatus", None),
"CmdMaxAlternates": (8, 2, (3, 0), (), "CmdMaxAlternates", None),
"EventInterests": (7, 2, (3, 0), (), "EventInterests", None),
# Method 'Recognizer' returns object of type 'ISpeechRecognizer'
"Recognizer": (1, 2, (9, 0), (), "Recognizer", '{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}'),
"RequestedUIType": (3, 2, (8, 0), (), "RequestedUIType", None),
"RetainedAudio": (10, 2, (3, 0), (), "RetainedAudio", None),
# Method 'RetainedAudioFormat' returns object of type 'ISpeechAudioFormat'
"RetainedAudioFormat": (11, 2, (9, 0), (), "RetainedAudioFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
"State": (9, 2, (3, 0), (), "State", None),
# Method 'Voice' returns object of type 'ISpeechVoice'
"Voice": (4, 2, (9, 0), (), "Voice", '{269316D8-57BD-11D2-9EEE-00C04F797396}'),
"VoicePurgeEvent": (6, 2, (3, 0), (), "VoicePurgeEvent", None),
}
_prop_map_put_ = {
"AllowVoiceFormatMatchingOnNextSet": ((5, LCID, 4, 0),()),
"CmdMaxAlternates": ((8, LCID, 4, 0),()),
"EventInterests": ((7, LCID, 4, 0),()),
"RetainedAudio": ((10, LCID, 4, 0),()),
"RetainedAudioFormat": ((11, LCID, 8, 0),()),
"State": ((9, LCID, 4, 0),()),
"Voice": ((4, LCID, 8, 0),()),
"VoicePurgeEvent": ((6, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
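# Usage sketch for ISpeechRecoContext (comments only). Assumes the
# standard "SAPI.SpSharedRecoContext" ProgID; DispatchWithEvents wires the
# handler to the reco-context event source generated in this module, and
# SGDSActive == 1 enables dictation.
#
#   import win32com.client
#   import pythoncom
#
#   class RecoEvents:
#       def OnRecognition(self, StreamNumber, StreamPosition,
#                         RecognitionType, Result):
#           print(Result.PhraseInfo.GetText())
#
#   ctx = win32com.client.DispatchWithEvents("SAPI.SpSharedRecoContext",
#                                            RecoEvents)
#   grammar = ctx.CreateGrammar()
#   grammar.DictationLoad()
#   grammar.DictationSetState(1)              # SGDSActive
#   while True:
#       pythoncom.PumpWaitingMessages()       # deliver COM events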
class ISpeechRecoGrammar(DispatchBaseClass):
'ISpeechRecoGrammar Interface'
CLSID = IID('{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}')
coclass_clsid = None
def CmdLoadFromFile(self, FileName=defaultNamedNotOptArg, LoadOption=0):
'CmdLoadFromFile'
return self._oleobj_.InvokeTypes(7, LCID, 1, (24, 0), ((8, 1), (3, 49)),FileName
, LoadOption)
def CmdLoadFromMemory(self, GrammarData=defaultNamedNotOptArg, LoadOption=0):
'CmdLoadFromMemory'
return self._oleobj_.InvokeTypes(10, LCID, 1, (24, 0), ((12, 1), (3, 49)),GrammarData
, LoadOption)
def CmdLoadFromObject(self, ClassId=defaultNamedNotOptArg, GrammarName=defaultNamedNotOptArg, LoadOption=0):
'CmdLoadFromObject'
return self._oleobj_.InvokeTypes(8, LCID, 1, (24, 0), ((8, 1), (8, 1), (3, 49)),ClassId
, GrammarName, LoadOption)
def CmdLoadFromProprietaryGrammar(self, ProprietaryGuid=defaultNamedNotOptArg, ProprietaryString=defaultNamedNotOptArg, ProprietaryData=defaultNamedNotOptArg, LoadOption=0):
'CmdLoadFromProprietaryGrammar'
return self._oleobj_.InvokeTypes(11, LCID, 1, (24, 0), ((8, 1), (8, 1), (12, 1), (3, 49)),ProprietaryGuid
, ProprietaryString, ProprietaryData, LoadOption)
def CmdLoadFromResource(self, hModule=defaultNamedNotOptArg, ResourceName=defaultNamedNotOptArg, ResourceType=defaultNamedNotOptArg, LanguageId=defaultNamedNotOptArg
, LoadOption=0):
'CmdLoadFromResource'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((3, 1), (12, 1), (12, 1), (3, 1), (3, 49)),hModule
, ResourceName, ResourceType, LanguageId, LoadOption)
def CmdSetRuleIdState(self, RuleId=defaultNamedNotOptArg, State=defaultNamedNotOptArg):
'CmdSetRuleIdState'
return self._oleobj_.InvokeTypes(13, LCID, 1, (24, 0), ((3, 1), (3, 1)),RuleId
, State)
def CmdSetRuleState(self, Name=defaultNamedNotOptArg, State=defaultNamedNotOptArg):
'CmdSetRuleState'
return self._oleobj_.InvokeTypes(12, LCID, 1, (24, 0), ((8, 1), (3, 1)),Name
, State)
def DictationLoad(self, TopicName=u'', LoadOption=0):
'DictationLoad'
return self._ApplyTypes_(14, 1, (24, 32), ((8, 49), (3, 49)), u'DictationLoad', None,TopicName
, LoadOption)
def DictationSetState(self, State=defaultNamedNotOptArg):
'DictationSetState'
return self._oleobj_.InvokeTypes(16, LCID, 1, (24, 0), ((3, 1),),State
)
def DictationUnload(self):
'DictationUnload'
return self._oleobj_.InvokeTypes(15, LCID, 1, (24, 0), (),)
def IsPronounceable(self, Word=defaultNamedNotOptArg):
'IsPronounceable'
return self._oleobj_.InvokeTypes(19, LCID, 1, (3, 0), ((8, 1),),Word
)
def Reset(self, NewLanguage=0):
'Reset'
return self._oleobj_.InvokeTypes(5, LCID, 1, (24, 0), ((3, 49),),NewLanguage
)
def SetTextSelection(self, Info=defaultNamedNotOptArg):
'SetTextSelection'
return self._oleobj_.InvokeTypes(18, LCID, 1, (24, 0), ((9, 1),),Info
)
def SetWordSequenceData(self, Text=defaultNamedNotOptArg, TextLength=defaultNamedNotOptArg, Info=defaultNamedNotOptArg):
'SetWordSequenceData'
return self._oleobj_.InvokeTypes(17, LCID, 1, (24, 0), ((8, 1), (3, 1), (9, 1)),Text
, TextLength, Info)
_prop_map_get_ = {
"Id": (1, 2, (12, 0), (), "Id", None),
# Method 'RecoContext' returns object of type 'ISpeechRecoContext'
"RecoContext": (2, 2, (9, 0), (), "RecoContext", '{580AA49D-7E1E-4809-B8E2-57DA806104B8}'),
# Method 'Rules' returns object of type 'ISpeechGrammarRules'
"Rules": (4, 2, (9, 0), (), "Rules", '{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}'),
"State": (3, 2, (3, 0), (), "State", None),
}
_prop_map_put_ = {
"State": ((3, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
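# Usage sketch for ISpeechRecoGrammar (comments only): load a
# command-and-control grammar from a file instead of dictation. Assumes
# `ctx` from the sketch above, SLOStatic == 0 and SGDSActive == 1; the
# grammar file name is illustrative, and passing None as the rule name is
# the usual way to toggle every top-level rule.
#
#   grammar = ctx.CreateGrammar()
#   grammar.CmdLoadFromFile("commands.xml", 0)
#   grammar.CmdSetRuleState(None, 1)          # activate all top-level rules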
class ISpeechRecoResult(DispatchBaseClass):
'ISpeechRecoResult Interface'
CLSID = IID('{ED2879CF-CED9-4EE6-A534-DE0191D5468D}')
coclass_clsid = None
# Result is of type ISpeechPhraseAlternates
def Alternates(self, RequestCount=defaultNamedNotOptArg, StartElement=0, Elements=-1):
'Alternates'
ret = self._oleobj_.InvokeTypes(5, LCID, 1, (9, 0), ((3, 1), (3, 49), (3, 49)),RequestCount
, StartElement, Elements)
if ret is not None:
ret = Dispatch(ret, u'Alternates', '{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')
return ret
# Result is of type ISpeechMemoryStream
def Audio(self, StartElement=0, Elements=-1):
'Audio'
ret = self._oleobj_.InvokeTypes(6, LCID, 1, (9, 0), ((3, 49), (3, 49)),StartElement
, Elements)
if ret is not None:
ret = Dispatch(ret, u'Audio', '{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')
return ret
def DiscardResultInfo(self, ValueTypes=defaultNamedNotOptArg):
'DiscardResultInfo'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((3, 1),),ValueTypes
)
def SaveToMemory(self):
'SaveToMemory'
return self._ApplyTypes_(8, 1, (12, 0), (), u'SaveToMemory', None,)
def SpeakAudio(self, StartElement=0, Elements=-1, Flags=0):
'SpeakAudio'
return self._oleobj_.InvokeTypes(7, LCID, 1, (3, 0), ((3, 49), (3, 49), (3, 49)),StartElement
, Elements, Flags)
_prop_map_get_ = {
# Method 'AudioFormat' returns object of type 'ISpeechAudioFormat'
"AudioFormat": (3, 2, (9, 0), (), "AudioFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
# Method 'PhraseInfo' returns object of type 'ISpeechPhraseInfo'
"PhraseInfo": (4, 2, (9, 0), (), "PhraseInfo", '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}'),
# Method 'RecoContext' returns object of type 'ISpeechRecoContext'
"RecoContext": (1, 2, (9, 0), (), "RecoContext", '{580AA49D-7E1E-4809-B8E2-57DA806104B8}'),
# Method 'Times' returns object of type 'ISpeechRecoResultTimes'
"Times": (2, 2, (9, 0), (), "Times", '{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}'),
}
_prop_map_put_ = {
"AudioFormat": ((3, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
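# Usage sketch for ISpeechRecoResult (comments only): ask a result for
# n-best alternates and replay its audio. Assumes `result` from an
# OnRecognition handler, that alternates are available (dictation, or a
# context with CmdMaxAlternates set), and that the context retained audio
# (RetainedAudio set to SRAORetainAudio == 1).
#
#   for alt in result.Alternates(5):          # request up to 5 alternates
#       print(alt.PhraseInfo.GetText())
#   result.SpeakAudio()                       # play back retained audio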
class ISpeechRecoResult2(DispatchBaseClass):
'ISpeechRecoResult2 Interface'
CLSID = IID('{8E0A246D-D3C8-45DE-8657-04290C458C3C}')
coclass_clsid = None
# Result is of type ISpeechPhraseAlternates
def Alternates(self, RequestCount=defaultNamedNotOptArg, StartElement=0, Elements=-1):
'Alternates'
ret = self._oleobj_.InvokeTypes(5, LCID, 1, (9, 0), ((3, 1), (3, 49), (3, 49)),RequestCount
, StartElement, Elements)
if ret is not None:
ret = Dispatch(ret, u'Alternates', '{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')
return ret
# Result is of type ISpeechMemoryStream
def Audio(self, StartElement=0, Elements=-1):
'Audio'
ret = self._oleobj_.InvokeTypes(6, LCID, 1, (9, 0), ((3, 49), (3, 49)),StartElement
, Elements)
if ret is not None:
ret = Dispatch(ret, u'Audio', '{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')
return ret
def DiscardResultInfo(self, ValueTypes=defaultNamedNotOptArg):
'DiscardResultInfo'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((3, 1),),ValueTypes
)
def SaveToMemory(self):
'SaveToMemory'
return self._ApplyTypes_(8, 1, (12, 0), (), u'SaveToMemory', None,)
def SetTextFeedback(self, Feedback=defaultNamedNotOptArg, WasSuccessful=defaultNamedNotOptArg):
		'SetTextFeedback'
return self._oleobj_.InvokeTypes(12, LCID, 1, (24, 0), ((8, 1), (11, 1)),Feedback
, WasSuccessful)
def SpeakAudio(self, StartElement=0, Elements=-1, Flags=0):
'SpeakAudio'
return self._oleobj_.InvokeTypes(7, LCID, 1, (3, 0), ((3, 49), (3, 49), (3, 49)),StartElement
, Elements, Flags)
_prop_map_get_ = {
# Method 'AudioFormat' returns object of type 'ISpeechAudioFormat'
"AudioFormat": (3, 2, (9, 0), (), "AudioFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
# Method 'PhraseInfo' returns object of type 'ISpeechPhraseInfo'
"PhraseInfo": (4, 2, (9, 0), (), "PhraseInfo", '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}'),
# Method 'RecoContext' returns object of type 'ISpeechRecoContext'
"RecoContext": (1, 2, (9, 0), (), "RecoContext", '{580AA49D-7E1E-4809-B8E2-57DA806104B8}'),
# Method 'Times' returns object of type 'ISpeechRecoResultTimes'
"Times": (2, 2, (9, 0), (), "Times", '{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}'),
}
_prop_map_put_ = {
"AudioFormat": ((3, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechRecoResultDispatch(DispatchBaseClass):
'ISpeechRecoResultDispatch Interface'
CLSID = IID('{6D60EB64-ACED-40A6-BBF3-4E557F71DEE2}')
coclass_clsid = None
# Result is of type ISpeechPhraseAlternates
def Alternates(self, RequestCount=defaultNamedNotOptArg, StartElement=0, Elements=-1):
'Alternates'
ret = self._oleobj_.InvokeTypes(5, LCID, 1, (9, 0), ((3, 1), (3, 49), (3, 49)),RequestCount
, StartElement, Elements)
if ret is not None:
ret = Dispatch(ret, u'Alternates', '{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')
return ret
# Result is of type ISpeechMemoryStream
def Audio(self, StartElement=0, Elements=-1):
'Audio'
ret = self._oleobj_.InvokeTypes(6, LCID, 1, (9, 0), ((3, 49), (3, 49)),StartElement
, Elements)
if ret is not None:
ret = Dispatch(ret, u'Audio', '{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')
return ret
def DiscardResultInfo(self, ValueTypes=defaultNamedNotOptArg):
'DiscardResultInfo'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((3, 1),),ValueTypes
)
def GetXMLErrorInfo(self, LineNumber=pythoncom.Missing, ScriptLine=pythoncom.Missing, Source=pythoncom.Missing, Description=pythoncom.Missing
, ResultCode=pythoncom.Missing):
'GetXMLErrorInfo'
return self._ApplyTypes_(11, 1, (11, 0), ((16387, 2), (16392, 2), (16392, 2), (16392, 2), (16387, 2)), u'GetXMLErrorInfo', None,LineNumber
, ScriptLine, Source, Description, ResultCode)
def GetXMLResult(self, Options=defaultNamedNotOptArg):
'GetXMLResult'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(10, LCID, 1, (8, 0), ((3, 1),),Options
)
def SaveToMemory(self):
'SaveToMemory'
return self._ApplyTypes_(8, 1, (12, 0), (), u'SaveToMemory', None,)
def SetTextFeedback(self, Feedback=defaultNamedNotOptArg, WasSuccessful=defaultNamedNotOptArg):
'SetTextFeedback'
return self._oleobj_.InvokeTypes(12, LCID, 1, (24, 0), ((8, 1), (11, 1)),Feedback
, WasSuccessful)
def SpeakAudio(self, StartElement=0, Elements=-1, Flags=0):
'SpeakAudio'
return self._oleobj_.InvokeTypes(7, LCID, 1, (3, 0), ((3, 49), (3, 49), (3, 49)),StartElement
, Elements, Flags)
_prop_map_get_ = {
# Method 'AudioFormat' returns object of type 'ISpeechAudioFormat'
"AudioFormat": (3, 2, (9, 0), (), "AudioFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
# Method 'PhraseInfo' returns object of type 'ISpeechPhraseInfo'
"PhraseInfo": (4, 2, (9, 0), (), "PhraseInfo", '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}'),
# Method 'RecoContext' returns object of type 'ISpeechRecoContext'
"RecoContext": (1, 2, (9, 0), (), "RecoContext", '{580AA49D-7E1E-4809-B8E2-57DA806104B8}'),
# Method 'Times' returns object of type 'ISpeechRecoResultTimes'
"Times": (2, 2, (9, 0), (), "Times", '{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}'),
}
_prop_map_put_ = {
"AudioFormat": ((3, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechRecoResultTimes(DispatchBaseClass):
'ISpeechRecoResultTimes Interface'
CLSID = IID('{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}')
coclass_clsid = None
_prop_map_get_ = {
"Length": (2, 2, (12, 0), (), "Length", None),
"OffsetFromStart": (4, 2, (12, 0), (), "OffsetFromStart", None),
"StreamTime": (1, 2, (12, 0), (), "StreamTime", None),
"TickCount": (3, 2, (3, 0), (), "TickCount", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechRecognizer(DispatchBaseClass):
'ISpeechRecognizer Interface'
CLSID = IID('{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}')
coclass_clsid = IID('{3BEE4890-4FE9-4A37-8C1E-5E7E12791C1F}')
# Result is of type ISpeechRecoContext
def CreateRecoContext(self):
'CreateRecoContext'
ret = self._oleobj_.InvokeTypes(10, LCID, 1, (9, 0), (),)
if ret is not None:
ret = Dispatch(ret, u'CreateRecoContext', '{580AA49D-7E1E-4809-B8E2-57DA806104B8}')
return ret
def DisplayUI(self, hWndParent=defaultNamedNotOptArg, Title=defaultNamedNotOptArg, TypeOfUI=defaultNamedNotOptArg, ExtraData=u''):
'DisplayUI'
return self._ApplyTypes_(17, 1, (24, 32), ((3, 1), (8, 1), (8, 1), (16396, 49)), u'DisplayUI', None,hWndParent
, Title, TypeOfUI, ExtraData)
def EmulateRecognition(self, TextElements=defaultNamedNotOptArg, ElementDisplayAttributes=u'', LanguageId=0):
'EmulateRecognition'
return self._ApplyTypes_(9, 1, (24, 32), ((12, 1), (16396, 49), (3, 49)), u'EmulateRecognition', None,TextElements
, ElementDisplayAttributes, LanguageId)
# Result is of type ISpeechObjectTokens
def GetAudioInputs(self, RequiredAttributes=u'', OptionalAttributes=u''):
'GetAudioInputs'
return self._ApplyTypes_(19, 1, (9, 32), ((8, 49), (8, 49)), u'GetAudioInputs', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
# Result is of type ISpeechAudioFormat
def GetFormat(self, Type=defaultNamedNotOptArg):
'GetFormat'
ret = self._oleobj_.InvokeTypes(11, LCID, 1, (9, 0), ((3, 1),),Type
)
if ret is not None:
ret = Dispatch(ret, u'GetFormat', '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')
return ret
# Result is of type ISpeechObjectTokens
def GetProfiles(self, RequiredAttributes=u'', OptionalAttributes=u''):
'GetProfiles'
return self._ApplyTypes_(20, 1, (9, 32), ((8, 49), (8, 49)), u'GetProfiles', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
def GetPropertyNumber(self, Name=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'GetPropertyNumber'
return self._ApplyTypes_(13, 1, (11, 0), ((8, 1), (16387, 3)), u'GetPropertyNumber', None,Name
, Value)
def GetPropertyString(self, Name=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'GetPropertyString'
return self._ApplyTypes_(15, 1, (11, 0), ((8, 1), (16392, 3)), u'GetPropertyString', None,Name
, Value)
# Result is of type ISpeechObjectTokens
def GetRecognizers(self, RequiredAttributes=u'', OptionalAttributes=u''):
'GetRecognizers'
return self._ApplyTypes_(18, 1, (9, 32), ((8, 49), (8, 49)), u'GetRecognizers', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
def IsUISupported(self, TypeOfUI=defaultNamedNotOptArg, ExtraData=u''):
'IsUISupported'
return self._ApplyTypes_(16, 1, (11, 32), ((8, 1), (16396, 49)), u'IsUISupported', None,TypeOfUI
, ExtraData)
def SetPropertyNumber(self, Name=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetPropertyNumber'
return self._oleobj_.InvokeTypes(12, LCID, 1, (11, 0), ((8, 1), (3, 1)),Name
, Value)
def SetPropertyString(self, Name=defaultNamedNotOptArg, Value=defaultNamedNotOptArg):
'SetPropertyString'
return self._oleobj_.InvokeTypes(14, LCID, 1, (11, 0), ((8, 1), (8, 1)),Name
, Value)
_prop_map_get_ = {
"AllowAudioInputFormatChangesOnNextSet": (2, 2, (11, 0), (), "AllowAudioInputFormatChangesOnNextSet", None),
# Method 'AudioInput' returns object of type 'ISpeechObjectToken'
"AudioInput": (3, 2, (9, 0), (), "AudioInput", '{C74A3ADC-B727-4500-A84A-B526721C8B8C}'),
# Method 'AudioInputStream' returns object of type 'ISpeechBaseStream'
"AudioInputStream": (4, 2, (9, 0), (), "AudioInputStream", '{6450336F-7D49-4CED-8097-49D6DEE37294}'),
"IsShared": (5, 2, (11, 0), (), "IsShared", None),
# Method 'Profile' returns object of type 'ISpeechObjectToken'
"Profile": (8, 2, (9, 0), (), "Profile", '{C74A3ADC-B727-4500-A84A-B526721C8B8C}'),
# Method 'Recognizer' returns object of type 'ISpeechObjectToken'
"Recognizer": (1, 2, (9, 0), (), "Recognizer", '{C74A3ADC-B727-4500-A84A-B526721C8B8C}'),
"State": (6, 2, (3, 0), (), "State", None),
# Method 'Status' returns object of type 'ISpeechRecognizerStatus'
"Status": (7, 2, (9, 0), (), "Status", '{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}'),
}
_prop_map_put_ = {
"AllowAudioInputFormatChangesOnNextSet": ((2, LCID, 4, 0),()),
"AudioInput": ((3, LCID, 8, 0),()),
"AudioInputStream": ((4, LCID, 8, 0),()),
"Profile": ((8, LCID, 8, 0),()),
"Recognizer": ((1, LCID, 8, 0),()),
"State": ((6, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
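# A minimal usage sketch (not part of the generated bindings): enumerating
# the installed recognizer engines through ISpeechRecognizer.GetRecognizers.
# Assumes the 'SAPI.SpSharedRecognizer' ProgID declared by the
# SpSharedRecognizer CoClass later in this module is registered.
#
#   import win32com.client
#   recognizer = win32com.client.Dispatch("SAPI.SpSharedRecognizer")
#   tokens = recognizer.GetRecognizers()       # ISpeechObjectTokens
#   for i in range(tokens.Count):
#       print tokens.Item(i).GetDescription()  # engine display name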
class ISpeechRecognizerStatus(DispatchBaseClass):
'ISpeechRecognizerStatus Interface'
CLSID = IID('{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}')
coclass_clsid = None
_prop_map_get_ = {
# Method 'AudioStatus' returns object of type 'ISpeechAudioStatus'
"AudioStatus": (1, 2, (9, 0), (), "AudioStatus", '{C62D9C91-7458-47F6-862D-1EF86FB0B278}'),
"ClsidEngine": (5, 2, (8, 0), (), "ClsidEngine", None),
"CurrentStreamNumber": (3, 2, (3, 0), (), "CurrentStreamNumber", None),
"CurrentStreamPosition": (2, 2, (12, 0), (), "CurrentStreamPosition", None),
"NumberOfActiveRules": (4, 2, (3, 0), (), "NumberOfActiveRules", None),
"SupportedLanguages": (6, 2, (12, 0), (), "SupportedLanguages", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechResourceLoader(DispatchBaseClass):
'ISpeechResourceLoader Interface'
CLSID = IID('{B9AC5783-FCD0-4B21-B119-B4F8DA8FD2C3}')
coclass_clsid = None
def GetLocalCopy(self, bstrResourceUri=defaultNamedNotOptArg, pbstrLocalPath=pythoncom.Missing, pbstrMIMEType=pythoncom.Missing, pbstrRedirectUrl=pythoncom.Missing):
return self._ApplyTypes_(2, 1, (24, 0), ((8, 1), (16392, 2), (16392, 2), (16392, 2)), u'GetLocalCopy', None,bstrResourceUri
, pbstrLocalPath, pbstrMIMEType, pbstrRedirectUrl)
def LoadResource(self, bstrResourceUri=defaultNamedNotOptArg, fAlwaysReload=defaultNamedNotOptArg, pStream=pythoncom.Missing, pbstrMIMEType=pythoncom.Missing
, pfModified=pythoncom.Missing, pbstrRedirectUrl=pythoncom.Missing):
return self._ApplyTypes_(1, 1, (24, 0), ((8, 1), (11, 1), (16397, 2), (16392, 2), (16395, 2), (16392, 2)), u'LoadResource', None,bstrResourceUri
, fAlwaysReload, pStream, pbstrMIMEType, pfModified, pbstrRedirectUrl
)
def ReleaseLocalCopy(self, pbstrLocalPath=defaultNamedNotOptArg):
return self._oleobj_.InvokeTypes(3, LCID, 1, (24, 0), ((8, 1),),pbstrLocalPath
)
_prop_map_get_ = {
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechTextSelectionInformation(DispatchBaseClass):
'ISpeechTextSelectionInformation Interface'
CLSID = IID('{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}')
coclass_clsid = IID('{0F92030A-CBFD-4AB8-A164-FF5985547FF6}')
_prop_map_get_ = {
"ActiveLength": (2, 2, (3, 0), (), "ActiveLength", None),
"ActiveOffset": (1, 2, (3, 0), (), "ActiveOffset", None),
"SelectionLength": (4, 2, (3, 0), (), "SelectionLength", None),
"SelectionOffset": (3, 2, (3, 0), (), "SelectionOffset", None),
}
_prop_map_put_ = {
"ActiveLength": ((2, LCID, 4, 0),()),
"ActiveOffset": ((1, LCID, 4, 0),()),
"SelectionLength": ((4, LCID, 4, 0),()),
"SelectionOffset": ((3, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechVoice(DispatchBaseClass):
'ISpeechVoice Interface'
CLSID = IID('{269316D8-57BD-11D2-9EEE-00C04F797396}')
coclass_clsid = IID('{96749377-3391-11D2-9EE3-00C04F797396}')
def DisplayUI(self, hWndParent=defaultNamedNotOptArg, Title=defaultNamedNotOptArg, TypeOfUI=defaultNamedNotOptArg, ExtraData=u''):
'DisplayUI'
return self._ApplyTypes_(22, 1, (24, 32), ((3, 1), (8, 1), (8, 1), (16396, 49)), u'DisplayUI', None,hWndParent
, Title, TypeOfUI, ExtraData)
# Result is of type ISpeechObjectTokens
def GetAudioOutputs(self, RequiredAttributes=u'', OptionalAttributes=u''):
'GetAudioOutputs'
return self._ApplyTypes_(18, 1, (9, 32), ((8, 49), (8, 49)), u'GetAudioOutputs', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
# Result is of type ISpeechObjectTokens
def GetVoices(self, RequiredAttributes=u'', OptionalAttributes=u''):
'GetVoices'
return self._ApplyTypes_(17, 1, (9, 32), ((8, 49), (8, 49)), u'GetVoices', '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',RequiredAttributes
, OptionalAttributes)
def IsUISupported(self, TypeOfUI=defaultNamedNotOptArg, ExtraData=u''):
'IsUISupported'
return self._ApplyTypes_(21, 1, (11, 32), ((8, 1), (16396, 49)), u'IsUISupported', None,TypeOfUI
, ExtraData)
def Pause(self):
		"Pauses the voice's rendering."
return self._oleobj_.InvokeTypes(14, LCID, 1, (24, 0), (),)
def Resume(self):
		"Resumes the voice's rendering."
return self._oleobj_.InvokeTypes(15, LCID, 1, (24, 0), (),)
def Skip(self, Type=defaultNamedNotOptArg, NumItems=defaultNamedNotOptArg):
'Skips rendering the specified number of items.'
return self._oleobj_.InvokeTypes(16, LCID, 1, (3, 0), ((8, 1), (3, 1)),Type
, NumItems)
def Speak(self, Text=defaultNamedNotOptArg, Flags=0):
'Speak'
return self._oleobj_.InvokeTypes(12, LCID, 1, (3, 0), ((8, 1), (3, 49)),Text
, Flags)
def SpeakCompleteEvent(self):
'SpeakCompleteEvent'
return self._oleobj_.InvokeTypes(20, LCID, 1, (3, 0), (),)
def SpeakStream(self, Stream=defaultNamedNotOptArg, Flags=0):
'SpeakStream'
return self._oleobj_.InvokeTypes(13, LCID, 1, (3, 0), ((9, 1), (3, 49)),Stream
, Flags)
def WaitUntilDone(self, msTimeout=defaultNamedNotOptArg):
'WaitUntilDone'
return self._oleobj_.InvokeTypes(19, LCID, 1, (11, 0), ((3, 1),),msTimeout
)
_prop_map_get_ = {
"AlertBoundary": (10, 2, (3, 0), (), "AlertBoundary", None),
"AllowAudioOutputFormatChangesOnNextSet": (7, 2, (11, 0), (), "AllowAudioOutputFormatChangesOnNextSet", None),
# Method 'AudioOutput' returns object of type 'ISpeechObjectToken'
"AudioOutput": (3, 2, (9, 0), (), "AudioOutput", '{C74A3ADC-B727-4500-A84A-B526721C8B8C}'),
# Method 'AudioOutputStream' returns object of type 'ISpeechBaseStream'
"AudioOutputStream": (4, 2, (9, 0), (), "AudioOutputStream", '{6450336F-7D49-4CED-8097-49D6DEE37294}'),
"EventInterests": (8, 2, (3, 0), (), "EventInterests", None),
"Priority": (9, 2, (3, 0), (), "Priority", None),
"Rate": (5, 2, (3, 0), (), "Rate", None),
# Method 'Status' returns object of type 'ISpeechVoiceStatus'
"Status": (1, 2, (9, 0), (), "Status", '{8BE47B07-57F6-11D2-9EEE-00C04F797396}'),
"SynchronousSpeakTimeout": (11, 2, (3, 0), (), "SynchronousSpeakTimeout", None),
# Method 'Voice' returns object of type 'ISpeechObjectToken'
"Voice": (2, 2, (9, 0), (), "Voice", '{C74A3ADC-B727-4500-A84A-B526721C8B8C}'),
"Volume": (6, 2, (3, 0), (), "Volume", None),
}
_prop_map_put_ = {
"AlertBoundary": ((10, LCID, 4, 0),()),
"AllowAudioOutputFormatChangesOnNextSet": ((7, LCID, 4, 0),()),
"AudioOutput": ((3, LCID, 8, 0),()),
"AudioOutputStream": ((4, LCID, 8, 0),()),
"EventInterests": ((8, LCID, 4, 0),()),
"Priority": ((9, LCID, 4, 0),()),
"Rate": ((5, LCID, 4, 0),()),
"SynchronousSpeakTimeout": ((11, LCID, 4, 0),()),
"Voice": ((2, LCID, 8, 0),()),
"Volume": ((6, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
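# A minimal usage sketch (not part of the generated bindings): speaking
# asynchronously through ISpeechVoice. The flag value 1 is assumed to be
# SVSFlagsAsync from the SpeechVoiceSpeakFlags enumeration, and a msTimeout
# of -1 is assumed to wait indefinitely.
#
#   import win32com.client
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   voice.Speak("Rendering in the background", 1)  # 1 == SVSFlagsAsync
#   voice.WaitUntilDone(-1)                        # block until playback ends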
class ISpeechVoiceStatus(DispatchBaseClass):
'ISpeechVoiceStatus Interface'
CLSID = IID('{8BE47B07-57F6-11D2-9EEE-00C04F797396}')
coclass_clsid = None
_prop_map_get_ = {
"CurrentStreamNumber": (1, 2, (3, 0), (), "CurrentStreamNumber", None),
"InputSentenceLength": (8, 2, (3, 0), (), "InputSentenceLength", None),
"InputSentencePosition": (7, 2, (3, 0), (), "InputSentencePosition", None),
"InputWordLength": (6, 2, (3, 0), (), "InputWordLength", None),
"InputWordPosition": (5, 2, (3, 0), (), "InputWordPosition", None),
"LastBookmark": (9, 2, (8, 0), (), "LastBookmark", None),
"LastBookmarkId": (10, 2, (3, 0), (), "LastBookmarkId", None),
"LastHResult": (3, 2, (3, 0), (), "LastHResult", None),
"LastStreamNumberQueued": (2, 2, (3, 0), (), "LastStreamNumberQueued", None),
"PhonemeId": (11, 2, (2, 0), (), "PhonemeId", None),
"RunningState": (4, 2, (3, 0), (), "RunningState", None),
"VisemeId": (12, 2, (2, 0), (), "VisemeId", None),
}
_prop_map_put_ = {
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechWaveFormatEx(DispatchBaseClass):
'ISpeechWaveFormatEx Interface'
CLSID = IID('{7A1EF0D5-1581-4741-88E4-209A49F11A10}')
coclass_clsid = IID('{C79A574C-63BE-44B9-801F-283F87F898BE}')
_prop_map_get_ = {
"AvgBytesPerSec": (4, 2, (3, 0), (), "AvgBytesPerSec", None),
"BitsPerSample": (6, 2, (2, 0), (), "BitsPerSample", None),
"BlockAlign": (5, 2, (2, 0), (), "BlockAlign", None),
"Channels": (2, 2, (2, 0), (), "Channels", None),
"ExtraData": (7, 2, (12, 0), (), "ExtraData", None),
"FormatTag": (1, 2, (2, 0), (), "FormatTag", None),
"SamplesPerSec": (3, 2, (3, 0), (), "SamplesPerSec", None),
}
_prop_map_put_ = {
"AvgBytesPerSec": ((4, LCID, 4, 0),()),
"BitsPerSample": ((6, LCID, 4, 0),()),
"BlockAlign": ((5, LCID, 4, 0),()),
"Channels": ((2, LCID, 4, 0),()),
"ExtraData": ((7, LCID, 4, 0),()),
"FormatTag": ((1, LCID, 4, 0),()),
"SamplesPerSec": ((3, LCID, 4, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
class ISpeechXMLRecoResult(DispatchBaseClass):
'ISpeechXMLRecoResult Interface'
CLSID = IID('{AAEC54AF-8F85-4924-944D-B79D39D72E19}')
coclass_clsid = None
# Result is of type ISpeechPhraseAlternates
def Alternates(self, RequestCount=defaultNamedNotOptArg, StartElement=0, Elements=-1):
'Alternates'
ret = self._oleobj_.InvokeTypes(5, LCID, 1, (9, 0), ((3, 1), (3, 49), (3, 49)),RequestCount
, StartElement, Elements)
if ret is not None:
ret = Dispatch(ret, u'Alternates', '{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')
return ret
# Result is of type ISpeechMemoryStream
def Audio(self, StartElement=0, Elements=-1):
'Audio'
ret = self._oleobj_.InvokeTypes(6, LCID, 1, (9, 0), ((3, 49), (3, 49)),StartElement
, Elements)
if ret is not None:
ret = Dispatch(ret, u'Audio', '{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')
return ret
def DiscardResultInfo(self, ValueTypes=defaultNamedNotOptArg):
'DiscardResultInfo'
return self._oleobj_.InvokeTypes(9, LCID, 1, (24, 0), ((3, 1),),ValueTypes
)
def GetXMLErrorInfo(self, LineNumber=pythoncom.Missing, ScriptLine=pythoncom.Missing, Source=pythoncom.Missing, Description=pythoncom.Missing
, ResultCode=pythoncom.Missing):
'GetXMLErrorInfo'
return self._ApplyTypes_(11, 1, (11, 0), ((16387, 2), (16392, 2), (16392, 2), (16392, 2), (16387, 2)), u'GetXMLErrorInfo', None,LineNumber
, ScriptLine, Source, Description, ResultCode)
def GetXMLResult(self, Options=defaultNamedNotOptArg):
'GetXMLResult'
# Result is a Unicode object
return self._oleobj_.InvokeTypes(10, LCID, 1, (8, 0), ((3, 1),),Options
)
def SaveToMemory(self):
'SaveToMemory'
return self._ApplyTypes_(8, 1, (12, 0), (), u'SaveToMemory', None,)
def SpeakAudio(self, StartElement=0, Elements=-1, Flags=0):
'SpeakAudio'
return self._oleobj_.InvokeTypes(7, LCID, 1, (3, 0), ((3, 49), (3, 49), (3, 49)),StartElement
, Elements, Flags)
_prop_map_get_ = {
# Method 'AudioFormat' returns object of type 'ISpeechAudioFormat'
"AudioFormat": (3, 2, (9, 0), (), "AudioFormat", '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}'),
# Method 'PhraseInfo' returns object of type 'ISpeechPhraseInfo'
"PhraseInfo": (4, 2, (9, 0), (), "PhraseInfo", '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}'),
# Method 'RecoContext' returns object of type 'ISpeechRecoContext'
"RecoContext": (1, 2, (9, 0), (), "RecoContext", '{580AA49D-7E1E-4809-B8E2-57DA806104B8}'),
# Method 'Times' returns object of type 'ISpeechRecoResultTimes'
"Times": (2, 2, (9, 0), (), "Times", '{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}'),
}
_prop_map_put_ = {
"AudioFormat": ((3, LCID, 8, 0),()),
}
def __iter__(self):
"Return a Python iterator for this object"
try:
ob = self._oleobj_.InvokeTypes(-4,LCID,3,(13, 10),())
except pythoncom.error:
raise TypeError("This object does not support enumeration")
return win32com.client.util.Iterator(ob, None)
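# A minimal usage sketch (not part of the generated bindings): extracting
# the SML document from an ISpeechXMLRecoResult, e.g. the Result argument
# delivered to an OnRecognition handler. The Options value 0 is assumed to
# be SPXRO_SML from the SPXMLRESULTOPTIONS enumeration.
#
#   def dump_sml(result):
#       print result.GetXMLResult(0)   # 0 == SPXRO_SML (assumed)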
class _ISpeechRecoContextEvents:
CLSID = CLSID_Sink = IID('{7B8FCB42-0E9D-4F00-A048-7B04D6179D3D}')
coclass_clsid = IID('{73AD6842-ACE0-45E8-A4DD-8795881A2C2A}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
11 : "OnFalseRecognition",
16 : "OnRecognitionForOtherContext",
18 : "OnEnginePrivate",
17 : "OnAudioLevel",
1 : "OnStartStream",
3 : "OnBookmark",
13 : "OnRequestUI",
12 : "OnInterference",
4 : "OnSoundStart",
2 : "OnEndStream",
14 : "OnRecognizerStateChange",
9 : "OnPropertyNumberChange",
8 : "OnHypothesis",
15 : "OnAdaptation",
6 : "OnPhraseStart",
5 : "OnSoundEnd",
10 : "OnPropertyStringChange",
7 : "OnRecognition",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnFalseRecognition(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Result=defaultNamedNotOptArg):
# 'FalseRecognition'
# def OnRecognitionForOtherContext(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'RecognitionForOtherContext'
# def OnEnginePrivate(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, EngineData=defaultNamedNotOptArg):
# 'EnginePrivate'
# def OnAudioLevel(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, AudioLevel=defaultNamedNotOptArg):
# 'AudioLevel'
# def OnStartStream(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'StartStream'
# def OnBookmark(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, BookmarkId=defaultNamedNotOptArg, Options=defaultNamedNotOptArg):
# 'Bookmark'
# def OnRequestUI(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, UIType=defaultNamedNotOptArg):
# 'RequestUI'
# def OnInterference(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Interference=defaultNamedNotOptArg):
# 'Interference'
# def OnSoundStart(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'SoundStart'
# def OnEndStream(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, StreamReleased=defaultNamedNotOptArg):
# 'EndStream'
# def OnRecognizerStateChange(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, NewState=defaultNamedNotOptArg):
# 'RecognizerStateChange'
# def OnPropertyNumberChange(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, PropertyName=defaultNamedNotOptArg, NewNumberValue=defaultNamedNotOptArg):
# 'PropertyNumberChange'
# def OnHypothesis(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Result=defaultNamedNotOptArg):
# 'Hypothesis'
# def OnAdaptation(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'Adaptation'
# def OnPhraseStart(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'PhraseStart'
# def OnSoundEnd(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'SoundEnd'
# def OnPropertyStringChange(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, PropertyName=defaultNamedNotOptArg, NewStringValue=defaultNamedNotOptArg):
# 'PropertyStringChange'
# def OnRecognition(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, RecognitionType=defaultNamedNotOptArg, Result=defaultNamedNotOptArg):
# 'Recognition'
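# A minimal usage sketch (not part of the generated bindings): wiring these
# notifications up with win32com.client.DispatchWithEvents and a dictation
# grammar. CreateGrammar/DictationLoad/DictationSetState come from
# ISpeechRecoContext and ISpeechRecoGrammar elsewhere in this module, and
# the state value 1 is assumed to be SGDSActive.
#
#   import pythoncom, win32com.client
#
#   class RecoEvents:
#       def OnRecognition(self, StreamNumber, StreamPosition,
#                         RecognitionType, Result):
#           print Result.PhraseInfo.GetText()
#
#   ctx = win32com.client.DispatchWithEvents("SAPI.SpSharedRecoContext",
#                                            RecoEvents)
#   grammar = ctx.CreateGrammar()
#   grammar.DictationLoad()
#   grammar.DictationSetState(1)         # 1 == SGDSActive (assumed)
#   while True:
#       pythoncom.PumpWaitingMessages()  # deliver pending COM events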
class _ISpeechVoiceEvents:
CLSID = CLSID_Sink = IID('{A372ACD1-3BEF-4BBD-8FFB-CB3E2B416AF8}')
coclass_clsid = IID('{96749377-3391-11D2-9EE3-00C04F797396}')
_public_methods_ = [] # For COM Server support
_dispid_to_func_ = {
3 : "OnVoiceChange",
9 : "OnAudioLevel",
5 : "OnWord",
10 : "OnEnginePrivate",
7 : "OnSentence",
4 : "OnBookmark",
1 : "OnStartStream",
2 : "OnEndStream",
6 : "OnPhoneme",
8 : "OnViseme",
}
def __init__(self, oobj = None):
if oobj is None:
self._olecp = None
else:
import win32com.server.util
from win32com.server.policy import EventHandlerPolicy
cpc=oobj._oleobj_.QueryInterface(pythoncom.IID_IConnectionPointContainer)
cp=cpc.FindConnectionPoint(self.CLSID_Sink)
cookie=cp.Advise(win32com.server.util.wrap(self, usePolicy=EventHandlerPolicy))
self._olecp,self._olecp_cookie = cp,cookie
def __del__(self):
try:
self.close()
except pythoncom.com_error:
pass
def close(self):
if self._olecp is not None:
cp,cookie,self._olecp,self._olecp_cookie = self._olecp,self._olecp_cookie,None,None
cp.Unadvise(cookie)
def _query_interface_(self, iid):
import win32com.server.util
if iid==self.CLSID_Sink: return win32com.server.util.wrap(self)
# Event Handlers
# If you create handlers, they should have the following prototypes:
# def OnVoiceChange(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, VoiceObjectToken=defaultNamedNotOptArg):
# 'VoiceChange'
# def OnAudioLevel(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, AudioLevel=defaultNamedNotOptArg):
# 'AudioLevel'
# def OnWord(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, CharacterPosition=defaultNamedNotOptArg, Length=defaultNamedNotOptArg):
# 'Word'
# def OnEnginePrivate(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, EngineData=defaultNamedNotOptArg):
# 'EnginePrivate'
# def OnSentence(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, CharacterPosition=defaultNamedNotOptArg, Length=defaultNamedNotOptArg):
# 'Sentence'
# def OnBookmark(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Bookmark=defaultNamedNotOptArg, BookmarkId=defaultNamedNotOptArg):
# 'Bookmark'
# def OnStartStream(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'StartStream'
# def OnEndStream(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg):
# 'EndStream'
# def OnPhoneme(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Duration=defaultNamedNotOptArg, NextPhoneId=defaultNamedNotOptArg
# , Feature=defaultNamedNotOptArg, CurrentPhoneId=defaultNamedNotOptArg):
# 'Phoneme'
# def OnViseme(self, StreamNumber=defaultNamedNotOptArg, StreamPosition=defaultNamedNotOptArg, Duration=defaultNamedNotOptArg, NextVisemeId=defaultNamedNotOptArg
# , Feature=defaultNamedNotOptArg, CurrentVisemeId=defaultNamedNotOptArg):
# 'Viseme'
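# A minimal usage sketch (not part of the generated bindings): receiving
# word-boundary callbacks while speaking. The interest value 32 is assumed
# to be SVEWordBoundary from the SpeechVoiceEvents enumeration, and the
# flag value 1 to be SVSFlagsAsync.
#
#   import pythoncom, win32com.client
#
#   class VoiceEvents:
#       def OnWord(self, StreamNumber, StreamPosition,
#                  CharacterPosition, Length):
#           print "word at", CharacterPosition, "length", Length
#
#   voice = win32com.client.DispatchWithEvents("SAPI.SpVoice", VoiceEvents)
#   voice.EventInterests = 32            # 32 == SVEWordBoundary (assumed)
#   voice.Speak("one two three", 1)      # 1 == SVSFlagsAsync (assumed)
#   while not voice.WaitUntilDone(10):   # poll so events can be delivered
#       pythoncom.PumpWaitingMessages()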
from win32com.client import CoClassBaseClass
# This CoClass is known by the name 'SAPI.SpAudioFormat.1'
class SpAudioFormat(CoClassBaseClass): # A CoClass
# SpAudioFormat Class
CLSID = IID('{9EF96870-E160-4792-820D-48CF0649E4EC}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechAudioFormat,
]
default_interface = ISpeechAudioFormat
# This CoClass is known by the name 'SAPI.SpCompressedLexicon.1'
class SpCompressedLexicon(CoClassBaseClass): # A CoClass
# SpCompressedLexicon Class
CLSID = IID('{90903716-2F42-11D3-9C26-00C04F8EF87C}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpCustomStream.1'
class SpCustomStream(CoClassBaseClass): # A CoClass
# SpCustomStream Class
CLSID = IID('{8DBEF13F-1948-4AA8-8CF0-048EEBED95D8}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechCustomStream,
]
default_interface = ISpeechCustomStream
# This CoClass is known by the name 'SAPI.SpFileStream.1'
class SpFileStream(CoClassBaseClass): # A CoClass
# SpFileStream Class
CLSID = IID('{947812B3-2AE1-4644-BA86-9E90DED7EC91}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechFileStream,
]
default_interface = ISpeechFileStream
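# A minimal usage sketch (not part of the generated bindings): rendering
# speech into a .wav file via SpFileStream. The Open call belongs to
# ISpeechFileStream (defined elsewhere in this module) and the mode value 3
# is assumed to be SSFMCreateForWrite from SpeechStreamFileMode.
#
#   import win32com.client
#   stream = win32com.client.Dispatch("SAPI.SpFileStream")
#   stream.Open("output.wav", 3)         # 3 == SSFMCreateForWrite (assumed)
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   voice.AudioOutputStream = stream
#   voice.Speak("Saved to disk")
#   stream.Close()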
# This CoClass is known by the name 'SAPI.SpInProcRecoContext.1'
class SpInProcRecoContext(CoClassBaseClass): # A CoClass
# SpInProcRecoContext Class
CLSID = IID('{73AD6842-ACE0-45E8-A4DD-8795881A2C2A}')
coclass_sources = [
_ISpeechRecoContextEvents,
]
default_source = _ISpeechRecoContextEvents
coclass_interfaces = [
ISpeechRecoContext,
]
default_interface = ISpeechRecoContext
# This CoClass is known by the name 'Sapi.SpInprocRecognizer.1'
class SpInprocRecognizer(CoClassBaseClass): # A CoClass
# SpInprocRecognizer Class
CLSID = IID('{41B89B6B-9399-11D2-9623-00C04F8EE628}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechRecognizer,
]
default_interface = ISpeechRecognizer
# This CoClass is known by the name 'SAPI.SpLexicon.1'
class SpLexicon(CoClassBaseClass): # A CoClass
# SpLexicon Class
CLSID = IID('{0655E396-25D0-11D3-9C26-00C04F8EF87C}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechLexicon,
]
default_interface = ISpeechLexicon
# This CoClass is known by the name 'SAPI.SpMMAudioEnum.1'
class SpMMAudioEnum(CoClassBaseClass): # A CoClass
# SpMMAudioEnum Class
CLSID = IID('{AB1890A0-E91F-11D2-BB91-00C04F8EE6C0}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpMMAudioIn.1'
class SpMMAudioIn(CoClassBaseClass): # A CoClass
# SpMMAudioIn Class
CLSID = IID('{CF3D2E50-53F2-11D2-960C-00C04F8EE628}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechMMSysAudio,
]
default_interface = ISpeechMMSysAudio
# This CoClass is known by the name 'SAPI.SpMMAudioOut.1'
class SpMMAudioOut(CoClassBaseClass): # A CoClass
# SpMMAudioOut Class
CLSID = IID('{A8C680EB-3D32-11D2-9EE7-00C04F797396}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechMMSysAudio,
]
default_interface = ISpeechMMSysAudio
# This CoClass is known by the name 'SAPI.SpMemoryStream.1'
class SpMemoryStream(CoClassBaseClass): # A CoClass
# SpMemoryStream Class
CLSID = IID('{5FB7EF7D-DFF4-468A-B6B7-2FCBD188F994}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechMemoryStream,
]
default_interface = ISpeechMemoryStream
# This CoClass is known by the name 'SAPI.SpNotifyTranslator.1'
class SpNotifyTranslator(CoClassBaseClass): # A CoClass
# SpNotify
CLSID = IID('{E2AE5372-5D40-11D2-960E-00C04F8EE628}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpNullPhoneConverter.1'
class SpNullPhoneConverter(CoClassBaseClass): # A CoClass
# SpNullPhoneConverter Class
CLSID = IID('{455F24E9-7396-4A16-9715-7C0FDBE3EFE3}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpObjectToken.1'
class SpObjectToken(CoClassBaseClass): # A CoClass
# SpObjectToken Class
CLSID = IID('{EF411752-3736-4CB4-9C8C-8EF4CCB58EFE}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechObjectToken,
]
default_interface = ISpeechObjectToken
# This CoClass is known by the name 'SAPI.SpObjectTokenCategory.1'
class SpObjectTokenCategory(CoClassBaseClass): # A CoClass
# SpObjectTokenCategory Class
CLSID = IID('{A910187F-0C7A-45AC-92CC-59EDAFB77B53}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechObjectTokenCategory,
]
default_interface = ISpeechObjectTokenCategory
# This CoClass is known by the name 'SAPI.SpPhoneConverter.1'
class SpPhoneConverter(CoClassBaseClass): # A CoClass
# SpPhoneConverter Class
CLSID = IID('{9185F743-1143-4C28-86B5-BFF14F20E5C8}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechPhoneConverter,
]
default_interface = ISpeechPhoneConverter
class SpPhoneticAlphabetConverter(CoClassBaseClass): # A CoClass
# SpPhoneticAlphabetConverter Class
CLSID = IID('{4F414126-DFE3-4629-99EE-797978317EAD}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpPhraseInfoBuilder.1'
class SpPhraseInfoBuilder(CoClassBaseClass): # A CoClass
# SpPhraseInfoBuilder Class
CLSID = IID('{C23FC28D-C55F-4720-8B32-91F73C2BD5D1}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechPhraseInfoBuilder,
]
default_interface = ISpeechPhraseInfoBuilder
# This CoClass is known by the name 'SAPI.SpResourceManager.1'
class SpResourceManager(CoClassBaseClass): # A CoClass
# SpResourceManager
CLSID = IID('{96749373-3391-11D2-9EE3-00C04F797396}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpSharedRecoContext.1'
class SpSharedRecoContext(CoClassBaseClass): # A CoClass
# SpSharedRecoContext Class
CLSID = IID('{47206204-5ECA-11D2-960F-00C04F8EE628}')
coclass_sources = [
_ISpeechRecoContextEvents,
]
default_source = _ISpeechRecoContextEvents
coclass_interfaces = [
ISpeechRecoContext,
]
default_interface = ISpeechRecoContext
# This CoClass is known by the name 'Sapi.SpSharedRecognizer.1'
class SpSharedRecognizer(CoClassBaseClass): # A CoClass
# SpSharedRecognizer Class
CLSID = IID('{3BEE4890-4FE9-4A37-8C1E-5E7E12791C1F}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechRecognizer,
]
default_interface = ISpeechRecognizer
# This CoClass is known by the name 'SAPI.SpShortcut.1'
class SpShortcut(CoClassBaseClass): # A CoClass
# SpShortcut Class
CLSID = IID('{0D722F1A-9FCF-4E62-96D8-6DF8F01A26AA}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpStream.1'
class SpStream(CoClassBaseClass): # A CoClass
# SpStream Class
CLSID = IID('{715D9C59-4442-11D2-9605-00C04F8EE628}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpStreamFormatConverter.1'
class SpStreamFormatConverter(CoClassBaseClass): # A CoClass
# FormatConverter Class
CLSID = IID('{7013943A-E2EC-11D2-A086-00C04F8EF9B5}')
coclass_sources = [
]
coclass_interfaces = [
]
# This CoClass is known by the name 'SAPI.SpTextSelectionInformation.1'
class SpTextSelectionInformation(CoClassBaseClass): # A CoClass
# SpTextSelectionInformation Class
CLSID = IID('{0F92030A-CBFD-4AB8-A164-FF5985547FF6}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechTextSelectionInformation,
]
default_interface = ISpeechTextSelectionInformation
# This CoClass is known by the name 'SAPI.SpUncompressedLexicon.1'
class SpUnCompressedLexicon(CoClassBaseClass): # A CoClass
# SpUnCompressedLexicon Class
CLSID = IID('{C9E37C15-DF92-4727-85D6-72E5EEB6995A}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechLexicon,
]
default_interface = ISpeechLexicon
# This CoClass is known by the name 'SAPI.SpVoice.1'
class SpVoice(CoClassBaseClass): # A CoClass
# SpVoice Class
CLSID = IID('{96749377-3391-11D2-9EE3-00C04F797396}')
coclass_sources = [
_ISpeechVoiceEvents,
]
default_source = _ISpeechVoiceEvents
coclass_interfaces = [
ISpeechVoice,
]
default_interface = ISpeechVoice
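# A minimal usage sketch (not part of the generated bindings): the most
# common use of this CoClass is to create it by ProgID, pick an installed
# voice token, and speak synchronously.
#
#   import win32com.client
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   tokens = voice.GetVoices()
#   if tokens.Count > 0:
#       voice.Voice = tokens.Item(0)     # first installed voice
#   voice.Speak("Hello from SAPI")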
# This CoClass is known by the name 'SAPI.SpWaveFormatEx.1'
class SpWaveFormatEx(CoClassBaseClass): # A CoClass
# SpWaveFormatEx Class
CLSID = IID('{C79A574C-63BE-44B9-801F-283F87F898BE}')
coclass_sources = [
]
coclass_interfaces = [
ISpeechWaveFormatEx,
]
default_interface = ISpeechWaveFormatEx
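# A minimal usage sketch (not part of the generated bindings): describing
# 16 kHz, 16-bit mono PCM with ISpeechWaveFormatEx. FormatTag 1 is the
# standard WAVE_FORMAT_PCM constant; the derived fields follow the usual
# PCM relations (BlockAlign = Channels * BitsPerSample / 8 and
# AvgBytesPerSec = SamplesPerSec * BlockAlign).
#
#   import win32com.client
#   fmt = win32com.client.Dispatch("SAPI.SpWaveFormatEx")
#   fmt.FormatTag = 1             # WAVE_FORMAT_PCM
#   fmt.Channels = 1
#   fmt.SamplesPerSec = 16000
#   fmt.BitsPerSample = 16
#   fmt.BlockAlign = 2            # 1 channel * 16 bits / 8
#   fmt.AvgBytesPerSec = 32000    # 16000 samples/sec * 2 bytes/sample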
IEnumSpObjectTokens_vtables_dispatch_ = 0
IEnumSpObjectTokens_vtables_ = [
(( u'Next' , u'celt' , u'pelt' , u'pceltFetched' , ), 1610678272, (1610678272, (), [
(19, 1, None, None) , (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'Skip' , u'celt' , ), 1610678273, (1610678273, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'Reset' , ), 1610678274, (1610678274, (), [ ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'Clone' , u'ppEnum' , ), 1610678275, (1610678275, (), [ (16397, 2, None, "IID('{06B64F9E-7FDA-11D2-B4F2-00C04F797396}')") , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'ppToken' , ), 1610678276, (1610678276, (), [ (19, 1, None, None) ,
(16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetCount' , u'pCount' , ), 1610678277, (1610678277, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
]
IEnumString_vtables_dispatch_ = 0
IEnumString_vtables_ = [
(( u'RemoteNext' , u'celt' , u'rgelt' , u'pceltFetched' , ), 1610678272, (1610678272, (), [
(19, 1, None, None) , (16415, 2, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'Skip' , u'celt' , ), 1610678273, (1610678273, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'Reset' , ), 1610678274, (1610678274, (), [ ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'Clone' , u'ppEnum' , ), 1610678275, (1610678275, (), [ (16397, 2, None, "IID('{00000101-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
]
IInternetSecurityManager_vtables_dispatch_ = 0
IInternetSecurityManager_vtables_ = [
(( u'SetSecuritySite' , u'pSite' , ), 1610678272, (1610678272, (), [ (13, 1, None, "IID('{79EAC9ED-BAF9-11CE-8C82-00AA004BA90B}')") , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetSecuritySite' , u'ppSite' , ), 1610678273, (1610678273, (), [ (16397, 2, None, "IID('{79EAC9ED-BAF9-11CE-8C82-00AA004BA90B}')") , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'MapUrlToZone' , u'pwszUrl' , u'pdwZone' , u'dwFlags' , ), 1610678274, (1610678274, (), [
(31, 1, None, None) , (16403, 2, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'GetSecurityId' , u'pwszUrl' , u'pbSecurityId' , u'pcbSecurityId' , u'dwReserved' ,
), 1610678275, (1610678275, (), [ (31, 1, None, None) , (16401, 2, None, None) , (16403, 3, None, None) , (21, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'ProcessUrlAction' , u'pwszUrl' , u'dwAction' , u'pPolicy' , u'cbPolicy' ,
u'pContext' , u'cbContext' , u'dwFlags' , u'dwReserved' , ), 1610678276, (1610678276, (), [
(31, 1, None, None) , (19, 1, None, None) , (16401, 2, None, None) , (19, 1, None, None) , (16401, 1, None, None) ,
(19, 1, None, None) , (19, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'QueryCustomPolicy' , u'pwszUrl' , u'guidKey' , u'ppPolicy' , u'pcbPolicy' ,
u'pContext' , u'cbContext' , u'dwReserved' , ), 1610678277, (1610678277, (), [ (31, 1, None, None) ,
(36, 1, None, None) , (16401, 2, None, None) , (16403, 2, None, None) , (16401, 1, None, None) , (19, 1, None, None) ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'SetZoneMapping' , u'dwZone' , u'lpszPattern' , u'dwFlags' , ), 1610678278, (1610678278, (), [
(19, 1, None, None) , (31, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'GetZoneMappings' , u'dwZone' , u'ppenumString' , u'dwFlags' , ), 1610678279, (1610678279, (), [
(19, 1, None, None) , (16397, 2, None, "IID('{00000101-0000-0000-C000-000000000046}')") , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
IInternetSecurityMgrSite_vtables_dispatch_ = 0
IInternetSecurityMgrSite_vtables_ = [
(( u'GetWindow' , u'phwnd' , ), 1610678272, (1610678272, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'EnableModeless' , u'fEnable' , ), 1610678273, (1610678273, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
ISequentialStream_vtables_dispatch_ = 0
ISequentialStream_vtables_ = [
(( u'RemoteRead' , u'pv' , u'cb' , u'pcbRead' , ), 1610678272, (1610678272, (), [
(16401, 2, None, None) , (19, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'RemoteWrite' , u'pv' , u'cb' , u'pcbWritten' , ), 1610678273, (1610678273, (), [
(16401, 1, None, None) , (19, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
IServiceProvider_vtables_dispatch_ = 0
IServiceProvider_vtables_ = [
(( u'RemoteQueryService' , u'guidService' , u'riid' , u'ppvObject' , ), 1610678272, (1610678272, (), [
(36, 1, None, None) , (36, 1, None, None) , (16397, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
]
ISpAudio_vtables_dispatch_ = 0
ISpAudio_vtables_ = [
(( u'SetState' , u'NewState' , u'ullReserved' , ), 1610874880, (1610874880, (), [ (3, 1, None, None) ,
(21, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'SetFormat' , u'rguidFmtId' , u'pWaveFormatEx' , ), 1610874881, (1610874881, (), [ (36, 1, None, None) ,
(36, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'GetStatus' , u'pStatus' , ), 1610874882, (1610874882, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'SetBufferInfo' , u'pBuffInfo' , ), 1610874883, (1610874883, (), [ (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'GetBufferInfo' , u'pBuffInfo' , ), 1610874884, (1610874884, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'GetDefaultFormat' , u'pFormatId' , u'ppCoMemWaveFormatEx' , ), 1610874885, (1610874885, (), [ (36, 2, None, None) ,
(16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'EventHandle' , ), 1610874886, (1610874886, (), [ ], 1 , 1 , 4 , 0 , 168 , (16408, 0, None, None) , 0 , )),
(( u'GetVolumeLevel' , u'pLevel' , ), 1610874887, (1610874887, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'SetVolumeLevel' , u'Level' , ), 1610874888, (1610874888, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'GetBufferNotifySize' , u'pcbSize' , ), 1610874889, (1610874889, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'SetBufferNotifySize' , u'cbSize' , ), 1610874890, (1610874890, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
]
ISpDataKey_vtables_dispatch_ = 0
ISpDataKey_vtables_ = [
(( u'SetData' , u'pszValueName' , u'cbData' , u'pData' , ), 1610678272, (1610678272, (), [
(31, 1, None, None) , (19, 1, None, None) , (16401, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetData' , u'pszValueName' , u'pcbData' , u'pData' , ), 1610678273, (1610678273, (), [
(31, 1, None, None) , (16403, 1, None, None) , (16401, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'SetStringValue' , u'pszValueName' , u'pszValue' , ), 1610678274, (1610678274, (), [ (31, 1, None, None) ,
(31, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'GetStringValue' , u'pszValueName' , u'ppszValue' , ), 1610678275, (1610678275, (), [ (31, 1, None, None) ,
(16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'SetDWORD' , u'pszValueName' , u'dwValue' , ), 1610678276, (1610678276, (), [ (31, 1, None, None) ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetDWORD' , u'pszValueName' , u'pdwValue' , ), 1610678277, (1610678277, (), [ (31, 1, None, None) ,
(16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'OpenKey' , u'pszSubKeyName' , u'ppSubKey' , ), 1610678278, (1610678278, (), [ (31, 1, None, None) ,
(16397, 2, None, "IID('{14056581-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'CreateKey' , u'pszSubKey' , u'ppSubKey' , ), 1610678279, (1610678279, (), [ (31, 1, None, None) ,
(16397, 2, None, "IID('{14056581-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'DeleteKey' , u'pszSubKey' , ), 1610678280, (1610678280, (), [ (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'DeleteValue' , u'pszValueName' , ), 1610678281, (1610678281, (), [ (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'EnumKeys' , u'Index' , u'ppszSubKeyName' , ), 1610678282, (1610678282, (), [ (19, 1, None, None) ,
(16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'EnumValues' , u'Index' , u'ppszValueName' , ), 1610678283, (1610678283, (), [ (19, 1, None, None) ,
(16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpEventSink_vtables_dispatch_ = 0
ISpEventSink_vtables_ = [
(( u'AddEvents' , u'pEventArray' , u'ulCount' , ), 1610678272, (1610678272, (), [ (36, 1, None, None) ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetEventInterest' , u'pullEventInterest' , ), 1610678273, (1610678273, (), [ (16405, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
ISpEventSource_vtables_dispatch_ = 0
ISpEventSource_vtables_ = [
(( u'SetInterest' , u'ullEventInterest' , u'ullQueuedInterest' , ), 1610743808, (1610743808, (), [ (21, 1, None, None) ,
(21, 1, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'GetEvents' , u'ulCount' , u'pEventArray' , u'pulFetched' , ), 1610743809, (1610743809, (), [
(19, 1, None, None) , (36, 2, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'GetInfo' , u'pInfo' , ), 1610743810, (1610743810, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
]
ISpGrammarBuilder_vtables_dispatch_ = 0
ISpGrammarBuilder_vtables_ = [
(( u'ResetGrammar' , u'NewLanguage' , ), 1610678272, (1610678272, (), [ (18, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetRule' , u'pszRuleName' , u'dwRuleId' , u'dwAttributes' , u'fCreateIfNotExist' ,
u'phInitialState' , ), 1610678273, (1610678273, (), [ (31, 1, None, None) , (19, 1, None, None) , (19, 1, None, None) ,
(3, 1, None, None) , (16408, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'ClearRule' , u'hState' , ), 1610678274, (1610678274, (), [ (16408, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'CreateNewState' , u'hState' , u'phState' , ), 1610678275, (1610678275, (), [ (16408, 1, None, None) ,
(16408, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'AddWordTransition' , u'hFromState' , u'hToState' , u'psz' , u'pszSeparators' ,
u'eWordType' , u'Weight' , u'pPropInfo' , ), 1610678276, (1610678276, (), [ (16408, 1, None, None) ,
(16408, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) , (3, 1, None, None) , (4, 1, None, None) ,
(36, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'AddRuleTransition' , u'hFromState' , u'hToState' , u'hRule' , u'Weight' ,
u'pPropInfo' , ), 1610678277, (1610678277, (), [ (16408, 1, None, None) , (16408, 1, None, None) , (16408, 1, None, None) ,
(4, 1, None, None) , (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AddResource' , u'hRuleState' , u'pszResourceName' , u'pszResourceValue' , ), 1610678278, (1610678278, (), [
(16408, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Commit' , u'dwReserved' , ), 1610678279, (1610678279, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpLexicon_vtables_dispatch_ = 0
ISpLexicon_vtables_ = [
(( u'GetPronunciations' , u'pszWord' , u'LangId' , u'dwFlags' , u'pWordPronunciationList' ,
), 1610678272, (1610678272, (), [ (31, 1, None, None) , (18, 1, None, None) , (19, 1, None, None) , (36, 3, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'AddPronunciation' , u'pszWord' , u'LangId' , u'ePartOfSpeech' , u'pszPronunciation' ,
), 1610678273, (1610678273, (), [ (31, 1, None, None) , (18, 1, None, None) , (3, 1, None, None) , (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'RemovePronunciation' , u'pszWord' , u'LangId' , u'ePartOfSpeech' , u'pszPronunciation' ,
), 1610678274, (1610678274, (), [ (31, 1, None, None) , (18, 1, None, None) , (3, 1, None, None) , (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'GetGeneration' , u'pdwGeneration' , ), 1610678275, (1610678275, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'GetGenerationChange' , u'dwFlags' , u'pdwGeneration' , u'pWordList' , ), 1610678276, (1610678276, (), [
(19, 1, None, None) , (16403, 3, None, None) , (36, 3, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetWords' , u'dwFlags' , u'pdwGeneration' , u'pdwCookie' , u'pWordList' ,
), 1610678277, (1610678277, (), [ (19, 1, None, None) , (16403, 3, None, None) , (16403, 3, None, None) , (36, 3, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
]
ISpMMSysAudio_vtables_dispatch_ = 0
ISpMMSysAudio_vtables_ = [
(( u'GetDeviceId' , u'puDeviceId' , ), 1610940416, (1610940416, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'SetDeviceId' , u'uDeviceId' , ), 1610940417, (1610940417, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'GetMMHandle' , u'pHandle' , ), 1610940418, (1610940418, (), [ (16408, 2, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( u'GetLineId' , u'puLineId' , ), 1610940419, (1610940419, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( u'SetLineId' , u'uLineId' , ), 1610940420, (1610940420, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
]
ISpNotifySink_vtables_dispatch_ = 0
ISpNotifySink_vtables_ = [
(( u'Notify' , ), 1610678272, (1610678272, (), [ ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
]
ISpNotifySource_vtables_dispatch_ = 0
ISpNotifySource_vtables_ = [
(( u'SetNotifySink' , u'pNotifySink' , ), 1610678272, (1610678272, (), [ (13, 1, None, "IID('{259684DC-37C3-11D2-9603-00C04F8EE628}')") , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetNotifyWindowMessage' , u'hWnd' , u'Msg' , u'wParam' , u'lParam' ,
), 1610678273, (1610678273, (), [ (36, 1, None, None) , (19, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'SetNotifyCallbackFunction' , u'pfnCallback' , u'wParam' , u'lParam' , ), 1610678274, (1610678274, (), [
(16408, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'SetNotifyCallbackInterface' , u'pSpCallback' , u'wParam' , u'lParam' , ), 1610678275, (1610678275, (), [
(16408, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'SetNotifyWin32Event' , ), 1610678276, (1610678276, (), [ ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'WaitForNotifyEvent' , u'dwMilliseconds' , ), 1610678277, (1610678277, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'GetNotifyEventHandle' , ), 1610678278, (1610678278, (), [ ], 1 , 1 , 4 , 0 , 72 , (16408, 0, None, None) , 0 , )),
]
ISpNotifyTranslator_vtables_dispatch_ = 0
ISpNotifyTranslator_vtables_ = [
(( u'InitWindowMessage' , u'hWnd' , u'Msg' , u'wParam' , u'lParam' ,
), 1610743808, (1610743808, (), [ (36, 1, None, None) , (19, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'InitCallback' , u'pfnCallback' , u'wParam' , u'lParam' , ), 1610743809, (1610743809, (), [
(16408, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'InitSpNotifyCallback' , u'pSpCallback' , u'wParam' , u'lParam' , ), 1610743810, (1610743810, (), [
(16408, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'InitWin32Event' , u'hEvent' , u'fCloseHandleOnRelease' , ), 1610743811, (1610743811, (), [ (16408, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Wait' , u'dwMilliseconds' , ), 1610743812, (1610743812, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'GetEventHandle' , ), 1610743813, (1610743813, (), [ ], 1 , 1 , 4 , 0 , 72 , (16408, 0, None, None) , 0 , )),
]
ISpObjectToken_vtables_dispatch_ = 0
ISpObjectToken_vtables_ = [
(( u'SetId' , u'pszCategoryId' , u'pszTokenId' , u'fCreateIfNotExist' , ), 1610743808, (1610743808, (), [
(31, 0, None, None) , (31, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetId' , u'ppszCoMemTokenId' , ), 1610743809, (1610743809, (), [ (16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'GetCategory' , u'ppTokenCategory' , ), 1610743810, (1610743810, (), [ (16397, 2, None, "IID('{2D3D3845-39AF-4850-BBF9-40B49780011D}')") , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'CreateInstance' , u'pUnkOuter' , u'dwClsContext' , u'riid' , u'ppvObject' ,
), 1610743811, (1610743811, (), [ (13, 1, None, None) , (19, 1, None, None) , (36, 1, None, None) , (16408, 2, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'GetStorageFileName' , u'clsidCaller' , u'pszValueName' , u'pszFileNameSpecifier' , u'nFolder' ,
u'ppszFilePath' , ), 1610743812, (1610743812, (), [ (36, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) ,
(19, 1, None, None) , (16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'RemoveStorageFileName' , u'clsidCaller' , u'pszKeyName' , u'fDeleteFile' , ), 1610743813, (1610743813, (), [
(36, 1, None, None) , (31, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'Remove' , u'pclsidCaller' , ), 1610743814, (1610743814, (), [ (36, 0, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'IsUISupported' , u'pszTypeOfUI' , u'pvExtraData' , u'cbExtraData' , u'punkObject' ,
u'pfSupported' , ), 1610743815, (1610743815, (), [ (31, 1, None, None) , (16408, 1, None, None) , (19, 1, None, None) ,
(13, 1, None, None) , (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'DisplayUI' , u'hWndParent' , u'pszTitle' , u'pszTypeOfUI' , u'pvExtraData' ,
u'cbExtraData' , u'punkObject' , ), 1610743816, (1610743816, (), [ (36, 1, None, None) , (31, 1, None, None) ,
(31, 1, None, None) , (16408, 1, None, None) , (19, 1, None, None) , (13, 1, None, None) , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'MatchesAttributes' , u'pszAttributes' , u'pfMatches' , ), 1610743817, (1610743817, (), [ (31, 1, None, None) ,
(16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
]
ISpObjectTokenCategory_vtables_dispatch_ = 0
ISpObjectTokenCategory_vtables_ = [
(( u'SetId' , u'pszCategoryId' , u'fCreateIfNotExist' , ), 1610743808, (1610743808, (), [ (31, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetId' , u'ppszCoMemCategoryId' , ), 1610743809, (1610743809, (), [ (16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'GetDataKey' , u'spdkl' , u'ppDataKey' , ), 1610743810, (1610743810, (), [ (3, 1, None, None) ,
(16397, 2, None, "IID('{14056581-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'EnumTokens' , u'pzsReqAttribs' , u'pszOptAttribs' , u'ppEnum' , ), 1610743811, (1610743811, (), [
(31, 1, None, None) , (31, 1, None, None) , (16397, 2, None, "IID('{06B64F9E-7FDA-11D2-B4F2-00C04F797396}')") , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'SetDefaultTokenId' , u'pszTokenId' , ), 1610743812, (1610743812, (), [ (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'GetDefaultTokenId' , u'ppszCoMemTokenId' , ), 1610743813, (1610743813, (), [ (16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
]
ISpObjectWithToken_vtables_dispatch_ = 0
ISpObjectWithToken_vtables_ = [
(( u'SetObjectToken' , u'pToken' , ), 1610678272, (1610678272, (), [ (13, 1, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetObjectToken' , u'ppToken' , ), 1610678273, (1610678273, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
ISpPhoneConverter_vtables_dispatch_ = 0
ISpPhoneConverter_vtables_ = [
(( u'PhoneToId' , u'pszPhone' , u'pId' , ), 1610743808, (1610743808, (), [ (31, 1, None, None) ,
(16402, 2, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'IdToPhone' , u'pId' , u'pszPhone' , ), 1610743809, (1610743809, (), [ (31, 1, None, None) ,
(16402, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
]
ISpPhoneticAlphabetConverter_vtables_dispatch_ = 0
ISpPhoneticAlphabetConverter_vtables_ = [
(( u'GetLangId' , u'pLangID' , ), 1610678272, (1610678272, (), [ (16402, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetLangId' , u'LangId' , ), 1610678273, (1610678273, (), [ (18, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'SAPI2UPS' , u'pszSAPIId' , u'pszUPSId' , u'cMaxLength' , ), 1610678274, (1610678274, (), [
(16402, 1, None, None) , (16402, 2, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'UPS2SAPI' , u'pszUPSId' , u'pszSAPIId' , u'cMaxLength' , ), 1610678275, (1610678275, (), [
(16402, 1, None, None) , (16402, 2, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'GetMaxConvertLength' , u'cSrcLength' , u'bSAPI2UPS' , u'pcMaxDestLength' , ), 1610678276, (1610678276, (), [
(19, 1, None, None) , (3, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
]
ISpPhoneticAlphabetSelection_vtables_dispatch_ = 0
ISpPhoneticAlphabetSelection_vtables_ = [
(( u'IsAlphabetUPS' , u'pfIsUPS' , ), 1610678272, (1610678272, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetAlphabetToUPS' , u'fForceUPS' , ), 1610678273, (1610678273, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
ISpPhrase_vtables_dispatch_ = 0
ISpPhrase_vtables_ = [
(( u'GetPhrase' , u'ppCoMemPhrase' , ), 1610678272, (1610678272, (), [ (16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetSerializedPhrase' , u'ppCoMemPhrase' , ), 1610678273, (1610678273, (), [ (16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'GetText' , u'ulStart' , u'ulCount' , u'fUseTextReplacements' , u'ppszCoMemText' ,
u'pbDisplayAttributes' , ), 1610678274, (1610678274, (), [ (19, 1, None, None) , (19, 1, None, None) , (3, 1, None, None) ,
(16415, 2, None, None) , (16401, 18, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'Discard' , u'dwValueTypes' , ), 1610678275, (1610678275, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
]
ISpPhraseAlt_vtables_dispatch_ = 0
ISpPhraseAlt_vtables_ = [
(( u'GetAltInfo' , u'ppParent' , u'pulStartElementInParent' , u'pcElementsInParent' , u'pcElementsInAlt' ,
), 1610743808, (1610743808, (), [ (16397, 2, None, "IID('{1A5C0354-B621-4B5A-8791-D306ED379E53}')") , (16403, 2, None, None) , (16403, 2, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Commit' , ), 1610743809, (1610743809, (), [ ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
]
ISpProperties_vtables_dispatch_ = 0
ISpProperties_vtables_ = [
(( u'SetPropertyNum' , u'pName' , u'lValue' , ), 1610678272, (1610678272, (), [ (31, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetPropertyNum' , u'pName' , u'plValue' , ), 1610678273, (1610678273, (), [ (31, 1, None, None) ,
(16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'SetPropertyString' , u'pName' , u'pValue' , ), 1610678274, (1610678274, (), [ (31, 1, None, None) ,
(31, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'GetPropertyString' , u'pName' , u'ppCoMemValue' , ), 1610678275, (1610678275, (), [ (31, 1, None, None) ,
(16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
]
ISpRecoCategory_vtables_dispatch_ = 0
ISpRecoCategory_vtables_ = [
(( u'GetType' , u'peCategoryType' , ), 1610678272, (1610678272, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
]
ISpRecoContext_vtables_dispatch_ = 0
ISpRecoContext_vtables_ = [
(( u'GetRecognizer' , u'ppRecognizer' , ), 1610809344, (1610809344, (), [ (16397, 2, None, "IID('{C2B5F241-DAA0-4507-9E16-5A1EAA2B7A5C}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'CreateGrammar' , u'ullGrammarID' , u'ppGrammar' , ), 1610809345, (1610809345, (), [ (21, 1, None, None) ,
(16397, 2, None, "IID('{2177DB29-7F45-47D0-8554-067E91C80502}')") , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'GetStatus' , u'pStatus' , ), 1610809346, (1610809346, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetMaxAlternates' , u'pcAlternates' , ), 1610809347, (1610809347, (), [ (16403, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'SetMaxAlternates' , u'cAlternates' , ), 1610809348, (1610809348, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'SetAudioOptions' , u'Options' , u'pAudioFormatId' , u'pWaveFormatEx' , ), 1610809349, (1610809349, (), [
(3, 1, None, None) , (36, 1, None, None) , (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'GetAudioOptions' , u'pOptions' , u'pAudioFormatId' , u'ppCoMemWFEX' , ), 1610809350, (1610809350, (), [
(16387, 1, None, None) , (36, 2, None, None) , (16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'DeserializeResult' , u'pSerializedResult' , u'ppResult' , ), 1610809351, (1610809351, (), [ (36, 1, None, None) ,
(16397, 2, None, "IID('{20B053BE-E235-43CD-9A2A-8D17A48B7842}')") , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'Bookmark' , u'Options' , u'ullStreamPosition' , u'lparamEvent' , ), 1610809352, (1610809352, (), [
(3, 1, None, None) , (21, 1, None, None) , (20, 1, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'SetAdaptationData' , u'pAdaptationData' , u'cch' , ), 1610809353, (1610809353, (), [ (31, 1, None, None) ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'Pause' , u'dwReserved' , ), 1610809354, (1610809354, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'Resume' , u'dwReserved' , ), 1610809355, (1610809355, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'SetVoice' , u'pVoice' , u'fAllowFormatChanges' , ), 1610809356, (1610809356, (), [ (13, 1, None, "IID('{6C44DF74-72B9-4992-A1EC-EF996E0422D4}')") ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( u'GetVoice' , u'ppVoice' , ), 1610809357, (1610809357, (), [ (16397, 2, None, "IID('{6C44DF74-72B9-4992-A1EC-EF996E0422D4}')") , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'SetVoicePurgeEvent' , u'ullEventInterest' , ), 1610809358, (1610809358, (), [ (21, 1, None, None) , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'GetVoicePurgeEvent' , u'pullEventInterest' , ), 1610809359, (1610809359, (), [ (16405, 2, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( u'SetContextState' , u'eContextState' , ), 1610809360, (1610809360, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( u'GetContextState' , u'peContextState' , ), 1610809361, (1610809361, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
]
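# Annotation: the offset field (24, 32, 40, ...) in each entry is the byte
# position of the method's slot in the COM vtable. The 8-byte stride starting
# just past IUnknown (3 * 8 == 24) suggests this module was generated on a
# 64-bit Python; a 32-bit makepy run would have produced 4-byte strides.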
ISpRecoContext2_vtables_dispatch_ = 0
ISpRecoContext2_vtables_ = [
(( u'SetGrammarOptions' , u'eGrammarOptions' , ), 1610678272, (1610678272, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'GetGrammarOptions' , u'peGrammarOptions' , ), 1610678273, (1610678273, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'SetAdaptationData2' , u'pAdaptationData' , u'cch' , u'pTopicName' , u'eAdaptationSettings' ,
u'eRelevance' , ), 1610678274, (1610678274, (), [ (31, 1, None, None) , (19, 1, None, None) , (31, 1, None, None) ,
(19, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
]
ISpRecoGrammar_vtables_dispatch_ = 0
ISpRecoGrammar_vtables_ = [
(( u'GetGrammarId' , u'pullGrammarId' , ), 1610743808, (1610743808, (), [ (16405, 2, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'GetRecoContext' , u'ppRecoCtxt' , ), 1610743809, (1610743809, (), [ (16397, 2, None, "IID('{F740A62F-7C15-489E-8234-940A33D9272D}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromFile' , u'pszFileName' , u'Options' , ), 1610743810, (1610743810, (), [ (31, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromObject' , u'rcid' , u'pszGrammarName' , u'Options' , ), 1610743811, (1610743811, (), [
(36, 1, None, None) , (31, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromResource' , u'hModule' , u'pszResourceName' , u'pszResourceType' , u'wLanguage' ,
u'Options' , ), 1610743812, (1610743812, (), [ (16408, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) ,
(18, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromMemory' , u'pGrammar' , u'Options' , ), 1610743813, (1610743813, (), [ (36, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromProprietaryGrammar' , u'rguidParam' , u'pszStringParam' , u'pvDataPrarm' , u'cbDataSize' ,
u'Options' , ), 1610743814, (1610743814, (), [ (36, 1, None, None) , (31, 1, None, None) , (16408, 1, None, None) ,
(19, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'SetRuleState' , u'pszName' , u'pReserved' , u'NewState' , ), 1610743815, (1610743815, (), [
(31, 1, None, None) , (16408, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'SetRuleIdState' , u'ulRuleId' , u'NewState' , ), 1610743816, (1610743816, (), [ (19, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'LoadDictation' , u'pszTopicName' , u'Options' , ), 1610743817, (1610743817, (), [ (31, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'UnloadDictation' , ), 1610743818, (1610743818, (), [ ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'SetDictationState' , u'NewState' , ), 1610743819, (1610743819, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'SetWordSequenceData' , u'pText' , u'cchText' , u'pInfo' , ), 1610743820, (1610743820, (), [
(16402, 1, None, None) , (19, 1, None, None) , (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'SetTextSelection' , u'pInfo' , ), 1610743821, (1610743821, (), [ (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'IsPronounceable' , u'pszWord' , u'pWordPronounceable' , ), 1610743822, (1610743822, (), [ (31, 1, None, None) ,
(16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( u'SetGrammarState' , u'eGrammarState' , ), 1610743823, (1610743823, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'SaveCmd' , u'pStream' , u'ppszCoMemErrorText' , ), 1610743824, (1610743824, (), [ (13, 1, None, "IID('{0000000C-0000-0000-C000-000000000046}')") ,
(16415, 18, None, None) , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'GetGrammarState' , u'peGrammarState' , ), 1610743825, (1610743825, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
]
ISpRecoGrammar2_vtables_dispatch_ = 0
ISpRecoGrammar2_vtables_ = [
(( u'GetRules' , u'ppCoMemRules' , u'puNumRules' , ), 1610678272, (1610678272, (), [ (16420, 2, None, None) ,
(16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromFile2' , u'pszFileName' , u'Options' , u'pszSharingUri' , u'pszBaseUri' ,
), 1610678273, (1610678273, (), [ (31, 1, None, None) , (3, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'LoadCmdFromMemory2' , u'pGrammar' , u'Options' , u'pszSharingUri' , u'pszBaseUri' ,
), 1610678274, (1610678274, (), [ (36, 1, None, None) , (3, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'SetRulePriority' , u'pszRuleName' , u'ulRuleId' , u'nRulePriority' , ), 1610678275, (1610678275, (), [
(31, 1, None, None) , (19, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'SetRuleWeight' , u'pszRuleName' , u'ulRuleId' , u'flWeight' , ), 1610678276, (1610678276, (), [
(31, 1, None, None) , (19, 1, None, None) , (4, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'SetDictationWeight' , u'flWeight' , ), 1610678277, (1610678277, (), [ (4, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'SetGrammarLoader' , u'pLoader' , ), 1610678278, (1610678278, (), [ (9, 1, None, "IID('{B9AC5783-FCD0-4B21-B119-B4F8DA8FD2C3}')") , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'SetSMLSecurityManager' , u'pSMLSecurityManager' , ), 1610678279, (1610678279, (), [ (13, 1, None, "IID('{79EAC9EE-BAF9-11CE-8C82-00AA004BA90B}')") , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpRecoResult_vtables_dispatch_ = 0
ISpRecoResult_vtables_ = [
(( u'GetResultTimes' , u'pTimes' , ), 1610743808, (1610743808, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetAlternates' , u'ulStartElement' , u'cElements' , u'ulRequestCount' , u'ppPhrases' ,
u'pcPhrasesReturned' , ), 1610743809, (1610743809, (), [ (19, 1, None, None) , (19, 1, None, None) , (19, 1, None, None) ,
(16397, 2, None, "IID('{8FCEBC98-4E49-4067-9C6C-D86A0E092E3D}')") , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'GetAudio' , u'ulStartElement' , u'cElements' , u'ppStream' , ), 1610743810, (1610743810, (), [
(19, 1, None, None) , (19, 1, None, None) , (16397, 2, None, "IID('{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}')") , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'SpeakAudio' , u'ulStartElement' , u'cElements' , u'dwFlags' , u'pulStreamNumber' ,
), 1610743811, (1610743811, (), [ (19, 1, None, None) , (19, 1, None, None) , (19, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Serialize' , u'ppCoMemSerializedResult' , ), 1610743812, (1610743812, (), [ (16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'ScaleAudio' , u'pAudioFormatId' , u'pWaveFormatEx' , ), 1610743813, (1610743813, (), [ (36, 1, None, None) ,
(36, 1, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'GetRecoContext' , u'ppRecoContext' , ), 1610743814, (1610743814, (), [ (16397, 2, None, "IID('{F740A62F-7C15-489E-8234-940A33D9272D}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
ISpRecognizer_vtables_dispatch_ = 0
ISpRecognizer_vtables_ = [
(( u'SetRecognizer' , u'pRecognizer' , ), 1610743808, (1610743808, (), [ (13, 1, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetRecognizer' , u'ppRecognizer' , ), 1610743809, (1610743809, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'SetInput' , u'pUnkInput' , u'fAllowFormatChanges' , ), 1610743810, (1610743810, (), [ (13, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'GetInputObjectToken' , u'ppToken' , ), 1610743811, (1610743811, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'GetInputStream' , u'ppStream' , ), 1610743812, (1610743812, (), [ (16397, 2, None, "IID('{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}')") , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'CreateRecoContext' , u'ppNewCtxt' , ), 1610743813, (1610743813, (), [ (16397, 2, None, "IID('{F740A62F-7C15-489E-8234-940A33D9272D}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'GetRecoProfile' , u'ppToken' , ), 1610743814, (1610743814, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'SetRecoProfile' , u'pToken' , ), 1610743815, (1610743815, (), [ (13, 1, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'IsSharedInstance' , ), 1610743816, (1610743816, (), [ ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetRecoState' , u'pState' , ), 1610743817, (1610743817, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'SetRecoState' , u'NewState' , ), 1610743818, (1610743818, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'GetStatus' , u'pStatus' , ), 1610743819, (1610743819, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'GetFormat' , u'WaveFormatType' , u'pFormatId' , u'ppCoMemWFEX' , ), 1610743820, (1610743820, (), [
(3, 1, None, None) , (36, 2, None, None) , (16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'IsUISupported' , u'pszTypeOfUI' , u'pvExtraData' , u'cbExtraData' , u'pfSupported' ,
), 1610743821, (1610743821, (), [ (31, 1, None, None) , (16408, 1, None, None) , (19, 1, None, None) , (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'DisplayUI' , u'hWndParent' , u'pszTitle' , u'pszTypeOfUI' , u'pvExtraData' ,
u'cbExtraData' , ), 1610743822, (1610743822, (), [ (36, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) ,
(16408, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'EmulateRecognition' , u'pPhrase' , ), 1610743823, (1610743823, (), [ (13, 1, None, "IID('{1A5C0354-B621-4B5A-8791-D306ED379E53}')") , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
]
ISpRecognizer2_vtables_dispatch_ = 0
ISpRecognizer2_vtables_ = [
(( u'EmulateRecognitionEx' , u'pPhrase' , u'dwCompareFlags' , ), 1610678272, (1610678272, (), [ (13, 1, None, "IID('{1A5C0354-B621-4B5A-8791-D306ED379E53}')") ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetTrainingState' , u'fDoingTraining' , u'fAdaptFromTrainingData' , ), 1610678273, (1610678273, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'ResetAcousticModelAdaptation' , ), 1610678274, (1610678274, (), [ ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
]
ISpRecognizer3_vtables_dispatch_ = 0
ISpRecognizer3_vtables_ = [
(( u'GetCategory' , u'categoryType' , u'ppCategory' , ), 1610678272, (1610678272, (), [ (3, 1, None, None) ,
(16397, 2, None, "IID('{DA0CD0F9-14A2-4F09-8C2A-85CC48979345}')") , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetActiveCategory' , u'pCategory' , ), 1610678273, (1610678273, (), [ (13, 1, None, "IID('{DA0CD0F9-14A2-4F09-8C2A-85CC48979345}')") , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'GetActiveCategory' , u'ppCategory' , ), 1610678274, (1610678274, (), [ (16397, 2, None, "IID('{DA0CD0F9-14A2-4F09-8C2A-85CC48979345}')") , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
]
ISpResourceManager_vtables_dispatch_ = 0
ISpResourceManager_vtables_ = [
(( u'SetObject' , u'guidServiceId' , u'punkObject' , ), 1610743808, (1610743808, (), [ (36, 1, None, None) ,
(13, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'GetObject' , u'guidServiceId' , u'ObjectCLSID' , u'ObjectIID' , u'fReleaseWhenLastExternalRefReleased' ,
u'ppObject' , ), 1610743809, (1610743809, (), [ (36, 1, None, None) , (36, 1, None, None) , (36, 1, None, None) ,
(3, 1, None, None) , (16408, 2, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
]
ISpSerializeState_vtables_dispatch_ = 0
ISpSerializeState_vtables_ = [
(( u'GetSerializedState' , u'ppbData' , u'pulSize' , u'dwReserved' , ), 1610678272, (1610678272, (), [
(16401, 2, None, None) , (16403, 2, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'SetSerializedState' , u'pbData' , u'ulSize' , u'dwReserved' , ), 1610678273, (1610678273, (), [
(16401, 1, None, None) , (19, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
]
ISpShortcut_vtables_dispatch_ = 0
ISpShortcut_vtables_ = [
(( u'AddShortcut' , u'pszDisplay' , u'LangId' , u'pszSpoken' , u'shType' ,
), 1610678272, (1610678272, (), [ (31, 1, None, None) , (18, 1, None, None) , (31, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 24 , (3, 0, None, None) , 0 , )),
(( u'RemoveShortcut' , u'pszDisplay' , u'LangId' , u'pszSpoken' , u'shType' ,
), 1610678273, (1610678273, (), [ (31, 1, None, None) , (18, 1, None, None) , (31, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 32 , (3, 0, None, None) , 0 , )),
(( u'GetShortcuts' , u'LangId' , u'pShortcutpairList' , ), 1610678274, (1610678274, (), [ (18, 1, None, None) ,
(36, 3, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'GetGeneration' , u'pdwGeneration' , ), 1610678275, (1610678275, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'GetWordsFromGenerationChange' , u'pdwGeneration' , u'pWordList' , ), 1610678276, (1610678276, (), [ (16403, 3, None, None) ,
(36, 3, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetWords' , u'pdwGeneration' , u'pdwCookie' , u'pWordList' , ), 1610678277, (1610678277, (), [
(16403, 3, None, None) , (16403, 3, None, None) , (36, 3, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'GetShortcutsForGeneration' , u'pdwGeneration' , u'pdwCookie' , u'pShortcutpairList' , ), 1610678278, (1610678278, (), [
(16403, 3, None, None) , (16403, 3, None, None) , (36, 3, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'GetGenerationChange' , u'pdwGeneration' , u'pShortcutpairList' , ), 1610678279, (1610678279, (), [ (16403, 3, None, None) ,
(36, 3, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpStream_vtables_dispatch_ = 0
ISpStream_vtables_ = [
(( u'SetBaseStream' , u'pStream' , u'rguidFormat' , u'pWaveFormatEx' , ), 1610874880, (1610874880, (), [
(13, 1, None, "IID('{0000000C-0000-0000-C000-000000000046}')") , (36, 1, None, None) , (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetBaseStream' , u'ppStream' , ), 1610874881, (1610874881, (), [ (16397, 2, None, "IID('{0000000C-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'BindToFile' , u'pszFileName' , u'eMode' , u'pFormatId' , u'pWaveFormatEx' ,
u'ullEventInterest' , ), 1610874882, (1610874882, (), [ (31, 1, None, None) , (3, 1, None, None) , (36, 1, None, None) ,
(36, 0, None, None) , (21, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'Close' , ), 1610874883, (1610874883, (), [ ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
]
ISpStreamFormat_vtables_dispatch_ = 0
ISpStreamFormat_vtables_ = [
(( u'GetFormat' , u'pguidFormatId' , u'ppCoMemWaveFormatEx' , ), 1610809344, (1610809344, (), [ (36, 1, None, None) ,
(16420, 2, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpStreamFormatConverter_vtables_dispatch_ = 0
ISpStreamFormatConverter_vtables_ = [
(( u'SetBaseStream' , u'pStream' , u'fSetFormatToBaseStreamFormat' , u'fWriteToBaseStream' , ), 1610874880, (1610874880, (), [
(13, 1, None, "IID('{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}')") , (3, 1, None, None) , (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'GetBaseStream' , u'ppStream' , ), 1610874881, (1610874881, (), [ (16397, 2, None, "IID('{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}')") , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'SetFormat' , u'rguidFormatIdOfConvertedStream' , u'pWaveFormatExOfConvertedStream' , ), 1610874882, (1610874882, (), [ (36, 1, None, None) ,
(36, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'ResetSeekPosition' , ), 1610874883, (1610874883, (), [ ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'ScaleConvertedToBaseOffset' , u'ullOffsetConvertedStream' , u'pullOffsetBaseStream' , ), 1610874884, (1610874884, (), [ (21, 1, None, None) ,
(16405, 2, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'ScaleBaseToConvertedOffset' , u'ullOffsetBaseStream' , u'pullOffsetConvertedStream' , ), 1610874885, (1610874885, (), [ (21, 1, None, None) ,
(16405, 2, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
]
ISpVoice_vtables_dispatch_ = 0
ISpVoice_vtables_ = [
(( u'SetOutput' , u'pUnkOutput' , u'fAllowFormatChanges' , ), 1610809344, (1610809344, (), [ (13, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'GetOutputObjectToken' , u'ppObjectToken' , ), 1610809345, (1610809345, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'GetOutputStream' , u'ppStream' , ), 1610809346, (1610809346, (), [ (16397, 2, None, "IID('{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}')") , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'Pause' , ), 1610809347, (1610809347, (), [ ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'Resume' , ), 1610809348, (1610809348, (), [ ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'SetVoice' , u'pToken' , ), 1610809349, (1610809349, (), [ (13, 1, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'GetVoice' , u'ppToken' , ), 1610809350, (1610809350, (), [ (16397, 2, None, "IID('{14056589-E16C-11D2-BB90-00C04F8EE6C0}')") , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'Speak' , u'pwcs' , u'dwFlags' , u'pulStreamNumber' , ), 1610809351, (1610809351, (), [
(31, 1, None, None) , (19, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'SpeakStream' , u'pStream' , u'dwFlags' , u'pulStreamNumber' , ), 1610809352, (1610809352, (), [
(13, 1, None, "IID('{0000000C-0000-0000-C000-000000000046}')") , (19, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'GetStatus' , u'pStatus' , u'ppszLastBookmark' , ), 1610809353, (1610809353, (), [ (36, 2, None, None) ,
(16415, 2, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'Skip' , u'pItemType' , u'lNumItems' , u'pulNumSkipped' , ), 1610809354, (1610809354, (), [
(31, 1, None, None) , (3, 1, None, None) , (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'SetPriority' , u'ePriority' , ), 1610809355, (1610809355, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'GetPriority' , u'pePriority' , ), 1610809356, (1610809356, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( u'SetAlertBoundary' , u'eBoundary' , ), 1610809357, (1610809357, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'GetAlertBoundary' , u'peBoundary' , ), 1610809358, (1610809358, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'SetRate' , u'RateAdjust' , ), 1610809359, (1610809359, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( u'GetRate' , u'pRateAdjust' , ), 1610809360, (1610809360, (), [ (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( u'SetVolume' , u'usVolume' , ), 1610809361, (1610809361, (), [ (18, 1, None, None) , ], 1 , 1 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
(( u'GetVolume' , u'pusVolume' , ), 1610809362, (1610809362, (), [ (16402, 2, None, None) , ], 1 , 1 , 4 , 0 , 248 , (3, 0, None, None) , 0 , )),
(( u'WaitUntilDone' , u'msTimeout' , ), 1610809363, (1610809363, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 256 , (3, 0, None, None) , 0 , )),
(( u'SetSyncSpeakTimeout' , u'msTimeout' , ), 1610809364, (1610809364, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 264 , (3, 0, None, None) , 0 , )),
(( u'GetSyncSpeakTimeout' , u'pmsTimeout' , ), 1610809365, (1610809365, (), [ (16403, 2, None, None) , ], 1 , 1 , 4 , 0 , 272 , (3, 0, None, None) , 0 , )),
(( u'SpeakCompleteEvent' , ), 1610809366, (1610809366, (), [ ], 1 , 1 , 4 , 0 , 280 , (16408, 0, None, None) , 0 , )),
(( u'IsUISupported' , u'pszTypeOfUI' , u'pvExtraData' , u'cbExtraData' , u'pfSupported' ,
), 1610809367, (1610809367, (), [ (31, 1, None, None) , (16408, 1, None, None) , (19, 1, None, None) , (16387, 2, None, None) , ], 1 , 1 , 4 , 0 , 288 , (3, 0, None, None) , 0 , )),
(( u'DisplayUI' , u'hWndParent' , u'pszTitle' , u'pszTypeOfUI' , u'pvExtraData' ,
u'cbExtraData' , ), 1610809368, (1610809368, (), [ (36, 1, None, None) , (31, 1, None, None) , (31, 1, None, None) ,
(16408, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 296 , (3, 0, None, None) , 0 , )),
]
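# Usage sketch (annotation): these ISpVoice slots back the text-to-speech
# objects win32com exposes late-bound. Assuming a machine with SAPI 5
# registered, the conventional route is via the automation coclass rather
# than this raw vtable:
#
#   import win32com.client
#   voice = win32com.client.Dispatch("SAPI.SpVoice")
#   voice.Rate = 0                       # surfaces SetRate/GetRate above
#   voice.Speak(u"Hello from SAPI")      # maps onto Speak(pwcs, dwFlags, ...)
#
# "SAPI.SpVoice" is the standard SAPI 5 ProgID; the rate value and the text
# are illustrative only.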
ISpXMLRecoResult_vtables_dispatch_ = 0
ISpXMLRecoResult_vtables_ = [
(( u'GetXMLResult' , u'ppszCoMemXMLResult' , u'Options' , ), 1610809344, (1610809344, (), [ (16415, 2, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'GetXMLErrorInfo' , u'pSemanticErrorInfo' , ), 1610809345, (1610809345, (), [ (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
]
ISpeechAudio_vtables_dispatch_ = 1
ISpeechAudio_vtables_ = [
(( u'Status' , u'Status' , ), 200, (200, (), [ (16393, 10, None, "IID('{C62D9C91-7458-47F6-862D-1EF86FB0B278}')") , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'BufferInfo' , u'BufferInfo' , ), 201, (201, (), [ (16393, 10, None, "IID('{11B103D8-1142-4EDF-A093-82FB3915F8CC}')") , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'DefaultFormat' , u'StreamFormat' , ), 202, (202, (), [ (16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'Volume' , u'Volume' , ), 203, (203, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'Volume' , u'Volume' , ), 203, (203, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'BufferNotifySize' , u'BufferNotifySize' , ), 204, (204, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'BufferNotifySize' , u'BufferNotifySize' , ), 204, (204, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'EventHandle' , u'EventHandle' , ), 205, (205, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 64 , )),
(( u'SetState' , u'State' , ), 206, (206, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 64 , )),
]
ISpeechAudioBufferInfo_vtables_dispatch_ = 1
ISpeechAudioBufferInfo_vtables_ = [
(( u'MinNotification' , u'MinNotification' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'MinNotification' , u'MinNotification' , ), 1, (1, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'BufferSize' , u'BufferSize' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'BufferSize' , u'BufferSize' , ), 2, (2, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'EventBias' , u'EventBias' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'EventBias' , u'EventBias' , ), 3, (3, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
]
ISpeechAudioFormat_vtables_dispatch_ = 1
ISpeechAudioFormat_vtables_ = [
(( u'Type' , u'AudioFormat' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Type' , u'AudioFormat' , ), 1, (1, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Guid' , u'Guid' , ), 2, (2, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 64 , )),
(( u'Guid' , u'Guid' , ), 2, (2, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 64 , )),
(( u'GetWaveFormatEx' , u'SpeechWaveFormatEx' , ), 3, (3, (), [ (16393, 10, None, "IID('{7A1EF0D5-1581-4741-88E4-209A49F11A10}')") , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 64 , )),
(( u'SetWaveFormatEx' , u'SpeechWaveFormatEx' , ), 4, (4, (), [ (9, 1, None, "IID('{7A1EF0D5-1581-4741-88E4-209A49F11A10}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 64 , )),
]
ISpeechAudioStatus_vtables_dispatch_ = 1
ISpeechAudioStatus_vtables_ = [
(( u'FreeBufferSpace' , u'FreeBufferSpace' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'NonBlockingIO' , u'NonBlockingIO' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'CurrentSeekPosition' , u'CurrentSeekPosition' , ), 4, (4, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'CurrentDevicePosition' , u'CurrentDevicePosition' , ), 5, (5, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
]
ISpeechBaseStream_vtables_dispatch_ = 1
ISpeechBaseStream_vtables_ = [
(( u'Format' , u'AudioFormat' , ), 1, (1, (), [ (16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Format' , u'AudioFormat' , ), 1, (1, (), [ (9, 1, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 8 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Read' , u'Buffer' , u'NumberOfBytes' , u'BytesRead' , ), 2, (2, (), [
(16396, 2, None, None) , (3, 1, None, None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Write' , u'Buffer' , u'BytesWritten' , ), 3, (3, (), [ (12, 1, None, None) ,
(16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Seek' , u'Position' , u'Origin' , u'NewPosition' , ), 4, (4, (), [
(12, 1, None, None) , (3, 49, '0', None) , (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
]
ISpeechCustomStream_vtables_dispatch_ = 1
ISpeechCustomStream_vtables_ = [
(( u'BaseStream' , u'ppUnkStream' , ), 100, (100, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'BaseStream' , u'ppUnkStream' , ), 100, (100, (), [ (13, 1, None, None) , ], 1 , 8 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
ISpeechDataKey_vtables_dispatch_ = 1
ISpeechDataKey_vtables_ = [
(( u'SetBinaryValue' , u'ValueName' , u'Value' , ), 1, (1, (), [ (8, 1, None, None) ,
(12, 1, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetBinaryValue' , u'ValueName' , u'Value' , ), 2, (2, (), [ (8, 1, None, None) ,
(16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'SetStringValue' , u'ValueName' , u'Value' , ), 3, (3, (), [ (8, 1, None, None) ,
(8, 1, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'GetStringValue' , u'ValueName' , u'Value' , ), 4, (4, (), [ (8, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'SetLongValue' , u'ValueName' , u'Value' , ), 5, (5, (), [ (8, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'GetLongValue' , u'ValueName' , u'Value' , ), 6, (6, (), [ (8, 1, None, None) ,
(16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'OpenKey' , u'SubKeyName' , u'SubKey' , ), 7, (7, (), [ (8, 1, None, None) ,
(16393, 10, None, "IID('{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'CreateKey' , u'SubKeyName' , u'SubKey' , ), 8, (8, (), [ (8, 1, None, None) ,
(16393, 10, None, "IID('{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')") , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'DeleteKey' , u'SubKeyName' , ), 9, (9, (), [ (8, 1, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'DeleteValue' , u'ValueName' , ), 10, (10, (), [ (8, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'EnumKeys' , u'Index' , u'SubKeyName' , ), 11, (11, (), [ (3, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'EnumValues' , u'Index' , u'ValueName' , ), 12, (12, (), [ (3, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
]
ISpeechFileStream_vtables_dispatch_ = 1
ISpeechFileStream_vtables_ = [
(( u'Open' , u'FileName' , u'FileMode' , u'DoEvents' , ), 100, (100, (), [
(8, 1, None, None) , (3, 49, '0', None) , (11, 49, 'False', None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Close' , ), 101, (101, (), [ ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
ISpeechGrammarRule_vtables_dispatch_ = 1
ISpeechGrammarRule_vtables_ = [
(( u'Attributes' , u'Attributes' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'InitialState' , u'State' , ), 2, (2, (), [ (16393, 10, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Name' , u'Name' , ), 3, (3, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Id' , u'Id' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Clear' , ), 5, (5, (), [ ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'AddResource' , u'ResourceName' , u'ResourceValue' , ), 6, (6, (), [ (8, 1, None, None) ,
(8, 1, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'AddState' , u'State' , ), 7, (7, (), [ (16393, 10, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
ISpeechGrammarRuleState_vtables_dispatch_ = 1
ISpeechGrammarRuleState_vtables_ = [
(( u'Rule' , u'Rule' , ), 1, (1, (), [ (16393, 10, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Transitions' , u'Transitions' , ), 2, (2, (), [ (16393, 10, None, "IID('{EABCE657-75BC-44A2-AA7F-C56476742963}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AddWordTransition' , u'DestState' , u'Words' , u'Separators' , u'Type' ,
u'PropertyName' , u'PropertyId' , u'PropertyValue' , u'Weight' , ), 3, (3, (), [
(9, 1, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , (8, 1, None, None) , (8, 49, "u' '", None) , (3, 49, '1', None) , (8, 49, "u''", None) ,
(3, 49, '0', None) , (16396, 49, "u''", None) , (4, 49, '1.0', None) , ], 1 , 1 , 4 , 0 , 72 , (3, 32, None, None) , 0 , )),
(( u'AddRuleTransition' , u'DestinationState' , u'Rule' , u'PropertyName' , u'PropertyId' ,
u'PropertyValue' , u'Weight' , ), 4, (4, (), [ (9, 1, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , (9, 1, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") ,
(8, 49, "u''", None) , (3, 49, '0', None) , (16396, 49, "u''", None) , (4, 49, '1.0', None) , ], 1 , 1 , 4 , 0 , 80 , (3, 32, None, None) , 0 , )),
(( u'AddSpecialTransition' , u'DestinationState' , u'Type' , u'PropertyName' , u'PropertyId' ,
u'PropertyValue' , u'Weight' , ), 5, (5, (), [ (9, 1, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , (3, 1, None, None) ,
(8, 49, "u''", None) , (3, 49, '0', None) , (16396, 49, "u''", None) , (4, 49, '1.0', None) , ], 1 , 1 , 4 , 0 , 88 , (3, 32, None, None) , 0 , )),
]
ISpeechGrammarRuleStateTransition_vtables_dispatch_ = 1
ISpeechGrammarRuleStateTransition_vtables_ = [
(( u'Type' , u'Type' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Text' , u'Text' , ), 2, (2, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Rule' , u'Rule' , ), 3, (3, (), [ (16393, 10, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Weight' , u'Weight' , ), 4, (4, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'PropertyName' , u'PropertyName' , ), 5, (5, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'PropertyId' , u'PropertyId' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'PropertyValue' , u'PropertyValue' , ), 7, (7, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'NextState' , u'NextState' , ), 8, (8, (), [ (16393, 10, None, "IID('{D4286F2C-EE67-45AE-B928-28D695362EDA}')") , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpeechGrammarRuleStateTransitions_vtables_dispatch_ = 1
ISpeechGrammarRuleStateTransitions_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Transition' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
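# Annotation: the '_NewEnum' members with DISPID -4 (DISPID_NEWENUM) are what
# let win32com drive these collections with a plain Python for-loop; the
# trailing flag value 1 (FUNCFLAG_FRESTRICTED) hides the member from normal
# method listings. The same pattern recurs in every ISpeech* collection in
# this module.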
ISpeechGrammarRules_vtables_dispatch_ = 1
ISpeechGrammarRules_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'FindRule' , u'RuleNameOrId' , u'Rule' , ), 6, (6, (), [ (12, 1, None, None) ,
(16393, 10, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Rule' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 1 , )),
(( u'Dynamic' , u'Dynamic' , ), 2, (2, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Add' , u'RuleName' , u'Attributes' , u'RuleId' , u'Rule' ,
), 3, (3, (), [ (8, 1, None, None) , (3, 1, None, None) , (3, 49, '0', None) , (16393, 10, None, "IID('{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Commit' , ), 4, (4, (), [ ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'CommitAndSave' , u'ErrorText' , u'SaveStream' , ), 5, (5, (), [ (16392, 2, None, None) ,
(16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpeechLexicon_vtables_dispatch_ = 1
ISpeechLexicon_vtables_ = [
(( u'GenerationId' , u'GenerationId' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 64 , )),
(( u'GetWords' , u'Flags' , u'GenerationId' , u'Words' , ), 2, (2, (), [
(3, 49, '3', None) , (16387, 50, '0', None) , (16393, 10, None, "IID('{8D199862-415E-47D5-AC4F-FAA608B424E6}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AddPronunciation' , u'bstrWord' , u'LangId' , u'PartOfSpeech' , u'bstrPronunciation' ,
), 3, (3, (), [ (8, 1, None, None) , (3, 1, None, None) , (3, 49, '0', None) , (8, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 72 , (3, 32, None, None) , 0 , )),
(( u'AddPronunciationByPhoneIds' , u'bstrWord' , u'LangId' , u'PartOfSpeech' , u'PhoneIds' ,
), 4, (4, (), [ (8, 1, None, None) , (3, 1, None, None) , (3, 49, '0', None) , (16396, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 80 , (3, 32, None, None) , 64 , )),
(( u'RemovePronunciation' , u'bstrWord' , u'LangId' , u'PartOfSpeech' , u'bstrPronunciation' ,
), 5, (5, (), [ (8, 1, None, None) , (3, 1, None, None) , (3, 49, '0', None) , (8, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 88 , (3, 32, None, None) , 0 , )),
(( u'RemovePronunciationByPhoneIds' , u'bstrWord' , u'LangId' , u'PartOfSpeech' , u'PhoneIds' ,
), 6, (6, (), [ (8, 1, None, None) , (3, 1, None, None) , (3, 49, '0', None) , (16396, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 96 , (3, 32, None, None) , 64 , )),
(( u'GetPronunciations' , u'bstrWord' , u'LangId' , u'TypeFlags' , u'ppPronunciations' ,
), 7, (7, (), [ (8, 1, None, None) , (3, 49, '0', None) , (3, 49, '3', None) , (16393, 10, None, "IID('{72829128-5682-4704-A0D4-3E2BB6F2EAD3}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'GetGenerationChange' , u'GenerationId' , u'ppWords' , ), 8, (8, (), [ (16387, 3, None, None) ,
(16393, 10, None, "IID('{8D199862-415E-47D5-AC4F-FAA608B424E6}')") , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 64 , )),
]
ISpeechLexiconPronunciation_vtables_dispatch_ = 1
ISpeechLexiconPronunciation_vtables_ = [
(( u'Type' , u'LexiconType' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'LangId' , u'LangId' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'PartOfSpeech' , u'PartOfSpeech' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'PhoneIds' , u'PhoneIds' , ), 4, (4, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Symbolic' , u'Symbolic' , ), 5, (5, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
]
ISpeechLexiconPronunciations_vtables_dispatch_ = 1
ISpeechLexiconPronunciations_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Pronunciation' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{95252C5D-9E43-4F4A-9899-48EE73352F9F}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechLexiconWord_vtables_dispatch_ = 1
ISpeechLexiconWord_vtables_ = [
(( u'LangId' , u'LangId' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Type' , u'WordType' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Word' , u'Word' , ), 3, (3, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Pronunciations' , u'Pronunciations' , ), 4, (4, (), [ (16393, 10, None, "IID('{72829128-5682-4704-A0D4-3E2BB6F2EAD3}')") , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpeechLexiconWords_vtables_dispatch_ = 1
ISpeechLexiconWords_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Word' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechMMSysAudio_vtables_dispatch_ = 1
ISpeechMMSysAudio_vtables_ = [
(( u'DeviceId' , u'DeviceId' , ), 300, (300, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'DeviceId' , u'DeviceId' , ), 300, (300, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'LineId' , u'LineId' , ), 301, (301, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'LineId' , u'LineId' , ), 301, (301, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'MMHandle' , u'Handle' , ), 302, (302, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 200 , (3, 0, None, None) , 64 , )),
]
ISpeechMemoryStream_vtables_dispatch_ = 1
ISpeechMemoryStream_vtables_ = [
(( u'SetData' , u'Data' , ), 100, (100, (), [ (12, 1, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'GetData' , u'pData' , ), 101, (101, (), [ (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
ISpeechObjectToken_vtables_dispatch_ = 1
ISpeechObjectToken_vtables_ = [
(( u'Id' , u'ObjectId' , ), 1, (1, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'DataKey' , u'DataKey' , ), 2, (2, (), [ (16393, 10, None, "IID('{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 64 , )),
(( u'Category' , u'Category' , ), 3, (3, (), [ (16393, 10, None, "IID('{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}')") , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'GetDescription' , u'Locale' , u'Description' , ), 4, (4, (), [ (3, 49, '0', None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'SetId' , u'Id' , u'CategoryID' , u'CreateIfNotExist' , ), 5, (5, (), [
(8, 1, None, None) , (8, 49, "u''", None) , (11, 49, 'False', None) , ], 1 , 1 , 4 , 0 , 88 , (3, 32, None, None) , 64 , )),
(( u'GetAttribute' , u'AttributeName' , u'AttributeValue' , ), 6, (6, (), [ (8, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'CreateInstance' , u'pUnkOuter' , u'ClsContext' , u'Object' , ), 7, (7, (), [
(13, 49, 'None', None) , (3, 49, '23', None) , (16397, 10, None, None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'Remove' , u'ObjectStorageCLSID' , ), 8, (8, (), [ (8, 1, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 64 , )),
(( u'GetStorageFileName' , u'ObjectStorageCLSID' , u'KeyName' , u'FileName' , u'Folder' ,
u'FilePath' , ), 9, (9, (), [ (8, 1, None, None) , (8, 1, None, None) , (8, 1, None, None) ,
(3, 1, None, None) , (16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 64 , )),
(( u'RemoveStorageFileName' , u'ObjectStorageCLSID' , u'KeyName' , u'DeleteFile' , ), 10, (10, (), [
(8, 1, None, None) , (8, 1, None, None) , (11, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 64 , )),
(( u'IsUISupported' , u'TypeOfUI' , u'ExtraData' , u'Object' , u'Supported' ,
), 11, (11, (), [ (8, 1, None, None) , (16396, 49, "u''", None) , (13, 49, 'None', None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 32, None, None) , 64 , )),
(( u'DisplayUI' , u'hWnd' , u'Title' , u'TypeOfUI' , u'ExtraData' ,
u'Object' , ), 12, (12, (), [ (3, 1, None, None) , (8, 1, None, None) , (8, 1, None, None) ,
(16396, 49, "u''", None) , (13, 49, 'None', None) , ], 1 , 1 , 4 , 0 , 144 , (3, 32, None, None) , 64 , )),
(( u'MatchesAttributes' , u'Attributes' , u'Matches' , ), 13, (13, (), [ (8, 1, None, None) ,
(16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
]
ISpeechObjectTokenCategory_vtables_dispatch_ = 1
ISpeechObjectTokenCategory_vtables_ = [
(( u'Id' , u'Id' , ), 1, (1, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Default' , u'TokenId' , ), 2, (2, (), [ (8, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Default' , u'TokenId' , ), 2, (2, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'SetId' , u'Id' , u'CreateIfNotExist' , ), 3, (3, (), [ (8, 1, None, None) ,
(11, 49, 'False', None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'GetDataKey' , u'Location' , u'DataKey' , ), 4, (4, (), [ (3, 49, '0', None) ,
(16393, 10, None, "IID('{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}')") , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 64 , )),
(( u'EnumerateTokens' , u'RequiredAttributes' , u'OptionalAttributes' , u'Tokens' , ), 5, (5, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 32, None, None) , 0 , )),
]
ISpeechObjectTokens_vtables_dispatch_ = 1
ISpeechObjectTokens_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Token' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'ppEnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
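# Usage sketch (annotation): ISpeechObjectTokens is the collection type
# returned by calls such as SpVoice.GetVoices(); the '_NewEnum' entry above
# makes it directly iterable:
#
#   import win32com.client
#   for token in win32com.client.Dispatch("SAPI.SpVoice").GetVoices():
#       print(token.GetDescription())      # ISpeechObjectToken.GetDescription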
ISpeechPhoneConverter_vtables_dispatch_ = 1
ISpeechPhoneConverter_vtables_ = [
(( u'LanguageId' , u'LanguageId' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'LanguageId' , u'LanguageId' , ), 1, (1, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'PhoneToId' , u'Phonemes' , u'IdArray' , ), 2, (2, (), [ (8, 1, None, None) ,
(16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'IdToPhone' , u'IdArray' , u'Phonemes' , ), 3, (3, (), [ (12, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
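# Usage sketch (annotation): ISpeechPhoneConverter is the automation face of
# the phone-id conversion declared near the top of this section
# (PhoneToId/IdToPhone). Assuming SAPI 5 is installed, a minimal late-bound
# round trip looks like this (the phoneme string is only an example):
#
#   import win32com.client
#   conv = win32com.client.Dispatch("SAPI.SpPhoneConverter")
#   conv.LanguageId = 1033                 # en-US
#   ids = conv.PhoneToId(u"h eh l ow")     # phonemes -> id array (VARIANT)
#   symbols = conv.IdToPhone(ids)          # ids -> phoneme string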
ISpeechPhraseAlternate_vtables_dispatch_ = 1
ISpeechPhraseAlternate_vtables_ = [
(( u'RecoResult' , u'RecoResult' , ), 1, (1, (), [ (16393, 10, None, "IID('{ED2879CF-CED9-4EE6-A534-DE0191D5468D}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'StartElementInResult' , u'StartElement' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'NumberOfElementsInResult' , u'NumberOfElements' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'PhraseInfo' , u'PhraseInfo' , ), 4, (4, (), [ (16393, 10, None, "IID('{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')") , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Commit' , ), 5, (5, (), [ ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseAlternates_vtables_dispatch_ = 1
ISpeechPhraseAlternates_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'PhraseAlternate' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechPhraseElement_vtables_dispatch_ = 1
ISpeechPhraseElement_vtables_ = [
(( u'AudioTimeOffset' , u'AudioTimeOffset' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'AudioSizeTime' , u'AudioSizeTime' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AudioStreamOffset' , u'AudioStreamOffset' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'AudioSizeBytes' , u'AudioSizeBytes' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'RetainedStreamOffset' , u'RetainedStreamOffset' , ), 5, (5, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'RetainedSizeBytes' , u'RetainedSizeBytes' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'DisplayText' , u'DisplayText' , ), 7, (7, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'LexicalForm' , u'LexicalForm' , ), 8, (8, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'Pronunciation' , u'Pronunciation' , ), 9, (9, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'DisplayAttributes' , u'DisplayAttributes' , ), 10, (10, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'RequiredConfidence' , u'RequiredConfidence' , ), 11, (11, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'ActualConfidence' , u'ActualConfidence' , ), 12, (12, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'EngineConfidence' , u'EngineConfidence' , ), 13, (13, (), [ (16388, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseElements_vtables_dispatch_ = 1
ISpeechPhraseElements_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Element' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{E6176F96-E373-4801-B223-3B62C068C0B4}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechPhraseInfo_vtables_dispatch_ = 1
ISpeechPhraseInfo_vtables_ = [
(( u'LanguageId' , u'LanguageId' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GrammarId' , u'GrammarId' , ), 2, (2, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'StartTime' , u'StartTime' , ), 3, (3, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'AudioStreamPosition' , u'AudioStreamPosition' , ), 4, (4, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'AudioSizeBytes' , u'pAudioSizeBytes' , ), 5, (5, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'RetainedSizeBytes' , u'RetainedSizeBytes' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'AudioSizeTime' , u'AudioSizeTime' , ), 7, (7, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'Rule' , u'Rule' , ), 8, (8, (), [ (16393, 10, None, "IID('{A7BFE112-A4A0-48D9-B602-C313843F6964}')") , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'Properties' , u'Properties' , ), 9, (9, (), [ (16393, 10, None, "IID('{08166B47-102E-4B23-A599-BDB98DBFD1F4}')") , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'Elements' , u'Elements' , ), 10, (10, (), [ (16393, 10, None, "IID('{0626B328-3478-467D-A0B3-D0853B93DDA3}')") , ], 1 , 2 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'Replacements' , u'Replacements' , ), 11, (11, (), [ (16393, 10, None, "IID('{38BC662F-2257-4525-959E-2069D2596C05}')") , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'EngineId' , u'EngineIdGuid' , ), 12, (12, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'EnginePrivateData' , u'PrivateData' , ), 13, (13, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'SaveToMemory' , u'PhraseBlock' , ), 14, (14, (), [ (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'GetText' , u'StartElement' , u'Elements' , u'UseReplacements' , u'Text' ,
), 15, (15, (), [ (3, 49, '0', None) , (3, 49, '-1', None) , (11, 49, 'True', None) , (16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'GetDisplayAttributes' , u'StartElement' , u'Elements' , u'UseReplacements' , u'DisplayAttributes' ,
), 16, (16, (), [ (3, 49, '0', None) , (3, 49, '-1', None) , (11, 49, 'True', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseInfoBuilder_vtables_dispatch_ = 1
ISpeechPhraseInfoBuilder_vtables_ = [
(( u'RestorePhraseFromMemory' , u'PhraseInMemory' , u'PhraseInfo' , ), 1, (1, (), [ (16396, 1, None, None) ,
(16393, 10, None, "IID('{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')") , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseProperties_vtables_dispatch_ = 1
ISpeechPhraseProperties_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Property' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{CE563D48-961E-4732-A2E1-378A42B430BE}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechPhraseProperty_vtables_dispatch_ = 1
ISpeechPhraseProperty_vtables_ = [
(( u'Name' , u'Name' , ), 1, (1, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Id' , u'Id' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Value' , u'Value' , ), 3, (3, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'FirstElement' , u'FirstElement' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'NumberOfElements' , u'NumberOfElements' , ), 5, (5, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'EngineConfidence' , u'Confidence' , ), 6, (6, (), [ (16388, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Confidence' , u'Confidence' , ), 7, (7, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'Parent' , u'ParentProperty' , ), 8, (8, (), [ (16393, 10, None, "IID('{CE563D48-961E-4732-A2E1-378A42B430BE}')") , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'Children' , u'Children' , ), 9, (9, (), [ (16393, 10, None, "IID('{08166B47-102E-4B23-A599-BDB98DBFD1F4}')") , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseReplacement_vtables_dispatch_ = 1
ISpeechPhraseReplacement_vtables_ = [
(( u'DisplayAttributes' , u'DisplayAttributes' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Text' , u'Text' , ), 2, (2, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'FirstElement' , u'FirstElement' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'NumberOfElements' , u'NumberOfElements' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseReplacements_vtables_dispatch_ = 1
ISpeechPhraseReplacements_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Reps' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{2890A410-53A7-4FB5-94EC-06D4998E3D02}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechPhraseRule_vtables_dispatch_ = 1
ISpeechPhraseRule_vtables_ = [
(( u'Name' , u'Name' , ), 1, (1, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Id' , u'Id' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'FirstElement' , u'FirstElement' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'NumberOfElements' , u'NumberOfElements' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Parent' , u'Parent' , ), 5, (5, (), [ (16393, 10, None, "IID('{A7BFE112-A4A0-48D9-B602-C313843F6964}')") , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Children' , u'Children' , ), 6, (6, (), [ (16393, 10, None, "IID('{9047D593-01DD-4B72-81A3-E4A0CA69F407}')") , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Confidence' , u'ActualConfidence' , ), 7, (7, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'EngineConfidence' , u'EngineConfidence' , ), 8, (8, (), [ (16388, 10, None, None) , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpeechPhraseRules_vtables_dispatch_ = 1
ISpeechPhraseRules_vtables_ = [
(( u'Count' , u'Count' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Item' , u'Index' , u'Rule' , ), 0, (0, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{A7BFE112-A4A0-48D9-B602-C313843F6964}')") , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'_NewEnum' , u'EnumVARIANT' , ), -4, (-4, (), [ (16397, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 1 , )),
]
ISpeechRecoContext_vtables_dispatch_ = 1
ISpeechRecoContext_vtables_ = [
(( u'Recognizer' , u'Recognizer' , ), 1, (1, (), [ (16393, 10, None, "IID('{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'AudioInputInterferenceStatus' , u'Interference' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'RequestedUIType' , u'UIType' , ), 3, (3, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Voice' , u'Voice' , ), 4, (4, (), [ (9, 1, None, "IID('{269316D8-57BD-11D2-9EEE-00C04F797396}')") , ], 1 , 8 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Voice' , u'Voice' , ), 4, (4, (), [ (16393, 10, None, "IID('{269316D8-57BD-11D2-9EEE-00C04F797396}')") , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'AllowVoiceFormatMatchingOnNextSet' , u'pAllow' , ), 5, (5, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 96 , (3, 0, None, None) , 64 , )),
(( u'AllowVoiceFormatMatchingOnNextSet' , u'pAllow' , ), 5, (5, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 64 , )),
(( u'VoicePurgeEvent' , u'EventInterest' , ), 6, (6, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'VoicePurgeEvent' , u'EventInterest' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'EventInterests' , u'EventInterest' , ), 7, (7, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'EventInterests' , u'EventInterest' , ), 7, (7, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'CmdMaxAlternates' , u'MaxAlternates' , ), 8, (8, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'CmdMaxAlternates' , u'MaxAlternates' , ), 8, (8, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 9, (9, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 9, (9, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'RetainedAudio' , u'Option' , ), 10, (10, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'RetainedAudio' , u'Option' , ), 10, (10, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'RetainedAudioFormat' , u'Format' , ), 11, (11, (), [ (9, 1, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 8 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'RetainedAudioFormat' , u'Format' , ), 11, (11, (), [ (16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 2 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( u'Pause' , ), 12, (12, (), [ ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'Resume' , ), 13, (13, (), [ ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'CreateGrammar' , u'GrammarId' , u'Grammar' , ), 14, (14, (), [ (12, 49, '0', None) ,
(16393, 10, None, "IID('{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}')") , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( u'CreateResultFromMemory' , u'ResultBlock' , u'Result' , ), 15, (15, (), [ (16396, 1, None, None) ,
(16393, 10, None, "IID('{ED2879CF-CED9-4EE6-A534-DE0191D5468D}')") , ], 1 , 1 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( u'Bookmark' , u'Options' , u'StreamPos' , u'BookmarkId' , ), 16, (16, (), [
(3, 1, None, None) , (12, 1, None, None) , (12, 1, None, None) , ], 1 , 1 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
(( u'SetAdaptationData' , u'AdaptationString' , ), 17, (17, (), [ (8, 1, None, None) , ], 1 , 1 , 4 , 0 , 248 , (3, 0, None, None) , 0 , )),
]
ISpeechRecoGrammar_vtables_dispatch_ = 1
ISpeechRecoGrammar_vtables_ = [
(( u'Id' , u'Id' , ), 1, (1, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'RecoContext' , u'RecoContext' , ), 2, (2, (), [ (16393, 10, None, "IID('{580AA49D-7E1E-4809-B8E2-57DA806104B8}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 3, (3, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'Rules' , u'Rules' , ), 4, (4, (), [ (16393, 10, None, "IID('{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}')") , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Reset' , u'NewLanguage' , ), 5, (5, (), [ (3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'CmdLoadFromFile' , u'FileName' , u'LoadOption' , ), 7, (7, (), [ (8, 1, None, None) ,
(3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'CmdLoadFromObject' , u'ClassId' , u'GrammarName' , u'LoadOption' , ), 8, (8, (), [
(8, 1, None, None) , (8, 1, None, None) , (3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'CmdLoadFromResource' , u'hModule' , u'ResourceName' , u'ResourceType' , u'LanguageId' ,
u'LoadOption' , ), 9, (9, (), [ (3, 1, None, None) , (12, 1, None, None) , (12, 1, None, None) ,
(3, 1, None, None) , (3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'CmdLoadFromMemory' , u'GrammarData' , u'LoadOption' , ), 10, (10, (), [ (12, 1, None, None) ,
(3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'CmdLoadFromProprietaryGrammar' , u'ProprietaryGuid' , u'ProprietaryString' , u'ProprietaryData' , u'LoadOption' ,
), 11, (11, (), [ (8, 1, None, None) , (8, 1, None, None) , (12, 1, None, None) , (3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'CmdSetRuleState' , u'Name' , u'State' , ), 12, (12, (), [ (8, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'CmdSetRuleIdState' , u'RuleId' , u'State' , ), 13, (13, (), [ (3, 1, None, None) ,
(3, 1, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'DictationLoad' , u'TopicName' , u'LoadOption' , ), 14, (14, (), [ (8, 49, "u''", None) ,
(3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 160 , (3, 32, None, None) , 0 , )),
(( u'DictationUnload' , ), 15, (15, (), [ ], 1 , 1 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'DictationSetState' , u'State' , ), 16, (16, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'SetWordSequenceData' , u'Text' , u'TextLength' , u'Info' , ), 17, (17, (), [
(8, 1, None, None) , (3, 1, None, None) , (9, 1, None, "IID('{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}')") , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'SetTextSelection' , u'Info' , ), 18, (18, (), [ (9, 1, None, "IID('{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}')") , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'IsPronounceable' , u'Word' , u'WordPronounceable' , ), 19, (19, (), [ (8, 1, None, None) ,
(16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
]
ISpeechRecoResult_vtables_dispatch_ = 1
ISpeechRecoResult_vtables_ = [
(( u'RecoContext' , u'RecoContext' , ), 1, (1, (), [ (16393, 10, None, "IID('{580AA49D-7E1E-4809-B8E2-57DA806104B8}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Times' , u'Times' , ), 2, (2, (), [ (16393, 10, None, "IID('{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AudioFormat' , u'Format' , ), 3, (3, (), [ (9, 1, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 8 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'AudioFormat' , u'Format' , ), 3, (3, (), [ (16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'PhraseInfo' , u'PhraseInfo' , ), 4, (4, (), [ (16393, 10, None, "IID('{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')") , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Alternates' , u'RequestCount' , u'StartElement' , u'Elements' , u'Alternates' ,
), 5, (5, (), [ (3, 1, None, None) , (3, 49, '0', None) , (3, 49, '-1', None) , (16393, 10, None, "IID('{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Audio' , u'StartElement' , u'Elements' , u'Stream' , ), 6, (6, (), [
(3, 49, '0', None) , (3, 49, '-1', None) , (16393, 10, None, "IID('{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'SpeakAudio' , u'StartElement' , u'Elements' , u'Flags' , u'StreamNumber' ,
), 7, (7, (), [ (3, 49, '0', None) , (3, 49, '-1', None) , (3, 49, '0', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'SaveToMemory' , u'ResultBlock' , ), 8, (8, (), [ (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'DiscardResultInfo' , u'ValueTypes' , ), 9, (9, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
]
ISpeechRecoResult2_vtables_dispatch_ = 1
ISpeechRecoResult2_vtables_ = [
(( u'SetTextFeedback' , u'Feedback' , u'WasSuccessful' , ), 12, (12, (), [ (8, 1, None, None) ,
(11, 1, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
]
ISpeechRecoResultDispatch_vtables_dispatch_ = 1
ISpeechRecoResultDispatch_vtables_ = [
(( u'RecoContext' , u'RecoContext' , ), 1, (1, (), [ (16393, 10, None, "IID('{580AA49D-7E1E-4809-B8E2-57DA806104B8}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Times' , u'Times' , ), 2, (2, (), [ (16393, 10, None, "IID('{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AudioFormat' , u'Format' , ), 3, (3, (), [ (9, 1, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 8 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'AudioFormat' , u'Format' , ), 3, (3, (), [ (16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'PhraseInfo' , u'PhraseInfo' , ), 4, (4, (), [ (16393, 10, None, "IID('{961559CF-4E67-4662-8BF0-D93F1FCD61B3}')") , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Alternates' , u'RequestCount' , u'StartElement' , u'Elements' , u'Alternates' ,
), 5, (5, (), [ (3, 1, None, None) , (3, 49, '0', None) , (3, 49, '-1', None) , (16393, 10, None, "IID('{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}')") , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Audio' , u'StartElement' , u'Elements' , u'Stream' , ), 6, (6, (), [
(3, 49, '0', None) , (3, 49, '-1', None) , (16393, 10, None, "IID('{EEB14B68-808B-4ABE-A5EA-B51DA7588008}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'SpeakAudio' , u'StartElement' , u'Elements' , u'Flags' , u'StreamNumber' ,
), 7, (7, (), [ (3, 49, '0', None) , (3, 49, '-1', None) , (3, 49, '0', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'SaveToMemory' , u'ResultBlock' , ), 8, (8, (), [ (16396, 10, None, None) , ], 1 , 1 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'DiscardResultInfo' , u'ValueTypes' , ), 9, (9, (), [ (3, 1, None, None) , ], 1 , 1 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'GetXMLResult' , u'Options' , u'pResult' , ), 10, (10, (), [ (3, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'GetXMLErrorInfo' , u'LineNumber' , u'ScriptLine' , u'Source' , u'Description' ,
u'ResultCode' , u'IsError' , ), 11, (11, (), [ (16387, 2, None, None) , (16392, 2, None, None) ,
(16392, 2, None, None) , (16392, 2, None, None) , (16387, 2, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'SetTextFeedback' , u'Feedback' , u'WasSuccessful' , ), 12, (12, (), [ (8, 1, None, None) ,
(11, 1, None, None) , ], 1 , 1 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
]
ISpeechRecoResultTimes_vtables_dispatch_ = 1
ISpeechRecoResultTimes_vtables_ = [
(( u'StreamTime' , u'Time' , ), 1, (1, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Length' , u'Length' , ), 2, (2, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'TickCount' , u'TickCount' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'OffsetFromStart' , u'OffsetFromStart' , ), 4, (4, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
]
ISpeechRecognizer_vtables_dispatch_ = 1
ISpeechRecognizer_vtables_ = [
(( u'Recognizer' , u'Recognizer' , ), 1, (1, (), [ (9, 1, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 8 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Recognizer' , u'Recognizer' , ), 1, (1, (), [ (16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'AllowAudioInputFormatChangesOnNextSet' , u'Allow' , ), 2, (2, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 64 , )),
(( u'AllowAudioInputFormatChangesOnNextSet' , u'Allow' , ), 2, (2, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 64 , )),
(( u'AudioInput' , u'AudioInput' , ), 3, (3, (), [ (9, 49, '0', "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 8 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'AudioInput' , u'AudioInput' , ), 3, (3, (), [ (16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'AudioInputStream' , u'AudioInputStream' , ), 4, (4, (), [ (9, 49, '0', "IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')") , ], 1 , 8 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'AudioInputStream' , u'AudioInputStream' , ), 4, (4, (), [ (16393, 10, None, "IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')") , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'IsShared' , u'Shared' , ), 5, (5, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 6, (6, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'State' , u'State' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'Status' , u'Status' , ), 7, (7, (), [ (16393, 10, None, "IID('{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}')") , ], 1 , 2 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'Profile' , u'Profile' , ), 8, (8, (), [ (9, 49, '0', "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 8 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'Profile' , u'Profile' , ), 8, (8, (), [ (16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 2 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'EmulateRecognition' , u'TextElements' , u'ElementDisplayAttributes' , u'LanguageId' , ), 9, (9, (), [
(12, 1, None, None) , (16396, 49, "u''", None) , (3, 49, '0', None) , ], 1 , 1 , 4 , 0 , 168 , (3, 32, None, None) , 0 , )),
(( u'CreateRecoContext' , u'NewContext' , ), 10, (10, (), [ (16393, 10, None, "IID('{580AA49D-7E1E-4809-B8E2-57DA806104B8}')") , ], 1 , 1 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'GetFormat' , u'Type' , u'Format' , ), 11, (11, (), [ (3, 1, None, None) ,
(16393, 10, None, "IID('{E6E9C590-3E18-40E3-8299-061F98BDE7C7}')") , ], 1 , 1 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'SetPropertyNumber' , u'Name' , u'Value' , u'Supported' , ), 12, (12, (), [
(8, 1, None, None) , (3, 1, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 192 , (3, 0, None, None) , 64 , )),
(( u'GetPropertyNumber' , u'Name' , u'Value' , u'Supported' , ), 13, (13, (), [
(8, 1, None, None) , (16387, 3, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 200 , (3, 0, None, None) , 64 , )),
(( u'SetPropertyString' , u'Name' , u'Value' , u'Supported' , ), 14, (14, (), [
(8, 1, None, None) , (8, 1, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 208 , (3, 0, None, None) , 64 , )),
(( u'GetPropertyString' , u'Name' , u'Value' , u'Supported' , ), 15, (15, (), [
(8, 1, None, None) , (16392, 3, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 216 , (3, 0, None, None) , 64 , )),
(( u'IsUISupported' , u'TypeOfUI' , u'ExtraData' , u'Supported' , ), 16, (16, (), [
(8, 1, None, None) , (16396, 49, "u''", None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 32, None, None) , 0 , )),
(( u'DisplayUI' , u'hWndParent' , u'Title' , u'TypeOfUI' , u'ExtraData' ,
), 17, (17, (), [ (3, 1, None, None) , (8, 1, None, None) , (8, 1, None, None) , (16396, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 232 , (3, 32, None, None) , 0 , )),
(( u'GetRecognizers' , u'RequiredAttributes' , u'OptionalAttributes' , u'ObjectTokens' , ), 18, (18, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 240 , (3, 32, None, None) , 0 , )),
(( u'GetAudioInputs' , u'RequiredAttributes' , u'OptionalAttributes' , u'ObjectTokens' , ), 19, (19, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 248 , (3, 32, None, None) , 0 , )),
(( u'GetProfiles' , u'RequiredAttributes' , u'OptionalAttributes' , u'ObjectTokens' , ), 20, (20, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 256 , (3, 32, None, None) , 0 , )),
]
ISpeechRecognizerStatus_vtables_dispatch_ = 1
ISpeechRecognizerStatus_vtables_ = [
(( u'AudioStatus' , u'AudioStatus' , ), 1, (1, (), [ (16393, 10, None, "IID('{C62D9C91-7458-47F6-862D-1EF86FB0B278}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'CurrentStreamPosition' , u'pCurrentStreamPos' , ), 2, (2, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'CurrentStreamNumber' , u'StreamNumber' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'NumberOfActiveRules' , u'NumberOfActiveRules' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'ClsidEngine' , u'ClsidEngine' , ), 5, (5, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'SupportedLanguages' , u'SupportedLanguages' , ), 6, (6, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
]
ISpeechResourceLoader_vtables_dispatch_ = 1
ISpeechResourceLoader_vtables_ = [
(( u'LoadResource' , u'bstrResourceUri' , u'fAlwaysReload' , u'pStream' , u'pbstrMIMEType' ,
u'pfModified' , u'pbstrRedirectUrl' , ), 1, (1, (), [ (8, 1, None, None) , (11, 1, None, None) ,
(16397, 2, None, None) , (16392, 2, None, None) , (16395, 2, None, None) , (16392, 2, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'GetLocalCopy' , u'bstrResourceUri' , u'pbstrLocalPath' , u'pbstrMIMEType' , u'pbstrRedirectUrl' ,
), 2, (2, (), [ (8, 1, None, None) , (16392, 2, None, None) , (16392, 2, None, None) , (16392, 2, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'ReleaseLocalCopy' , u'pbstrLocalPath' , ), 3, (3, (), [ (8, 1, None, None) , ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
]
ISpeechTextSelectionInformation_vtables_dispatch_ = 1
ISpeechTextSelectionInformation_vtables_ = [
(( u'ActiveOffset' , u'ActiveOffset' , ), 1, (1, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'ActiveOffset' , u'ActiveOffset' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'ActiveLength' , u'ActiveLength' , ), 2, (2, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'ActiveLength' , u'ActiveLength' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'SelectionOffset' , u'SelectionOffset' , ), 3, (3, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'SelectionOffset' , u'SelectionOffset' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'SelectionLength' , u'SelectionLength' , ), 4, (4, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'SelectionLength' , u'SelectionLength' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
]
ISpeechVoice_vtables_dispatch_ = 1
ISpeechVoice_vtables_ = [
(( u'Status' , u'Status' , ), 1, (1, (), [ (16393, 10, None, "IID('{8BE47B07-57F6-11D2-9EEE-00C04F797396}')") , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Voice' , u'Voice' , ), 2, (2, (), [ (16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Voice' , u'Voice' , ), 2, (2, (), [ (9, 1, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 8 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'AudioOutput' , u'AudioOutput' , ), 3, (3, (), [ (16393, 10, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'AudioOutput' , u'AudioOutput' , ), 3, (3, (), [ (9, 1, None, "IID('{C74A3ADC-B727-4500-A84A-B526721C8B8C}')") , ], 1 , 8 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'AudioOutputStream' , u'AudioOutputStream' , ), 4, (4, (), [ (16393, 10, None, "IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')") , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'AudioOutputStream' , u'AudioOutputStream' , ), 4, (4, (), [ (9, 1, None, "IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')") , ], 1 , 8 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'Rate' , u'Rate' , ), 5, (5, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'Rate' , u'Rate' , ), 5, (5, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'Volume' , u'Volume' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'Volume' , u'Volume' , ), 6, (6, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'AllowAudioOutputFormatChangesOnNextSet' , u'Allow' , ), 7, (7, (), [ (11, 1, None, None) , ], 1 , 4 , 4 , 0 , 144 , (3, 0, None, None) , 64 , )),
(( u'AllowAudioOutputFormatChangesOnNextSet' , u'Allow' , ), 7, (7, (), [ (16395, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 64 , )),
(( u'EventInterests' , u'EventInterestFlags' , ), 8, (8, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
(( u'EventInterests' , u'EventInterestFlags' , ), 8, (8, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 168 , (3, 0, None, None) , 0 , )),
(( u'Priority' , u'Priority' , ), 9, (9, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 176 , (3, 0, None, None) , 0 , )),
(( u'Priority' , u'Priority' , ), 9, (9, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 184 , (3, 0, None, None) , 0 , )),
(( u'AlertBoundary' , u'Boundary' , ), 10, (10, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 192 , (3, 0, None, None) , 0 , )),
(( u'AlertBoundary' , u'Boundary' , ), 10, (10, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 200 , (3, 0, None, None) , 0 , )),
(( u'SynchronousSpeakTimeout' , u'msTimeout' , ), 11, (11, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 208 , (3, 0, None, None) , 0 , )),
(( u'SynchronousSpeakTimeout' , u'msTimeout' , ), 11, (11, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 216 , (3, 0, None, None) , 0 , )),
(( u'Speak' , u'Text' , u'Flags' , u'StreamNumber' , ), 12, (12, (), [
(8, 1, None, None) , (3, 49, '0', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 224 , (3, 0, None, None) , 0 , )),
(( u'SpeakStream' , u'Stream' , u'Flags' , u'StreamNumber' , ), 13, (13, (), [
(9, 1, None, "IID('{6450336F-7D49-4CED-8097-49D6DEE37294}')") , (3, 49, '0', None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 232 , (3, 0, None, None) , 0 , )),
(( u'Pause' , ), 14, (14, (), [ ], 1 , 1 , 4 , 0 , 240 , (3, 0, None, None) , 0 , )),
(( u'Resume' , ), 15, (15, (), [ ], 1 , 1 , 4 , 0 , 248 , (3, 0, None, None) , 0 , )),
(( u'Skip' , u'Type' , u'NumItems' , u'NumSkipped' , ), 16, (16, (), [
(8, 1, None, None) , (3, 1, None, None) , (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 256 , (3, 0, None, None) , 0 , )),
(( u'GetVoices' , u'RequiredAttributes' , u'OptionalAttributes' , u'ObjectTokens' , ), 17, (17, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 264 , (3, 32, None, None) , 0 , )),
(( u'GetAudioOutputs' , u'RequiredAttributes' , u'OptionalAttributes' , u'ObjectTokens' , ), 18, (18, (), [
(8, 49, "u''", None) , (8, 49, "u''", None) , (16393, 10, None, "IID('{9285B776-2E7B-4BC0-B53E-580EB6FA967F}')") , ], 1 , 1 , 4 , 0 , 272 , (3, 32, None, None) , 0 , )),
(( u'WaitUntilDone' , u'msTimeout' , u'Done' , ), 19, (19, (), [ (3, 1, None, None) ,
(16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 280 , (3, 0, None, None) , 0 , )),
(( u'SpeakCompleteEvent' , u'Handle' , ), 20, (20, (), [ (16387, 10, None, None) , ], 1 , 1 , 4 , 0 , 288 , (3, 0, None, None) , 64 , )),
(( u'IsUISupported' , u'TypeOfUI' , u'ExtraData' , u'Supported' , ), 21, (21, (), [
(8, 1, None, None) , (16396, 49, "u''", None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 296 , (3, 32, None, None) , 0 , )),
(( u'DisplayUI' , u'hWndParent' , u'Title' , u'TypeOfUI' , u'ExtraData' ,
), 22, (22, (), [ (3, 1, None, None) , (8, 1, None, None) , (8, 1, None, None) , (16396, 49, "u''", None) , ], 1 , 1 , 4 , 0 , 304 , (3, 32, None, None) , 0 , )),
]
ISpeechVoiceStatus_vtables_dispatch_ = 1
ISpeechVoiceStatus_vtables_ = [
(( u'CurrentStreamNumber' , u'StreamNumber' , ), 1, (1, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'LastStreamNumberQueued' , u'StreamNumber' , ), 2, (2, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'LastHResult' , u'HResult' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'RunningState' , u'State' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'InputWordPosition' , u'Position' , ), 5, (5, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'InputWordLength' , u'Length' , ), 6, (6, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'InputSentencePosition' , u'Position' , ), 7, (7, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'InputSentenceLength' , u'Length' , ), 8, (8, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'LastBookmark' , u'Bookmark' , ), 9, (9, (), [ (16392, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'LastBookmarkId' , u'BookmarkId' , ), 10, (10, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 128 , (3, 0, None, None) , 64 , )),
(( u'PhonemeId' , u'PhoneId' , ), 11, (11, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'VisemeId' , u'VisemeId' , ), 12, (12, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
]
ISpeechWaveFormatEx_vtables_dispatch_ = 1
ISpeechWaveFormatEx_vtables_ = [
(( u'FormatTag' , u'FormatTag' , ), 1, (1, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'FormatTag' , u'FormatTag' , ), 1, (1, (), [ (2, 1, None, None) , ], 1 , 4 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Channels' , u'Channels' , ), 2, (2, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'Channels' , u'Channels' , ), 2, (2, (), [ (2, 1, None, None) , ], 1 , 4 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'SamplesPerSec' , u'SamplesPerSec' , ), 3, (3, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'SamplesPerSec' , u'SamplesPerSec' , ), 3, (3, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'AvgBytesPerSec' , u'AvgBytesPerSec' , ), 4, (4, (), [ (16387, 10, None, None) , ], 1 , 2 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
(( u'AvgBytesPerSec' , u'AvgBytesPerSec' , ), 4, (4, (), [ (3, 1, None, None) , ], 1 , 4 , 4 , 0 , 112 , (3, 0, None, None) , 0 , )),
(( u'BlockAlign' , u'BlockAlign' , ), 5, (5, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 120 , (3, 0, None, None) , 0 , )),
(( u'BlockAlign' , u'BlockAlign' , ), 5, (5, (), [ (2, 1, None, None) , ], 1 , 4 , 4 , 0 , 128 , (3, 0, None, None) , 0 , )),
(( u'BitsPerSample' , u'BitsPerSample' , ), 6, (6, (), [ (16386, 10, None, None) , ], 1 , 2 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'BitsPerSample' , u'BitsPerSample' , ), 6, (6, (), [ (2, 1, None, None) , ], 1 , 4 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
(( u'ExtraData' , u'ExtraData' , ), 7, (7, (), [ (16396, 10, None, None) , ], 1 , 2 , 4 , 0 , 152 , (3, 0, None, None) , 0 , )),
(( u'ExtraData' , u'ExtraData' , ), 7, (7, (), [ (12, 1, None, None) , ], 1 , 4 , 4 , 0 , 160 , (3, 0, None, None) , 0 , )),
]
ISpeechXMLRecoResult_vtables_dispatch_ = 1
ISpeechXMLRecoResult_vtables_ = [
(( u'GetXMLResult' , u'Options' , u'pResult' , ), 10, (10, (), [ (3, 1, None, None) ,
(16392, 10, None, None) , ], 1 , 1 , 4 , 0 , 136 , (3, 0, None, None) , 0 , )),
(( u'GetXMLErrorInfo' , u'LineNumber' , u'ScriptLine' , u'Source' , u'Description' ,
u'ResultCode' , u'IsError' , ), 11, (11, (), [ (16387, 2, None, None) , (16392, 2, None, None) ,
(16392, 2, None, None) , (16392, 2, None, None) , (16387, 2, None, None) , (16395, 10, None, None) , ], 1 , 1 , 4 , 0 , 144 , (3, 0, None, None) , 0 , )),
]
IStream_vtables_dispatch_ = 0
IStream_vtables_ = [
(( u'RemoteSeek' , u'dlibMove' , u'dwOrigin' , u'plibNewPosition' , ), 1610743808, (1610743808, (), [
(36, 1, None, None) , (19, 1, None, None) , (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 40 , (3, 0, None, None) , 0 , )),
(( u'SetSize' , u'libNewSize' , ), 1610743809, (1610743809, (), [ (36, 1, None, None) , ], 1 , 1 , 4 , 0 , 48 , (3, 0, None, None) , 0 , )),
(( u'RemoteCopyTo' , u'pstm' , u'cb' , u'pcbRead' , u'pcbWritten' ,
), 1610743810, (1610743810, (), [ (13, 1, None, "IID('{0000000C-0000-0000-C000-000000000046}')") , (36, 1, None, None) , (36, 2, None, None) , (36, 2, None, None) , ], 1 , 1 , 4 , 0 , 56 , (3, 0, None, None) , 0 , )),
(( u'Commit' , u'grfCommitFlags' , ), 1610743811, (1610743811, (), [ (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 64 , (3, 0, None, None) , 0 , )),
(( u'Revert' , ), 1610743812, (1610743812, (), [ ], 1 , 1 , 4 , 0 , 72 , (3, 0, None, None) , 0 , )),
(( u'LockRegion' , u'libOffset' , u'cb' , u'dwLockType' , ), 1610743813, (1610743813, (), [
(36, 1, None, None) , (36, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 80 , (3, 0, None, None) , 0 , )),
(( u'UnlockRegion' , u'libOffset' , u'cb' , u'dwLockType' , ), 1610743814, (1610743814, (), [
(36, 1, None, None) , (36, 1, None, None) , (19, 1, None, None) , ], 1 , 1 , 4 , 0 , 88 , (3, 0, None, None) , 0 , )),
(( u'Stat' , u'pstatstg' , u'grfStatFlag' , ), 1610743815, (1610743815, (), [ (36, 2, None, None) ,
(19, 1, None, None) , ], 1 , 1 , 4 , 0 , 96 , (3, 0, None, None) , 0 , )),
(( u'Clone' , u'ppstm' , ), 1610743816, (1610743816, (), [ (16397, 2, None, "IID('{0000000C-0000-0000-C000-000000000046}')") , ], 1 , 1 , 4 , 0 , 104 , (3, 0, None, None) , 0 , )),
]
RecordMap = {
###u'SPRULE': '{00000000-0000-0000-0000-000000000000}', # Record disabled because it doesn't have a non-null GUID
}
CLSIDToClassMap = {
'{CFF8E175-019E-11D3-A08E-00C04F8EF9B5}' : ISpeechAudio,
'{7013943A-E2EC-11D2-A086-00C04F8EF9B5}' : SpStreamFormatConverter,
'{3BEE4890-4FE9-4A37-8C1E-5E7E12791C1F}' : SpSharedRecognizer,
'{9185F743-1143-4C28-86B5-BFF14F20E5C8}' : SpPhoneConverter,
'{7A1EF0D5-1581-4741-88E4-209A49F11A10}' : ISpeechWaveFormatEx,
'{947812B3-2AE1-4644-BA86-9E90DED7EC91}' : SpFileStream,
'{0F92030A-CBFD-4AB8-A164-FF5985547FF6}' : SpTextSelectionInformation,
'{EABCE657-75BC-44A2-AA7F-C56476742963}' : ISpeechGrammarRuleStateTransitions,
'{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}' : ISpeechGrammarRuleStateTransition,
'{1A9E9F4F-104F-4DB8-A115-EFD7FD0C97AE}' : ISpeechCustomStream,
'{41B89B6B-9399-11D2-9623-00C04F8EE628}' : SpInprocRecognizer,
'{4F414126-DFE3-4629-99EE-797978317EAD}' : SpPhoneticAlphabetConverter,
'{7B8FCB42-0E9D-4F00-A048-7B04D6179D3D}' : _ISpeechRecoContextEvents,
'{9EF96870-E160-4792-820D-48CF0649E4EC}' : SpAudioFormat,
'{9047D593-01DD-4B72-81A3-E4A0CA69F407}' : ISpeechPhraseRules,
'{0D722F1A-9FCF-4E62-96D8-6DF8F01A26AA}' : SpShortcut,
'{AF67F125-AB39-4E93-B4A2-CC2E66E182A7}' : ISpeechFileStream,
'{0626B328-3478-467D-A0B3-D0853B93DDA3}' : ISpeechPhraseElements,
'{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}' : ISpeechPhraseAlternate,
'{715D9C59-4442-11D2-9605-00C04F8EE628}' : SpStream,
'{269316D8-57BD-11D2-9EEE-00C04F797396}' : ISpeechVoice,
'{6450336F-7D49-4CED-8097-49D6DEE37294}' : ISpeechBaseStream,
'{3C76AF6D-1FD7-4831-81D1-3B71D5A13C44}' : ISpeechMMSysAudio,
'{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}' : ISpeechLexiconWord,
'{08166B47-102E-4B23-A599-BDB98DBFD1F4}' : ISpeechPhraseProperties,
'{47206204-5ECA-11D2-960F-00C04F8EE628}' : SpSharedRecoContext,
'{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}' : ISpeechRecoResultTimes,
'{E6E9C590-3E18-40E3-8299-061F98BDE7C7}' : ISpeechAudioFormat,
'{8E0A246D-D3C8-45DE-8657-04290C458C3C}' : ISpeechRecoResult2,
'{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}' : ISpeechGrammarRule,
'{D4286F2C-EE67-45AE-B928-28D695362EDA}' : ISpeechGrammarRuleState,
'{E6176F96-E373-4801-B223-3B62C068C0B4}' : ISpeechPhraseElement,
'{2890A410-53A7-4FB5-94EC-06D4998E3D02}' : ISpeechPhraseReplacement,
'{8BE47B07-57F6-11D2-9EEE-00C04F797396}' : ISpeechVoiceStatus,
'{38BC662F-2257-4525-959E-2069D2596C05}' : ISpeechPhraseReplacements,
'{EF411752-3736-4CB4-9C8C-8EF4CCB58EFE}' : SpObjectToken,
'{580AA49D-7E1E-4809-B8E2-57DA806104B8}' : ISpeechRecoContext,
'{8DBEF13F-1948-4AA8-8CF0-048EEBED95D8}' : SpCustomStream,
'{3DA7627A-C7AE-4B23-8708-638C50362C25}' : ISpeechLexicon,
'{72829128-5682-4704-A0D4-3E2BB6F2EAD3}' : ISpeechLexiconPronunciations,
'{AAEC54AF-8F85-4924-944D-B79D39D72E19}' : ISpeechXMLRecoResult,
'{A8C680EB-3D32-11D2-9EE7-00C04F797396}' : SpMMAudioOut,
'{C79A574C-63BE-44B9-801F-283F87F898BE}' : SpWaveFormatEx,
'{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}' : ISpeechRecognizer,
'{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}' : ISpeechGrammarRules,
'{B9AC5783-FCD0-4B21-B119-B4F8DA8FD2C3}' : ISpeechResourceLoader,
'{C9E37C15-DF92-4727-85D6-72E5EEB6995A}' : SpUnCompressedLexicon,
'{C74A3ADC-B727-4500-A84A-B526721C8B8C}' : ISpeechObjectToken,
'{CF3D2E50-53F2-11D2-960C-00C04F8EE628}' : SpMMAudioIn,
'{ED2879CF-CED9-4EE6-A534-DE0191D5468D}' : ISpeechRecoResult,
'{A910187F-0C7A-45AC-92CC-59EDAFB77B53}' : SpObjectTokenCategory,
'{EEB14B68-808B-4ABE-A5EA-B51DA7588008}' : ISpeechMemoryStream,
'{90903716-2F42-11D3-9C26-00C04F8EF87C}' : SpCompressedLexicon,
'{CE563D48-961E-4732-A2E1-378A42B430BE}' : ISpeechPhraseProperty,
'{73AD6842-ACE0-45E8-A4DD-8795881A2C2A}' : SpInProcRecoContext,
'{9285B776-2E7B-4BC0-B53E-580EB6FA967F}' : ISpeechObjectTokens,
'{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}' : ISpeechTextSelectionInformation,
'{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}' : ISpeechPhraseAlternates,
'{8D199862-415E-47D5-AC4F-FAA608B424E6}' : ISpeechLexiconWords,
'{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}' : ISpeechRecoGrammar,
'{A372ACD1-3BEF-4BBD-8FFB-CB3E2B416AF8}' : _ISpeechVoiceEvents,
'{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}' : ISpeechRecognizerStatus,
'{C62D9C91-7458-47F6-862D-1EF86FB0B278}' : ISpeechAudioStatus,
'{6D60EB64-ACED-40A6-BBF3-4E557F71DEE2}' : ISpeechRecoResultDispatch,
'{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}' : ISpeechDataKey,
'{E2AE5372-5D40-11D2-960E-00C04F8EE628}' : SpNotifyTranslator,
'{455F24E9-7396-4A16-9715-7C0FDBE3EFE3}' : SpNullPhoneConverter,
'{95252C5D-9E43-4F4A-9899-48EE73352F9F}' : ISpeechLexiconPronunciation,
'{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}' : ISpeechObjectTokenCategory,
'{C23FC28D-C55F-4720-8B32-91F73C2BD5D1}' : SpPhraseInfoBuilder,
'{3B151836-DF3A-4E0A-846C-D2ADC9334333}' : ISpeechPhraseInfoBuilder,
'{C3E4F353-433F-43D6-89A1-6A62A7054C3D}' : ISpeechPhoneConverter,
'{AB1890A0-E91F-11D2-BB91-00C04F8EE6C0}' : SpMMAudioEnum,
'{961559CF-4E67-4662-8BF0-D93F1FCD61B3}' : ISpeechPhraseInfo,
'{0655E396-25D0-11D3-9C26-00C04F8EF87C}' : SpLexicon,
'{A7BFE112-A4A0-48D9-B602-C313843F6964}' : ISpeechPhraseRule,
'{96749373-3391-11D2-9EE3-00C04F797396}' : SpResourceManager,
'{11B103D8-1142-4EDF-A093-82FB3915F8CC}' : ISpeechAudioBufferInfo,
'{96749377-3391-11D2-9EE3-00C04F797396}' : SpVoice,
'{5FB7EF7D-DFF4-468A-B6B7-2FCBD188F994}' : SpMemoryStream,
}
CLSIDToPackageMap = {}
win32com.client.CLSIDToClass.RegisterCLSIDsFromDict( CLSIDToClassMap )
VTablesToPackageMap = {}
VTablesToClassMap = {
'{CFF8E175-019E-11D3-A08E-00C04F8EF9B5}' : 'ISpeechAudio',
'{5B4FB971-B115-4DE1-AD97-E482E3BF6EE4}' : 'ISpProperties',
'{8FC6D974-C81E-4098-93C5-0147F61ED4D3}' : 'ISpRecognizer2',
'{C05C768F-FAE8-4EC2-8E07-338321C12452}' : 'ISpAudio',
'{7A1EF0D5-1581-4741-88E4-209A49F11A10}' : 'ISpeechWaveFormatEx',
'{4B37BC9E-9ED6-44A3-93D3-18F022B79EC3}' : 'ISpRecoGrammar2',
'{20B053BE-E235-43CD-9A2A-8D17A48B7842}' : 'ISpRecoResult',
'{EABCE657-75BC-44A2-AA7F-C56476742963}' : 'ISpeechGrammarRuleStateTransitions',
'{ACA16614-5D3D-11D2-960E-00C04F8EE628}' : 'ISpNotifyTranslator',
'{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}' : 'ISpeechGrammarRuleStateTransition',
'{1A9E9F4F-104F-4DB8-A115-EFD7FD0C97AE}' : 'ISpeechCustomStream',
'{2D3D3845-39AF-4850-BBF9-40B49780011D}' : 'ISpObjectTokenCategory',
'{BE7A9CC9-5F9E-11D2-960F-00C04F8EE628}' : 'ISpEventSink',
'{B2745EFD-42CE-48CA-81F1-A96E02538A90}' : 'ISpPhoneticAlphabetSelection',
'{BE7A9CCE-5F9E-11D2-960F-00C04F8EE628}' : 'ISpEventSource',
'{DF1B943C-5838-4AA2-8706-D7CD5B333499}' : 'ISpRecognizer3',
'{5EFF4AEF-8487-11D2-961C-00C04F8EE628}' : 'ISpNotifySource',
'{9047D593-01DD-4B72-81A3-E4A0CA69F407}' : 'ISpeechPhraseRules',
'{21B501A0-0EC7-46C9-92C3-A2BC784C54B9}' : 'ISpSerializeState',
'{79EAC9ED-BAF9-11CE-8C82-00AA004BA90B}' : 'IInternetSecurityMgrSite',
'{79EAC9EE-BAF9-11CE-8C82-00AA004BA90B}' : 'IInternetSecurityManager',
'{15806F6E-1D70-4B48-98E6-3B1A007509AB}' : 'ISpMMSysAudio',
'{AF67F125-AB39-4E93-B4A2-CC2E66E182A7}' : 'ISpeechFileStream',
'{0626B328-3478-467D-A0B3-D0853B93DDA3}' : 'ISpeechPhraseElements',
'{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}' : 'ISpeechPhraseAlternate',
'{269316D8-57BD-11D2-9EEE-00C04F797396}' : 'ISpeechVoice',
'{259684DC-37C3-11D2-9603-00C04F8EE628}' : 'ISpNotifySink',
'{6450336F-7D49-4CED-8097-49D6DEE37294}' : 'ISpeechBaseStream',
'{8FCEBC98-4E49-4067-9C6C-D86A0E092E3D}' : 'ISpPhraseAlt',
'{3C76AF6D-1FD7-4831-81D1-3B71D5A13C44}' : 'ISpeechMMSysAudio',
'{5B559F40-E952-11D2-BB91-00C04F8EE6C0}' : 'ISpObjectWithToken',
'{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}' : 'ISpeechLexiconWord',
'{133ADCD4-19B4-4020-9FDC-842E78253B17}' : 'ISpPhoneticAlphabetConverter',
'{08166B47-102E-4B23-A599-BDB98DBFD1F4}' : 'ISpeechPhraseProperties',
'{BEAD311C-52FF-437F-9464-6B21054CA73D}' : 'ISpRecoContext2',
'{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}' : 'ISpeechRecoResultTimes',
'{E6E9C590-3E18-40E3-8299-061F98BDE7C7}' : 'ISpeechAudioFormat',
'{8E0A246D-D3C8-45DE-8657-04290C458C3C}' : 'ISpeechRecoResult2',
'{0000000C-0000-0000-C000-000000000046}' : 'IStream',
'{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}' : 'ISpeechGrammarRule',
'{93384E18-5014-43D5-ADBB-A78E055926BD}' : 'ISpResourceManager',
'{12E3CCA9-7518-44C5-A5E7-BA5A79CB929E}' : 'ISpStream',
'{D4286F2C-EE67-45AE-B928-28D695362EDA}' : 'ISpeechGrammarRuleState',
'{E6176F96-E373-4801-B223-3B62C068C0B4}' : 'ISpeechPhraseElement',
'{2890A410-53A7-4FB5-94EC-06D4998E3D02}' : 'ISpeechPhraseReplacement',
'{8137828F-591A-4A42-BE58-49EA7EBAAC68}' : 'ISpGrammarBuilder',
'{1A5C0354-B621-4B5A-8791-D306ED379E53}' : 'ISpPhrase',
'{8BE47B07-57F6-11D2-9EEE-00C04F797396}' : 'ISpeechVoiceStatus',
'{38BC662F-2257-4525-959E-2069D2596C05}' : 'ISpeechPhraseReplacements',
'{580AA49D-7E1E-4809-B8E2-57DA806104B8}' : 'ISpeechRecoContext',
'{3DA7627A-C7AE-4B23-8708-638C50362C25}' : 'ISpeechLexicon',
'{72829128-5682-4704-A0D4-3E2BB6F2EAD3}' : 'ISpeechLexiconPronunciations',
'{AAEC54AF-8F85-4924-944D-B79D39D72E19}' : 'ISpeechXMLRecoResult',
'{AE39362B-45A8-4074-9B9E-CCF49AA2D0B6}' : 'ISpXMLRecoResult',
'{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}' : 'ISpeechRecognizer',
'{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}' : 'ISpeechGrammarRules',
'{C74A3ADC-B727-4500-A84A-B526721C8B8C}' : 'ISpeechObjectToken',
'{C2B5F241-DAA0-4507-9E16-5A1EAA2B7A5C}' : 'ISpRecognizer',
'{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}' : 'ISpStreamFormat',
'{ED2879CF-CED9-4EE6-A534-DE0191D5468D}' : 'ISpeechRecoResult',
'{6C44DF74-72B9-4992-A1EC-EF996E0422D4}' : 'ISpVoice',
'{EEB14B68-808B-4ABE-A5EA-B51DA7588008}' : 'ISpeechMemoryStream',
'{CE563D48-961E-4732-A2E1-378A42B430BE}' : 'ISpeechPhraseProperty',
'{9285B776-2E7B-4BC0-B53E-580EB6FA967F}' : 'ISpeechObjectTokens',
'{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}' : 'ISpeechTextSelectionInformation',
'{DA0CD0F9-14A2-4F09-8C2A-85CC48979345}' : 'ISpRecoCategory',
'{3DF681E2-EA56-11D9-8BDE-F66BAD1E3F3A}' : 'ISpShortcut',
'{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}' : 'ISpeechPhraseAlternates',
'{8D199862-415E-47D5-AC4F-FAA608B424E6}' : 'ISpeechLexiconWords',
'{6D5140C1-7436-11CE-8034-00AA006009FA}' : 'IServiceProvider',
'{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}' : 'ISpeechRecoGrammar',
'{06B64F9E-7FDA-11D2-B4F2-00C04F797396}' : 'IEnumSpObjectTokens',
'{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}' : 'ISpeechRecognizerStatus',
'{C62D9C91-7458-47F6-862D-1EF86FB0B278}' : 'ISpeechAudioStatus',
'{2177DB29-7F45-47D0-8554-067E91C80502}' : 'ISpRecoGrammar',
'{6D60EB64-ACED-40A6-BBF3-4E557F71DEE2}' : 'ISpeechRecoResultDispatch',
'{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}' : 'ISpeechDataKey',
'{95252C5D-9E43-4F4A-9899-48EE73352F9F}' : 'ISpeechLexiconPronunciation',
'{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}' : 'ISpeechObjectTokenCategory',
'{8445C581-0CAC-4A38-ABFE-9B2CE2826455}' : 'ISpPhoneConverter',
'{3B151836-DF3A-4E0A-846C-D2ADC9334333}' : 'ISpeechPhraseInfoBuilder',
'{00000101-0000-0000-C000-000000000046}' : 'IEnumString',
'{C3E4F353-433F-43D6-89A1-6A62A7054C3D}' : 'ISpeechPhoneConverter',
'{0C733A30-2A1C-11CE-ADE5-00AA0044773D}' : 'ISequentialStream',
'{678A932C-EA71-4446-9B41-78FDA6280A29}' : 'ISpStreamFormatConverter',
'{961559CF-4E67-4662-8BF0-D93F1FCD61B3}' : 'ISpeechPhraseInfo',
'{A7BFE112-A4A0-48D9-B602-C313843F6964}' : 'ISpeechPhraseRule',
'{DA41A7C2-5383-4DB2-916B-6C1719E3DB58}' : 'ISpLexicon',
'{11B103D8-1142-4EDF-A093-82FB3915F8CC}' : 'ISpeechAudioBufferInfo',
'{14056581-E16C-11D2-BB90-00C04F8EE6C0}' : 'ISpDataKey',
'{B9AC5783-FCD0-4B21-B119-B4F8DA8FD2C3}' : 'ISpeechResourceLoader',
'{F740A62F-7C15-489E-8234-940A33D9272D}' : 'ISpRecoContext',
'{14056589-E16C-11D2-BB90-00C04F8EE6C0}' : 'ISpObjectToken',
}
NamesToIIDMap = {
'ISpeechRecoResult' : '{ED2879CF-CED9-4EE6-A534-DE0191D5468D}',
'ISpRecoCategory' : '{DA0CD0F9-14A2-4F09-8C2A-85CC48979345}',
'ISpeechPhraseProperties' : '{08166B47-102E-4B23-A599-BDB98DBFD1F4}',
'ISpeechPhraseRules' : '{9047D593-01DD-4B72-81A3-E4A0CA69F407}',
'ISpeechMemoryStream' : '{EEB14B68-808B-4ABE-A5EA-B51DA7588008}',
'ISpeechAudioStatus' : '{C62D9C91-7458-47F6-862D-1EF86FB0B278}',
'ISpEventSink' : '{BE7A9CC9-5F9E-11D2-960F-00C04F8EE628}',
'ISpMMSysAudio' : '{15806F6E-1D70-4B48-98E6-3B1A007509AB}',
'ISpeechGrammarRule' : '{AFE719CF-5DD1-44F2-999C-7A399F1CFCCC}',
'ISpeechVoice' : '{269316D8-57BD-11D2-9EEE-00C04F797396}',
'ISpNotifySink' : '{259684DC-37C3-11D2-9603-00C04F8EE628}',
'ISpeechPhraseInfo' : '{961559CF-4E67-4662-8BF0-D93F1FCD61B3}',
'ISpPhrase' : '{1A5C0354-B621-4B5A-8791-D306ED379E53}',
'IEnumString' : '{00000101-0000-0000-C000-000000000046}',
'ISpProperties' : '{5B4FB971-B115-4DE1-AD97-E482E3BF6EE4}',
'ISpeechGrammarRuleState' : '{D4286F2C-EE67-45AE-B928-28D695362EDA}',
'ISpeechPhraseReplacements' : '{38BC662F-2257-4525-959E-2069D2596C05}',
'ISpeechAudio' : '{CFF8E175-019E-11D3-A08E-00C04F8EF9B5}',
'ISpeechLexiconPronunciations' : '{72829128-5682-4704-A0D4-3E2BB6F2EAD3}',
'ISpNotifyTranslator' : '{ACA16614-5D3D-11D2-960E-00C04F8EE628}',
'ISpeechWaveFormatEx' : '{7A1EF0D5-1581-4741-88E4-209A49F11A10}',
'ISpeechAudioBufferInfo' : '{11B103D8-1142-4EDF-A093-82FB3915F8CC}',
'ISpRecognizer2' : '{8FC6D974-C81E-4098-93C5-0147F61ED4D3}',
'ISpRecognizer3' : '{DF1B943C-5838-4AA2-8706-D7CD5B333499}',
'ISpeechPhraseElement' : '{E6176F96-E373-4801-B223-3B62C068C0B4}',
'IStream' : '{0000000C-0000-0000-C000-000000000046}',
'ISpEventSource' : '{BE7A9CCE-5F9E-11D2-960F-00C04F8EE628}',
'ISpRecoResult' : '{20B053BE-E235-43CD-9A2A-8D17A48B7842}',
'ISpRecoGrammar2' : '{4B37BC9E-9ED6-44A3-93D3-18F022B79EC3}',
'ISpeechRecoResultTimes' : '{62B3B8FB-F6E7-41BE-BDCB-056B1C29EFC0}',
'ISpNotifySource' : '{5EFF4AEF-8487-11D2-961C-00C04F8EE628}',
'ISpeechObjectTokens' : '{9285B776-2E7B-4BC0-B53E-580EB6FA967F}',
'ISpRecognizer' : '{C2B5F241-DAA0-4507-9E16-5A1EAA2B7A5C}',
'ISpRecoGrammar' : '{2177DB29-7F45-47D0-8554-067E91C80502}',
'ISpeechRecoResult2' : '{8E0A246D-D3C8-45DE-8657-04290C458C3C}',
'ISpObjectTokenCategory' : '{2D3D3845-39AF-4850-BBF9-40B49780011D}',
'ISpStreamFormatConverter' : '{678A932C-EA71-4446-9B41-78FDA6280A29}',
'ISpeechGrammarRules' : '{6FFA3B44-FC2D-40D1-8AFC-32911C7F1AD1}',
'ISpeechLexiconWords' : '{8D199862-415E-47D5-AC4F-FAA608B424E6}',
'IEnumSpObjectTokens' : '{06B64F9E-7FDA-11D2-B4F2-00C04F797396}',
'ISpeechTextSelectionInformation' : '{3B9C7E7A-6EEE-4DED-9092-11657279ADBE}',
'ISpPhoneticAlphabetSelection' : '{B2745EFD-42CE-48CA-81F1-A96E02538A90}',
'ISpeechRecoGrammar' : '{B6D6F79F-2158-4E50-B5BC-9A9CCD852A09}',
'ISpeechPhraseAlternate' : '{27864A2A-2B9F-4CB8-92D3-0D2722FD1E73}',
'ISpeechMMSysAudio' : '{3C76AF6D-1FD7-4831-81D1-3B71D5A13C44}',
'ISpeechRecoContext' : '{580AA49D-7E1E-4809-B8E2-57DA806104B8}',
'ISpeechLexiconPronunciation' : '{95252C5D-9E43-4F4A-9899-48EE73352F9F}',
'ISpeechPhraseReplacement' : '{2890A410-53A7-4FB5-94EC-06D4998E3D02}',
'ISpeechGrammarRuleStateTransitions' : '{EABCE657-75BC-44A2-AA7F-C56476742963}',
'ISpRecoContext2' : '{BEAD311C-52FF-437F-9464-6B21054CA73D}',
'ISpPhoneConverter' : '{8445C581-0CAC-4A38-ABFE-9B2CE2826455}',
'IInternetSecurityManager' : '{79EAC9EE-BAF9-11CE-8C82-00AA004BA90B}',
'ISpLexicon' : '{DA41A7C2-5383-4DB2-916B-6C1719E3DB58}',
'ISpeechRecognizer' : '{2D5F1C0C-BD75-4B08-9478-3B11FEA2586C}',
'ISpeechPhoneConverter' : '{C3E4F353-433F-43D6-89A1-6A62A7054C3D}',
'ISequentialStream' : '{0C733A30-2A1C-11CE-ADE5-00AA0044773D}',
'ISpStream' : '{12E3CCA9-7518-44C5-A5E7-BA5A79CB929E}',
'ISpeechLexiconWord' : '{4E5B933C-C9BE-48ED-8842-1EE51BB1D4FF}',
'ISpRecoContext' : '{F740A62F-7C15-489E-8234-940A33D9272D}',
'ISpeechVoiceStatus' : '{8BE47B07-57F6-11D2-9EEE-00C04F797396}',
'ISpeechPhraseProperty' : '{CE563D48-961E-4732-A2E1-378A42B430BE}',
'ISpeechPhraseRule' : '{A7BFE112-A4A0-48D9-B602-C313843F6964}',
'ISpeechPhraseElements' : '{0626B328-3478-467D-A0B3-D0853B93DDA3}',
'ISpPhoneticAlphabetConverter' : '{133ADCD4-19B4-4020-9FDC-842E78253B17}',
'ISpeechCustomStream' : '{1A9E9F4F-104F-4DB8-A115-EFD7FD0C97AE}',
'ISpStreamFormat' : '{BED530BE-2606-4F4D-A1C0-54C5CDA5566F}',
'ISpeechXMLRecoResult' : '{AAEC54AF-8F85-4924-944D-B79D39D72E19}',
'ISpeechPhraseAlternates' : '{B238B6D5-F276-4C3D-A6C1-2974801C3CC2}',
'ISpShortcut' : '{3DF681E2-EA56-11D9-8BDE-F66BAD1E3F3A}',
'ISpeechPhraseInfoBuilder' : '{3B151836-DF3A-4E0A-846C-D2ADC9334333}',
'ISpPhraseAlt' : '{8FCEBC98-4E49-4067-9C6C-D86A0E092E3D}',
'ISpGrammarBuilder' : '{8137828F-591A-4A42-BE58-49EA7EBAAC68}',
'ISpVoice' : '{6C44DF74-72B9-4992-A1EC-EF996E0422D4}',
'ISpDataKey' : '{14056581-E16C-11D2-BB90-00C04F8EE6C0}',
'ISpXMLRecoResult' : '{AE39362B-45A8-4074-9B9E-CCF49AA2D0B6}',
'ISpeechResourceLoader' : '{B9AC5783-FCD0-4B21-B119-B4F8DA8FD2C3}',
'ISpeechObjectTokenCategory' : '{CA7EAC50-2D01-4145-86D4-5AE7D70F4469}',
'ISpResourceManager' : '{93384E18-5014-43D5-ADBB-A78E055926BD}',
'IInternetSecurityMgrSite' : '{79EAC9ED-BAF9-11CE-8C82-00AA004BA90B}',
'ISpeechLexicon' : '{3DA7627A-C7AE-4B23-8708-638C50362C25}',
'ISpeechAudioFormat' : '{E6E9C590-3E18-40E3-8299-061F98BDE7C7}',
'ISpeechBaseStream' : '{6450336F-7D49-4CED-8097-49D6DEE37294}',
'ISpeechRecognizerStatus' : '{BFF9E781-53EC-484E-BB8A-0E1B5551E35C}',
'ISpeechDataKey' : '{CE17C09B-4EFA-44D5-A4C9-59D9585AB0CD}',
'_ISpeechVoiceEvents' : '{A372ACD1-3BEF-4BBD-8FFB-CB3E2B416AF8}',
'ISpeechRecoResultDispatch' : '{6D60EB64-ACED-40A6-BBF3-4E557F71DEE2}',
'_ISpeechRecoContextEvents' : '{7B8FCB42-0E9D-4F00-A048-7B04D6179D3D}',
'ISpeechGrammarRuleStateTransition' : '{CAFD1DB1-41D1-4A06-9863-E2E81DA17A9A}',
'ISpObjectToken' : '{14056589-E16C-11D2-BB90-00C04F8EE6C0}',
'IServiceProvider' : '{6D5140C1-7436-11CE-8034-00AA006009FA}',
'ISpAudio' : '{C05C768F-FAE8-4EC2-8E07-338321C12452}',
'ISpeechFileStream' : '{AF67F125-AB39-4E93-B4A2-CC2E66E182A7}',
'ISpSerializeState' : '{21B501A0-0EC7-46C9-92C3-A2BC784C54B9}',
'ISpObjectWithToken' : '{5B559F40-E952-11D2-BB91-00C04F8EE6C0}',
'ISpeechObjectToken' : '{C74A3ADC-B727-4500-A84A-B526721C8B8C}',
}
win32com.client.constants.__dicts__.append(constants.__dict__)
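# This module is a makepy-generated wrapper for the Microsoft Speech API
# (SAPI 5.4) type library. A minimal usage sketch, assuming pywin32 and a
# SAPI voice are installed:
#
#   import win32com.client
#   voice = win32com.client.Dispatch("SAPI.SpVoice")  # dispatches to ISpeechVoice above
#   voice.Speak("hello")                              # Speak(Text, Flags) -> stream number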
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/win32com/gen_py/C866CA3A-32F7-11D2-9602-00C04F8EE628x0x5x4.py
|
Python
|
gpl-3.0
| 308,803 | 0.056677 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import NestedSet, get_root_of

class SupplierGroup(NestedSet):
	nsm_parent_field = 'parent_supplier_group'

	def validate(self):
		if not self.parent_supplier_group:
			self.parent_supplier_group = get_root_of("Supplier Group")

	def on_update(self):
		NestedSet.on_update(self)
		self.validate_one_root()

	def on_trash(self):
		NestedSet.validate_if_child_exists(self)
		frappe.utils.nestedset.update_nsm(self)
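
# A minimal usage sketch (hypothetical values, assuming a running Frappe site):
#
#   doc = frappe.get_doc({
#       "doctype": "Supplier Group",
#       "supplier_group_name": "Raw Material",
#   })
#   doc.insert()  # validate() fills parent_supplier_group with the tree root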
|
ovresko/erpnext
|
erpnext/setup/doctype/supplier_group/supplier_group.py
|
Python
|
gpl-3.0
| 639 | 0.017214 |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 21:31:53 2015
Create a random synthetic velocity profile plus linear first guesses
@author: alex
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomial order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
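# Hedged usage sketch (added for illustration, not part of the original
# script): passing deriv=1 returns a smoothed first derivative; `rate`
# (samples per unit of t) rescales it from per-sample to physical units.
# All names below are illustrative.
_t_demo = np.linspace(0, 2*np.pi, 200)
_y_demo = np.sin(_t_demo) + np.random.normal(0, 0.05, _t_demo.shape)
_dy_demo = savitzky_golay(_y_demo, 21, 3, deriv=1, rate=1.0/(_t_demo[1]-_t_demo[0]))
# _dy_demo now approximates cos(t) despite the injected noise.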
plt.close('all')
random.seed(2)
X = range(256)
Z = np.linspace(0,1000,256)
pos = 50
posVpVs = 1.7
L = np.array([pos])
vpvs = np.array([posVpVs])
for x in X[1:]:
pos += random.choice((-0.9,1)) #random.choice((-1,1))
posVpVs += random.choice((-0.02,0.02))
L=np.append(L,pos)
vpvs = np.append(vpvs,posVpVs)
L=70*L
Vp = savitzky_golay(L, 51, 3) # window size 51, polynomial order 3
A = np.array([ Z, np.ones(256)])
Vs = Vp/savitzky_golay(vpvs, 51, 3) # window size 51, polynomial order 3
w = np.linalg.lstsq(A.T,Vp)[0] # obtaining the parameters
# plotting the line
lineP = w[0]*Z+w[1]+500 # regression line
w = np.linalg.lstsq(A.T,Vs)[0] # obtaining the parameters
# plotting the line
lineS = w[0]*Z+w[1]-250 # regression line
plt.figure()
plt.hold(True)
plt.plot(L,Z,label="Random walk")
plt.plot(Vp,Z,linewidth=4,label="P wave velocity from this random walk")
plt.plot(lineP,Z,linewidth=4,label="First guess")
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.legend()
plt.figure()
plt.hold(True)
plt.plot(vpvs,Z,linewidth=4,label="Random walk vp/vs")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
plt.figure()
plt.hold(True)
plt.plot(Vs,Z,linewidth=4,label="S wave velocity from random vp/vs")
plt.plot(lineS,Z,linewidth=4,label="First guess")
plt.legend()
ax = plt.axes()
ax.set_ylim(Z[0],Z[-1])
ax.invert_yaxis()
# Save profiles
np.savetxt("dataExample/realProfileP.txt",np.dstack((Z,Vp))[0])
np.savetxt("dataExample/realProfileS.txt",np.dstack((Z,Vs))[0])
np.savetxt("dataExample/firstGuessP.txt",np.dstack((Z,lineP))[0])
np.savetxt("dataExample/firstGuessS.txt",np.dstack((Z,lineS))[0])
#####################################################################
coordShotsX=[300,500]
coordShotsY=[400]
coordShotsZ=[650]
coordStatsX=[200,300,400,500,600]
coordStatsY=[200,300,400,500,600]
coordStatsZ=[200,300,400,500,600]
Xshots=[]
Yshots=[]
Zshots=[]
Xstats=[]
Ystats=[]
Zstats=[]
#Open a file in write mode:
fo = open("dataExample/coordShots.txt", "w+")
for coordX in coordShotsX:
for coordY in coordShotsY:
for coordZ in coordShotsZ:
Xshots.append(coordX)
Yshots.append(coordY)
Zshots.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
#Open a file in write mode:
fo = open("dataExample/coordStats.txt", "w+")
for coordX in coordStatsX:
for coordY in coordStatsY:
for coordZ in coordStatsZ:
Xstats.append(coordX)
Ystats.append(coordY)
Zstats.append(coordZ)
fo.write(str(coordX)+" "+str(coordY)+" "+str(coordZ)+"\n")
# Close opened file
fo.close()
fig = plt.figure()
ax = fig.gca(projection='3d') #Axes3D(fig)
ax.hold(True)
ax.scatter(Xstats,Ystats,Zstats,zdir='z',s=20,c='b')
if (len(coordShotsX) > 3):
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=20,c='r',marker='^')
else:
ax.scatter(Xshots,Yshots,Zshots,zdir='z',s=200,c='r',marker='^')
ax.set_xlim3d(min(min(Xshots),min(Xstats))-100,max(max(Xshots),max(Xstats))+100)
ax.set_ylim3d(min(min(Yshots),min(Ystats))-100,max(max(Yshots),max(Ystats))+100)
ax.set_zlim3d(min(min(Zshots),min(Zstats))-100,max(max(Zshots),max(Zstats))+100)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
ax.set_xlabel('X (m)')
ax.set_ylabel('Y (m)')
ax.set_zlabel('Z (m)')
ax.set_title('Geometry')
ax.invert_zaxis()
|
bottero/IMCMCrun
|
examples/Synthetic1/createSyntheticRandomProfile.py
|
Python
|
mit
| 6,749 | 0.017188 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
import os
import stat
import shutil
import errno
import sys
import inspect
import imp
import re
import traceback
import tempfile
import json
from contextlib import contextmanager
from six import string_types
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from types import ModuleType
import yaml
import llnl.util.lang
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, install
import spack.config
import spack.caches
import spack.error
import spack.spec
from spack.provider_index import ProviderIndex
from spack.util.path import canonicalize_path
from spack.util.naming import NamespaceTrie, valid_module_name
from spack.util.naming import mod_to_class, possible_spack_module_names
#: Super-namespace for all packages.
#: Package modules are imported as spack.pkg.<namespace>.<pkg-name>.
repo_namespace = 'spack.pkg'
#
# These names describe how repos should be laid out in the filesystem.
#
repo_config_name = 'repo.yaml' # Top-level filename for repo config.
repo_index_name = 'index.yaml' # Top-level filename for repository index.
packages_dir_name = 'packages' # Top-level repo directory containing pkgs.
package_file_name = 'package.py' # Filename for packages in a repository.
#: Guaranteed unused default value for some functions.
NOT_PROVIDED = object()
#: Code in ``_package_prepend`` is prepended to imported packages.
#:
#: Spack packages were originally expected to call `from spack import *`
#: themselves, but it became difficult to manage, and imports in the Spack
#: core polluted the top-level namespace with package symbols this way. To
#: solve this, the top-level ``spack`` package contains very few symbols
#: of its own, and importing ``*`` is essentially a no-op. The common
#: routines and directives that packages need are now in ``spack.pkgkit``,
#: and the import system forces packages to automatically include
#: this. This way, old packages that call ``from spack import *`` will
#: continue to work without modification, but it's no longer required.
#:
#: TODO: At some point in the future, consider removing ``from spack import *``
#: TODO: from packages and shifting to from ``spack.pkgkit import *``
_package_prepend = 'from spack.pkgkit import *'
def _autospec(function):
"""Decorator that automatically converts the argument of a single-arg
function to a Spec."""
def converter(self, spec_like, *args, **kwargs):
if not isinstance(spec_like, spack.spec.Spec):
spec_like = spack.spec.Spec(spec_like)
return function(self, spec_like, *args, **kwargs)
return converter
class SpackNamespace(ModuleType):
""" Allow lazy loading of modules."""
def __init__(self, namespace):
super(SpackNamespace, self).__init__(namespace)
self.__file__ = "(spack namespace)"
self.__path__ = []
self.__name__ = namespace
self.__package__ = namespace
self.__modules = {}
def __getattr__(self, name):
"""Getattr lazily loads modules if they're not already loaded."""
submodule = self.__package__ + '.' + name
setattr(self, name, __import__(submodule))
return getattr(self, name)
class FastPackageChecker(Mapping):
"""Cache that maps package names to the stats obtained on the
'package.py' files associated with them.
For each repository a cache is maintained at class level, and shared among
all instances referring to it. Update of the global cache is done lazily
during instance initialization.
"""
#: Global cache, reused by every instance
_paths_cache = {}
def __init__(self, packages_path):
# The path of the repository managed by this instance
self.packages_path = packages_path
# If the cache we need is not there yet, then build it appropriately
if packages_path not in self._paths_cache:
self._paths_cache[packages_path] = self._create_new_cache()
#: Reference to the appropriate entry in the global cache
self._packages_to_stats = self._paths_cache[packages_path]
def _create_new_cache(self):
"""Create a new cache for packages in a repo.
The implementation here should try to minimize filesystem
calls. At the moment, it is O(number of packages) and makes
about one stat call per package. This is reasonably fast, and
avoids actually importing packages in Spack, which is slow.
"""
# Create a dictionary that will store the mapping between a
# package name and its stat info
cache = {}
for pkg_name in os.listdir(self.packages_path):
# Skip non-directories in the package root.
pkg_dir = os.path.join(self.packages_path, pkg_name)
# Warn about invalid names that look like packages.
if not valid_module_name(pkg_name):
msg = 'Skipping package at {0}. '
msg += '"{1}" is not a valid Spack module name.'
tty.warn(msg.format(pkg_dir, pkg_name))
continue
# Construct the file name from the directory
pkg_file = os.path.join(
self.packages_path, pkg_name, package_file_name
)
# Use stat here to avoid lots of calls to the filesystem.
try:
sinfo = os.stat(pkg_file)
except OSError as e:
if e.errno == errno.ENOENT:
# No package.py file here.
continue
elif e.errno == errno.EACCES:
tty.warn("Can't read package file %s." % pkg_file)
continue
raise e
# If it's not a file, skip it.
if stat.S_ISDIR(sinfo.st_mode):
continue
# If it is a file, then save the stats under the
# appropriate key
cache[pkg_name] = sinfo
return cache
def __getitem__(self, item):
return self._packages_to_stats[item]
def __iter__(self):
return iter(self._packages_to_stats)
def __len__(self):
return len(self._packages_to_stats)
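# Illustrative sketch (an assumption, not Spack code): the checker behaves
# like a read-only mapping from package name to its os.stat() result, which
# is how the index caches below decide which packages are stale:
# checker = FastPackageChecker('/path/to/repo/packages')
# stale = [n for n, s in checker.items() if s.st_mtime > cached_index_mtime]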
class TagIndex(Mapping):
"""Maps tags to list of packages."""
def __init__(self):
self._tag_dict = collections.defaultdict(list)
def to_json(self, stream):
json.dump({'tags': self._tag_dict}, stream)
@staticmethod
def from_json(stream):
d = json.load(stream)
r = TagIndex()
for tag, list in d['tags'].items():
r[tag].extend(list)
return r
def __getitem__(self, item):
return self._tag_dict[item]
def __iter__(self):
return iter(self._tag_dict)
def __len__(self):
return len(self._tag_dict)
def update_package(self, pkg_name):
"""Updates a package in the tag index.
Args:
pkg_name (str): name of the package to be updated in the index
"""
package = path.get(pkg_name)
# Remove the package from the list of packages, if present
for pkg_list in self._tag_dict.values():
if pkg_name in pkg_list:
pkg_list.remove(pkg_name)
# Add it again under the appropriate tags
for tag in getattr(package, 'tags', []):
self._tag_dict[tag].append(package.name)
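# Illustrative round-trip sketch (assumed usage; the `io` import and the
# names below are hypothetical): TagIndex maps each tag to a list of
# package names and serializes through to_json()/from_json().
# import io
# ti = TagIndex()
# ti['mpi'].extend(['mpich', 'openmpi']) # defaultdict creates the list
# buf = io.StringIO(); ti.to_json(buf); buf.seek(0)
# assert TagIndex.from_json(buf)['mpi'] == ['mpich', 'openmpi']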
@llnl.util.lang.memoized
def make_provider_index_cache(packages_path, namespace):
"""Lazily updates the provider index cache associated with a repository,
if need be, then returns it. Caches results for later look-ups.
Args:
packages_path: path of the repository
namespace: namespace of the repository
Returns:
instance of ProviderIndex
"""
# Map that goes from package names to stat info
fast_package_checker = FastPackageChecker(packages_path)
# Filename of the provider index cache
cache_filename = 'providers/{0}-index.yaml'.format(namespace)
# Compute which packages needs to be updated in the cache
misc_cache = spack.caches.misc_cache
index_mtime = misc_cache.mtime(cache_filename)
needs_update = [
x for x, sinfo in fast_package_checker.items()
if sinfo.st_mtime > index_mtime
]
# Read the old ProviderIndex, or make a new one.
index_existed = misc_cache.init_entry(cache_filename)
if index_existed and not needs_update:
# If the provider index exists and doesn't need an update
# just read from it
with misc_cache.read_transaction(cache_filename) as f:
index = ProviderIndex.from_yaml(f)
else:
# Otherwise we need a write transaction to update it
with misc_cache.write_transaction(cache_filename) as (old, new):
index = ProviderIndex.from_yaml(old) if old else ProviderIndex()
for pkg_name in needs_update:
namespaced_name = '{0}.{1}'.format(namespace, pkg_name)
index.remove_provider(namespaced_name)
index.update(namespaced_name)
index.to_yaml(new)
return index
@llnl.util.lang.memoized
def make_tag_index_cache(packages_path, namespace):
"""Lazily updates the tag index cache associated with a repository,
if need be, then returns it. Caches results for later look-ups.
Args:
packages_path: path of the repository
namespace: namespace of the repository
Returns:
instance of TagIndex
"""
# Map that goes from package names to stat info
fast_package_checker = FastPackageChecker(packages_path)
# Filename of the provider index cache
cache_filename = 'tags/{0}-index.json'.format(namespace)
# Compute which packages needs to be updated in the cache
misc_cache = spack.caches.misc_cache
index_mtime = misc_cache.mtime(cache_filename)
needs_update = [
x for x, sinfo in fast_package_checker.items()
if sinfo.st_mtime > index_mtime
]
# Read the old ProviderIndex, or make a new one.
index_existed = misc_cache.init_entry(cache_filename)
if index_existed and not needs_update:
# If the provider index exists and doesn't need an update
# just read from it
with misc_cache.read_transaction(cache_filename) as f:
index = TagIndex.from_json(f)
else:
# Otherwise we need a write transaction to update it
with misc_cache.write_transaction(cache_filename) as (old, new):
index = TagIndex.from_json(old) if old else TagIndex()
for pkg_name in needs_update:
namespaced_name = '{0}.{1}'.format(namespace, pkg_name)
index.update_package(namespaced_name)
index.to_json(new)
return index
class RepoPath(object):
"""A RepoPath is a list of repos that function as one.
It functions exactly like a Repo, but it operates on the combined
results of the Repos in its list instead of on a single package
repository.
Args:
repos (list): list Repo objects or paths to put in this RepoPath
Optional Args:
repo_namespace (str): super-namespace for all packages in this
RepoPath (used when importing repos as modules)
"""
def __init__(self, *repos, **kwargs):
self.super_namespace = kwargs.get('namespace', repo_namespace)
self.repos = []
self.by_namespace = NamespaceTrie()
self.by_path = {}
self._all_package_names = None
self._provider_index = None
# Add each repo to this path.
for repo in repos:
try:
if isinstance(repo, string_types):
repo = Repo(repo, self.super_namespace)
self.put_last(repo)
except RepoError as e:
tty.warn("Failed to initialize repository: '%s'." % repo,
e.message,
"To remove the bad repository, run this command:",
" spack repo rm %s" % repo)
def _add(self, repo):
"""Add a repository to the namespace and path indexes.
Checks for duplicates -- two repos can't have the same root
directory, and they can't provide the same namespace.
"""
if repo.root in self.by_path:
raise DuplicateRepoError("Duplicate repository: '%s'" % repo.root)
if repo.namespace in self.by_namespace:
raise DuplicateRepoError(
"Package repos '%s' and '%s' both provide namespace %s"
% (repo.root, self.by_namespace[repo.namespace].root,
repo.namespace))
# Add repo to the pkg indexes
self.by_namespace[repo.full_namespace] = repo
self.by_path[repo.root] = repo
def put_first(self, repo):
"""Add repo first in the search path."""
self._add(repo)
self.repos.insert(0, repo)
def put_last(self, repo):
"""Add repo last in the search path."""
self._add(repo)
self.repos.append(repo)
def remove(self, repo):
"""Remove a repo from the search path."""
if repo in self.repos:
self.repos.remove(repo)
def get_repo(self, namespace, default=NOT_PROVIDED):
"""Get a repository by namespace.
Arguments:
namespace:
Look up this namespace in the RepoPath, and return it if found.
Optional Arguments:
default:
If default is provided, return it when the namespace
isn't found. If not, raise an UnknownNamespaceError.
"""
fullspace = '%s.%s' % (self.super_namespace, namespace)
if fullspace not in self.by_namespace:
if default == NOT_PROVIDED:
raise UnknownNamespaceError(namespace)
return default
return self.by_namespace[fullspace]
def first_repo(self):
"""Get the first repo in precedence order."""
return self.repos[0] if self.repos else None
def all_package_names(self):
"""Return all unique package names in all repositories."""
if self._all_package_names is None:
all_pkgs = set()
for repo in self.repos:
for name in repo.all_package_names():
all_pkgs.add(name)
self._all_package_names = sorted(all_pkgs, key=lambda n: n.lower())
return self._all_package_names
def packages_with_tags(self, *tags):
r = set()
for repo in self.repos:
r |= set(repo.packages_with_tags(*tags))
return sorted(r)
def all_packages(self):
for name in self.all_package_names():
yield self.get(name)
@property
def provider_index(self):
"""Merged ProviderIndex from all Repos in the RepoPath."""
if self._provider_index is None:
self._provider_index = ProviderIndex()
for repo in reversed(self.repos):
self._provider_index.merge(repo.provider_index)
return self._provider_index
@_autospec
def providers_for(self, vpkg_spec):
providers = self.provider_index.providers_for(vpkg_spec)
if not providers:
raise UnknownPackageError(vpkg_spec.name)
return providers
@_autospec
def extensions_for(self, extendee_spec):
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def find_module(self, fullname, path=None):
"""Implements precedence for overlaid namespaces.
Loop checks each namespace in self.repos for packages, and
also handles loading empty containing namespaces.
"""
# namespaces are added to repo, and package modules are leaves.
namespace, dot, module_name = fullname.rpartition('.')
# If it's a module in some repo, or if it is the repo's
# namespace, let the repo handle it.
for repo in self.repos:
if namespace == repo.full_namespace:
if repo.real_name(module_name):
return repo
elif fullname == repo.full_namespace:
return repo
# No repo provides the namespace, but it is a valid prefix of
# something in the RepoPath.
if self.by_namespace.is_prefix(fullname):
return self
return None
def load_module(self, fullname):
"""Handles loading container namespaces when necessary.
See ``Repo`` for how actual package modules are loaded.
"""
if fullname in sys.modules:
return sys.modules[fullname]
if not self.by_namespace.is_prefix(fullname):
raise ImportError("No such Spack repo: %s" % fullname)
module = SpackNamespace(fullname)
module.__loader__ = self
sys.modules[fullname] = module
return module
def repo_for_pkg(self, spec):
"""Given a spec, get the repository for its package."""
# We don't @_autospec this function b/c it's called very frequently
# and we want to avoid parsing str's into Specs unnecessarily.
namespace = None
if isinstance(spec, spack.spec.Spec):
namespace = spec.namespace
name = spec.name
else:
# handle strings directly for speed instead of @_autospec'ing
namespace, _, name = spec.rpartition('.')
# If the spec already has a namespace, then return the
# corresponding repo if we know about it.
if namespace:
fullspace = '%s.%s' % (self.super_namespace, namespace)
if fullspace not in self.by_namespace:
raise UnknownNamespaceError(spec.namespace)
return self.by_namespace[fullspace]
# If there's no namespace, search in the RepoPath.
for repo in self.repos:
if name in repo:
return repo
# If the package isn't in any repo, return the one with
# highest precedence. This is for commands like `spack edit`
# that can operate on packages that don't exist yet.
return self.first_repo()
@_autospec
def get(self, spec, new=False):
"""Find a repo that contains the supplied spec's package.
Raises UnknownPackageError if not found.
"""
return self.repo_for_pkg(spec).get(spec)
def get_pkg_class(self, pkg_name):
"""Find a class for the spec's package and return the class object."""
return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
@_autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
This dumps the package file and any associated patch files.
Raises UnknownPackageError if not found.
"""
return self.repo_for_pkg(spec).dump_provenance(spec, path)
def dirname_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
def filename_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
def exists(self, pkg_name):
"""Whether a package with the given name exists in the path's repos.
Note that virtual packages do not "exist".
"""
return any(repo.exists(pkg_name) for repo in self.repos)
def is_virtual(self, pkg_name):
"""True if the package with this name is virtual, False otherwise."""
return pkg_name in self.provider_index
def __contains__(self, pkg_name):
return self.exists(pkg_name)
class Repo(object):
"""Class representing a package repository in the filesystem.
Each package repository must have a top-level configuration file
called `repo.yaml`.
Currently, `repo.yaml` must define:
`namespace`:
A Python namespace where the repository's packages should live.
"""
def __init__(self, root, namespace=repo_namespace):
"""Instantiate a package repository from a filesystem path.
Arguments:
root The root directory of the repository.
namespace A super-namespace that will contain the repo-defined
namespace (this is generally just `spack.pkg`). The
super-namespace is Spack's way of separating repositories
from other python namespaces.
"""
# Root directory, containing _repo.yaml and package dirs
# Allow roots to be spack-relative by starting with '$spack'
self.root = canonicalize_path(root)
# super-namespace for all packages in the Repo
self.super_namespace = namespace
# check and raise BadRepoError on fail.
def check(condition, msg):
if not condition:
raise BadRepoError(msg)
# Validate repository layout.
self.config_file = os.path.join(self.root, repo_config_name)
check(os.path.isfile(self.config_file),
"No %s found in '%s'" % (repo_config_name, root))
self.packages_path = os.path.join(self.root, packages_dir_name)
check(os.path.isdir(self.packages_path),
"No directory '%s' found in '%s'" % (packages_dir_name, root))
# Read configuration and validate namespace
config = self._read_config()
check('namespace' in config, '%s must define a namespace.'
% os.path.join(root, repo_config_name))
self.namespace = config['namespace']
check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
("Invalid namespace '%s' in repo '%s'. "
% (self.namespace, self.root)) +
"Namespaces must be valid python identifiers separated by '.'")
# Set up 'full_namespace' to include the super-namespace
if self.super_namespace:
self.full_namespace = "%s.%s" % (
self.super_namespace, self.namespace)
else:
self.full_namespace = self.namespace
# Keep name components around for checking prefixes.
self._names = self.full_namespace.split('.')
# These are internal cache variables.
self._modules = {}
self._classes = {}
self._instances = {}
# Maps that goes from package name to corresponding file stat
self._fast_package_checker = None
# Index of virtual dependencies, computed lazily
self._provider_index = None
# Index of tags, computed lazily
self._tag_index = None
# make sure the namespace for packages in this repo exists.
self._create_namespace()
def _create_namespace(self):
"""Create this repo's namespace module and insert it into sys.modules.
Ensures that modules loaded via the repo have a home, and that
we don't get runtime warnings from Python's module system.
"""
parent = None
for l in range(1, len(self._names) + 1):
ns = '.'.join(self._names[:l])
if ns not in sys.modules:
module = SpackNamespace(ns)
module.__loader__ = self
sys.modules[ns] = module
# Ensure the namespace is an attribute of its parent,
# if it has not been set by something else already.
#
# This ensures that we can do things like:
# import spack.pkg.builtin.mpich as mpich
if parent:
modname = self._names[l - 1]
setattr(parent, modname, module)
else:
# no need to set up a module
module = sys.modules[ns]
# but keep track of the parent in this loop
parent = module
def real_name(self, import_name):
"""Allow users to import Spack packages using Python identifiers.
A python identifier might map to many different Spack package
names due to hyphen/underscore ambiguity.
Easy example:
num3proxy -> 3proxy
Ambiguous:
foo_bar -> foo_bar, foo-bar
More ambiguous:
foo_bar_baz -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz
"""
if import_name in self:
return import_name
options = possible_spack_module_names(import_name)
options.remove(import_name)
for name in options:
if name in self:
return name
return None
def is_prefix(self, fullname):
"""True if fullname is a prefix of this Repo's namespace."""
parts = fullname.split('.')
return self._names[:len(parts)] == parts
def find_module(self, fullname, path=None):
"""Python find_module import hook.
Returns this Repo if it can load the module; None if not.
"""
if self.is_prefix(fullname):
return self
namespace, dot, module_name = fullname.rpartition('.')
if namespace == self.full_namespace:
if self.real_name(module_name):
return self
return None
def load_module(self, fullname):
"""Python importer load hook.
Tries to load the module; raises an ImportError if it can't.
"""
if fullname in sys.modules:
return sys.modules[fullname]
namespace, dot, module_name = fullname.rpartition('.')
if self.is_prefix(fullname):
module = SpackNamespace(fullname)
elif namespace == self.full_namespace:
real_name = self.real_name(module_name)
if not real_name:
raise ImportError("No module %s in %s" % (module_name, self))
module = self._get_pkg_module(real_name)
else:
raise ImportError("No module %s in %s" % (fullname, self))
module.__loader__ = self
sys.modules[fullname] = module
if namespace != fullname:
parent = sys.modules[namespace]
if not hasattr(parent, module_name):
setattr(parent, module_name, module)
return module
def _read_config(self):
"""Check for a YAML config file in this db's root directory."""
try:
with open(self.config_file) as reponame_file:
yaml_data = yaml.load(reponame_file)
if (not yaml_data or 'repo' not in yaml_data or
not isinstance(yaml_data['repo'], dict)):
tty.die("Invalid %s in repository %s" % (
repo_config_name, self.root))
return yaml_data['repo']
except IOError:
tty.die("Error reading %s when opening %s"
% (self.config_file, self.root))
@_autospec
def get(self, spec):
if not self.exists(spec.name):
raise UnknownPackageError(spec.name)
if spec.namespace and spec.namespace != self.namespace:
raise UnknownPackageError(
"Repository %s does not contain package %s"
% (self.namespace, spec.fullname))
package_class = self.get_pkg_class(spec.name)
try:
return package_class(spec)
except spack.error.SpackError:
# pass these through as their error messages will be fine.
raise
except Exception:
# make sure other errors in constructors hit the error
# handler by wrapping them
if spack.config.get('config:debug'):
sys.excepthook(*sys.exc_info())
raise FailedConstructorError(spec.fullname, *sys.exc_info())
@_autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
This dumps the package file and any associated patch files.
Raises UnknownPackageError if not found.
"""
# Some preliminary checks.
if spec.virtual:
raise UnknownPackageError(spec.name)
if spec.namespace and spec.namespace != self.namespace:
raise UnknownPackageError(
"Repository %s does not contain package %s."
% (self.namespace, spec.fullname))
# Install any patch files needed by packages.
mkdirp(path)
for spec, patches in spec.package.patches.items():
for patch in patches:
if patch.path:
if os.path.exists(patch.path):
install(patch.path, path)
else:
tty.warn("Patch file did not exist: %s" % patch.path)
# Install the package.py file itself.
install(self.filename_for_package_name(spec), path)
def purge(self):
"""Clear entire package instance cache."""
self._instances.clear()
@property
def provider_index(self):
"""A provider index with names *specific* to this repo."""
if self._provider_index is None:
self._provider_index = make_provider_index_cache(
self.packages_path, self.namespace
)
return self._provider_index
@property
def tag_index(self):
"""A tag index with names *specific* to this repo."""
if self._tag_index is None:
self._tag_index = make_tag_index_cache(
self.packages_path, self.namespace
)
return self._tag_index
@_autospec
def providers_for(self, vpkg_spec):
providers = self.provider_index.providers_for(vpkg_spec)
if not providers:
raise UnknownPackageError(vpkg_spec.name)
return providers
@_autospec
def extensions_for(self, extendee_spec):
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def _check_namespace(self, spec):
"""Check that the spec's namespace is the same as this repository's."""
if spec.namespace and spec.namespace != self.namespace:
raise UnknownNamespaceError(spec.namespace)
@_autospec
def dirname_for_package_name(self, spec):
"""Get the directory name for a particular package. This is the
directory that contains its package.py file."""
self._check_namespace(spec)
return os.path.join(self.packages_path, spec.name)
@_autospec
def filename_for_package_name(self, spec):
"""Get the filename for the module we should load for a particular
package. Packages for a Repo live in
``$root/<package_name>/package.py``
This will return a proper package.py path even if the
package doesn't exist yet, so callers will need to ensure
the package exists before importing.
"""
self._check_namespace(spec)
pkg_dir = self.dirname_for_package_name(spec.name)
return os.path.join(pkg_dir, package_file_name)
@property
def _pkg_checker(self):
if self._fast_package_checker is None:
self._fast_package_checker = FastPackageChecker(self.packages_path)
return self._fast_package_checker
def all_package_names(self):
"""Returns a sorted list of all package names in the Repo."""
return sorted(self._pkg_checker.keys())
def packages_with_tags(self, *tags):
v = set(self.all_package_names())
index = self.tag_index
for t in tags:
v &= set(index[t])
return sorted(v)
def all_packages(self):
"""Iterator over all packages in the repository.
Use this with care, because loading packages is slow.
"""
for name in self.all_package_names():
yield self.get(name)
def exists(self, pkg_name):
"""Whether a package with the supplied name exists."""
return pkg_name in self._pkg_checker
def is_virtual(self, pkg_name):
"""True if the package with this name is virtual, False otherwise."""
return self.provider_index.contains(pkg_name)
def _get_pkg_module(self, pkg_name):
"""Create a module for a particular package.
This caches the module within this Repo *instance*. It does
*not* add it to ``sys.modules``. So, you can construct
multiple Repos for testing and ensure that the module will be
loaded once per repo.
"""
if pkg_name not in self._modules:
file_path = self.filename_for_package_name(pkg_name)
if not os.path.exists(file_path):
raise UnknownPackageError(pkg_name, self)
if not os.path.isfile(file_path):
tty.die("Something's wrong. '%s' is not a file!" % file_path)
if not os.access(file_path, os.R_OK):
tty.die("Cannot read '%s'!" % file_path)
# e.g., spack.pkg.builtin.mpich
fullname = "%s.%s" % (self.full_namespace, pkg_name)
try:
with import_lock():
with prepend_open(file_path, text=_package_prepend) as f:
module = imp.load_source(fullname, file_path, f)
except SyntaxError as e:
# SyntaxError strips the path from the filename so we need to
# manually construct the error message in order to give the
# user the correct package.py where the syntax error is located
raise SyntaxError('invalid syntax in {0:}, line {1:}'
''.format(file_path, e.lineno))
module.__package__ = self.full_namespace
module.__loader__ = self
self._modules[pkg_name] = module
return self._modules[pkg_name]
def get_pkg_class(self, pkg_name):
"""Get the class for the package out of its module.
First loads (or fetches from cache) a module for the
package. Then extracts the package class from the module
according to Spack's naming convention.
"""
namespace, _, pkg_name = pkg_name.rpartition('.')
if namespace and (namespace != self.namespace):
raise InvalidNamespaceError('Invalid namespace for %s repo: %s'
% (self.namespace, namespace))
class_name = mod_to_class(pkg_name)
module = self._get_pkg_module(pkg_name)
cls = getattr(module, class_name)
if not inspect.isclass(cls):
tty.die("%s.%s is not a class" % (pkg_name, class_name))
return cls
def __str__(self):
return "[Repo '%s' at '%s']" % (self.namespace, self.root)
def __repr__(self):
return self.__str__()
def __contains__(self, pkg_name):
return self.exists(pkg_name)
def create_repo(root, namespace=None):
"""Create a new repository in root with the specified namespace.
If the namespace is not provided, use basename of root.
Return the canonicalized path and namespace of the created repository.
"""
root = canonicalize_path(root)
if not namespace:
namespace = os.path.basename(root)
if not re.match(r'\w[\.\w-]*', namespace):
raise InvalidNamespaceError(
"'%s' is not a valid namespace." % namespace)
existed = False
if os.path.exists(root):
if os.path.isfile(root):
raise BadRepoError('File %s already exists and is not a directory'
% root)
elif os.path.isdir(root):
if not os.access(root, os.R_OK | os.W_OK):
raise BadRepoError(
'Cannot create new repo in %s: cannot access directory.'
% root)
if os.listdir(root):
raise BadRepoError(
'Cannot create new repo in %s: directory is not empty.'
% root)
existed = True
full_path = os.path.realpath(root)
parent = os.path.dirname(full_path)
if not os.access(parent, os.R_OK | os.W_OK):
raise BadRepoError(
"Cannot create repository in %s: can't access parent!" % root)
try:
config_path = os.path.join(root, repo_config_name)
packages_path = os.path.join(root, packages_dir_name)
mkdirp(packages_path)
with open(config_path, 'w') as config:
config.write("repo:\n")
config.write(" namespace: '%s'\n" % namespace)
except (IOError, OSError) as e:
raise BadRepoError('Failed to create new repository in %s.' % root,
"Caused by %s: %s" % (type(e), e))
# try to clean up.
if existed:
shutil.rmtree(config_path, ignore_errors=True)
shutil.rmtree(packages_path, ignore_errors=True)
else:
shutil.rmtree(root, ignore_errors=True)
return full_path, namespace
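# Hedged usage sketch (paths and names are illustrative): create an empty
# repository on disk, then register it with the active RepoPath singleton.
# full_path, namespace = create_repo('/tmp/my_spack_repo', 'mypkgs')
# path.put_first(Repo(full_path))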
def _path():
"""Get the singleton RepoPath instance for Spack.
Create a RepoPath, add it to sys.meta_path, and return it.
TODO: consider not making this a singleton.
"""
repo_dirs = spack.config.get('repos')
if not repo_dirs:
raise NoRepoConfiguredError(
"Spack configuration contains no package repositories.")
path = RepoPath(*repo_dirs)
sys.meta_path.append(path)
return path
#: Singleton repo path instance
path = llnl.util.lang.Singleton(_path)
def get(spec):
"""Convenience wrapper around ``spack.repo.get()``."""
return path.get(spec)
def all_package_names():
"""Convenience wrapper around ``spack.repo.all_package_names()``."""
return path.all_package_names()
def set_path(repo):
"""Set the path singleton to a specific value.
Overwrite ``path`` and register it as an importer in
``sys.meta_path`` if it is a ``Repo`` or ``RepoPath``.
"""
global path
path = repo
# make the new repo_path an importer if needed
append = isinstance(repo, (Repo, RepoPath))
if append:
sys.meta_path.append(repo)
return append
@contextmanager
def import_lock():
imp.acquire_lock()
yield
imp.release_lock()
@contextmanager
def prepend_open(f, *args, **kwargs):
"""Open a file for reading, with some text prepended.
Arguments are same as for ``open()``, with one keyword argument,
``text``, specifying the text to prepend.
"""
text = kwargs.get('text', None)
with open(f, *args) as f:
with tempfile.NamedTemporaryFile(mode='w+') as tf:
if text:
tf.write(text + '\n')
tf.write(f.read())
tf.seek(0)
yield tf.file
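# Sketch of intended use (an assumption mirroring _get_pkg_module above):
# read a package file with the pkgkit import line prepended before handing
# it to imp.load_source.
# with prepend_open('/path/to/package.py', text=_package_prepend) as f:
#     source = f.read() # begins with 'from spack.pkgkit import *\n'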
@contextmanager
def swap(repo_path):
"""Temporarily use another RepoPath."""
global path
# swap out _path for repo_path
saved = path
remove_from_meta = set_path(repo_path)
yield
# restore _path and sys.meta_path
if remove_from_meta:
sys.meta_path.remove(repo_path)
path = saved
class RepoError(spack.error.SpackError):
"""Superclass for repository-related errors."""
class NoRepoConfiguredError(RepoError):
"""Raised when there are no repositories configured."""
class InvalidNamespaceError(RepoError):
"""Raised when an invalid namespace is encountered."""
class BadRepoError(RepoError):
"""Raised when repo layout is invalid."""
class DuplicateRepoError(RepoError):
"""Raised when duplicate repos are added to a RepoPath."""
class UnknownEntityError(RepoError):
"""Raised when we encounter a package spack doesn't have."""
class UnknownPackageError(UnknownEntityError):
"""Raised when we encounter a package spack doesn't have."""
def __init__(self, name, repo=None):
msg = None
if repo:
msg = "Package %s not found in repository %s" % (name, repo)
else:
msg = "Package %s not found." % name
super(UnknownPackageError, self).__init__(msg)
self.name = name
class UnknownNamespaceError(UnknownEntityError):
"""Raised when we encounter an unknown namespace"""
def __init__(self, namespace):
super(UnknownNamespaceError, self).__init__(
"Unknown namespace: %s" % namespace)
class FailedConstructorError(RepoError):
"""Raised when a package's class constructor fails."""
def __init__(self, name, exc_type, exc_obj, exc_tb):
super(FailedConstructorError, self).__init__(
"Class constructor failed for package '%s'." % name,
'\nCaused by:\n' +
('%s: %s\n' % (exc_type.__name__, exc_obj)) +
''.join(traceback.format_tb(exc_tb)))
self.name = name
|
mfherbst/spack
|
lib/spack/spack/repo.py
|
Python
|
lgpl-2.1
| 42,050 | 0.000143 |
import glob
import operational_instruments
from astropy.io import fits
from numpy.fft import fft2, ifft2
import sewpy
from astropy import wcs
from astropy.table import Table
from astropy.io import ascii
from astropy.time import Time
import pytz
import numpy as np
import os
import time
import log_utilities
import datetime
import rome_telescopes_dict
import rome_filters_dict
import shutil
import api_tools
import socket
import config_parser
import pwd
# Django modules had to be removed to make API compatible and run outside
# the docker container. Timezone applied at the API endpoint.
#from django.utils import timezone
class QuantityLimits(object):
def __init__(self):
self.sky_background_median_limit = 10000.0
self.sky_background_std_limit = 200
self.sky_background_minimum = 100
self.sky_background_maximum = 5000
self.minimum_moon_sep = 10
self.minimum_number_of_stars = {'gp': 1000, 'rp': 2000, 'ip' : 4000}
self.maximum_ellipticity = 0.4
self.maximum_seeing = 2.0
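# Illustrative sketch (added, not part of the original pipeline): these
# thresholds drive the check_* methods on Image below, e.g.
# limits = QuantityLimits()
# limits.minimum_number_of_stars['ip'] # -> 4000, used by check_Nstars()
# limits.maximum_seeing # -> 2.0 arcsec, used by check_seeing()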
class Image(object):
def __init__(self, image_directory, image_output_origin_directory,
image_name, logger ):
self.image_directory = image_directory
self.image_name = image_name
self.origin_directory = image_output_origin_directory
self.logger = logger
self.banzai_bpm = None
self.banzai_catalog = None
try:
images = fits.open(os.path.join(self.image_directory,self.image_name))
for image in images:
try :
if image.header['EXTNAME'] == 'BPM':
self.banzai_bpm = image
logger.info('Loaded the bad pixel mask')
if image.header['EXTNAME'] == 'SCI':
science_image = image
self.data = science_image.data
self.header = science_image.header
self.oldheader = science_image.header.copy()
logger.info('Loaded the science data')
if image.header['EXTNAME'] == 'CAT':
self.banzai_catalog = image
logger.info('Loaded the BANZAI catalogue')
except :
pass
except:
logger.error('I cannot load the image!')
# self.data = science_image.data
# self.header = science_image.header
# self.oldheader = science_image.header.copy()
self.camera = None
self.sky_level = None
self.sky_level_std = None
self.sky_minimum_level = None
self.sky_maximum_level = None
self.number_of_stars = None
self.ellipticity = None
self.seeing = None
self.quality_flags = []
self.thumbnail_box_size = 60
self.field_name = None
self.x_shift = 0
self.y_shift = 0
self.header_date_obs = '1986-04-04T00:00:00.00' #dummy value
self.header_telescope_site = None
self.header_dome_id = None
self.header_group_id = ''
self.header_track_id = ''
self.header_request_id = ''
self.header_object_name = None
self.header_moon_distance = None
self.header_moon_status = False
self.header_moon_fraction = None
self.header_airmass = None
self.header_seeing = None
self.header_ccd_temp = None
self.header_ellipticity = None
self.header_sky_level = None
self.header_sky_temperature = None
self.header_sky_measured_mag = None
self.find_camera()
self.find_object_and_field_name()
self.quantity_limits = QuantityLimits()
def process_the_image(self):
#self.extract_header_statistics()
#self.find_wcs_template()
#self.generate_sextractor_catalog()
#self.
#self.update_image_wcs()
#self.move_frame()
pass
def update_image_wcs(self):
try:
hdutemplate = fits.open(os.path.join(self.template_directory,self.template_name))
templateheader=hdutemplate[0].header
hdutemplate.close()
imageheader=self.header
#STORE OLD FITSHEADER AND ADJUST BASED ON TEMPLATE
imageheader['DPXCORR'] = self.x_shift
imageheader['DPYCORR'] = self.y_shift
imageheader['WCSRFCAT'] = templateheader['WCSRFCAT']
imageheader['RA'] = templateheader['RA']
imageheader['DEC'] = templateheader['DEC']
imageheader['CRPIX1'] = templateheader['CRPIX1']
imageheader['CRPIX2'] = templateheader['CRPIX2']
imageheader['CRVAL1'] = templateheader['CRVAL1']
imageheader['CRVAL2'] = templateheader['CRVAL2']
imageheader['CD1_1'] = templateheader['CD1_1']
imageheader['CD1_2'] = templateheader['CD1_2']
imageheader['CD2_1'] = templateheader['CD2_1']
imageheader['CD2_2'] = templateheader['CD2_2']
imageheader['CRPIX1'] = self.x_new_center
imageheader['CRPIX2'] = self.y_new_center
imageheader['CDELT1'] = templateheader['CDELT1']
imageheader['CDELT2'] = templateheader['CDELT2']
imageheader['CROTA1'] = templateheader['CROTA1']
imageheader['CROTA2'] = templateheader['CROTA2']
imageheader['SECPIX1'] = templateheader['SECPIX1']
imageheader['SECPIX2'] = templateheader['SECPIX2']
imageheader['WCSSEP'] = templateheader['WCSSEP']
self.logger.info('WCS header successfully updated')
except:
self.logger.error('Failed to update the WCS header')
def find_wcs_template(self):
field_name = self.field_name.replace('ROME-','')
template_name = 'WCS_template_' + field_name + '.fits'
thumbnail_name = 'WCS_template_' + field_name + '.thumbnail'
origin_directory = self.origin_directory
template_directory = origin_directory + 'wcs_templates/'
self.template_name = template_name
self.template_directory = template_directory
try:
coord=np.loadtxt(os.path.join(self.template_directory,thumbnail_name))
self.x_center_thumbnail_world=coord[0]
self.y_center_thumbnail_world=coord[1]
except:
self.x_center_thumbnail_world=self.header['CRVAL1']
self.y_center_thumbnail_world=self.header['CRVAL2']
self.logger.info('Extracted WCS information')
def find_camera(self):
try:
self.camera_name = self.image_name[9:13]
self.camera = operational_instruments.define_instrument(self.camera_name)
self.filter = self.header[self.camera.header_dictionnary['filter']]
self.logger.info('Successfully identified the associated camera, '+str(self.camera_name))
except:
self.logger.error('I do not recognise camera '+str(self.camera_name))
def find_object_and_field_name(self):
try:
self.object_name = self.header[self.camera.header_dictionnary['object']]
self.field_name = self.object_name
self.logger.info('Object name is : '+self.object_name)
self.logger.info('And so the associated field : '+self.field_name)
except:
self.logger.error('I cannot recognize the object name or/and field name!')
def determine_the_output_directory(self):
try:
origin_directory = self.origin_directory
if len(self.quality_flags) == 0:
quality_directory = 'good/'
else:
quality_directory = 'bad/'
if 'ROME' in self.header_group_id:
mode_directory = 'rome/'
else:
mode_directory = 'rea/'
site_directory = self.header_telescope_site +'/'
the_filter = self.camera.filter_convention[self.filter]
filter_directory = the_filter +'/'
camera_directory = self.camera.name +'/'
field_directory = self.field_name +'/'
output_directory = os.path.join(origin_directory,quality_directory,
mode_directory,site_directory,
camera_directory, filter_directory,
field_directory)
self.output_directory = output_directory
self.catalog_directory = origin_directory.replace('images','catalog0')
if os.path.isdir(self.output_directory) == False:
os.makedirs(self.output_directory)
if os.path.isdir(self.catalog_directory) == False:
os.makedirs(self.catalog_directory)
self.logger.info('Successfully built the output directory : '+self.output_directory)
self.logger.info('Successfully built the catalog directory : '+self.catalog_directory)
except:
self.logger.error('I can not construct the output directory!')
def find_or_construct_the_output_directory(self):
try :
flag = os.path.isdir(self.output_directory)
if flag == True:
self.logger.info('Successfully found the output directory : '+self.output_directory)
else :
os.makedirs(self.output_directory)
self.logger.info('Successfully mkdir the output directory : '+self.output_directory)
except:
self.logger.error('I cannot find or mkdir the output directory!')
def find_WCS_offset(self):
try:
self.x_new_center,self.y_new_center,self.x_shift,self.y_shift = xycorr(os.path.join(self.template_directory,self.template_name), self.data, 0.4)
self.update_image_wcs()
self.x_shift = int(self.x_shift)
self.y_shift = int(self.y_shift)
self.logger.info('Successfully found the WCS correction')
except:
self.x_shift = 0
self.y_shift = 0
self.logger.error('I failed to find a WCS correction')
def generate_sextractor_catalog(self, config):
"""
Extract a catalog from a WCS-recalibrated (!) image,
calling SExtractor via sewpy to obtain an
astropy-compliant output table.
"""
try:
extractor_parameters=['X_IMAGE','Y_IMAGE','BACKGROUND',
'ELLIPTICITY','FWHM_WORLD','X_WORLD',
'Y_WORLD','MAG_APER','MAGERR_APER']
extractor_config={'DETECT_THRESH':2.5,
'ANALYSIS_THRESH':2.5,
'FILTER':'Y',
'DEBLEND_NTHRESH':32,
'DEBLEND_MINCOUNT':0.005,
'CLEAN':'Y',
'CLEAN_PARAM':1.0,
'PIXEL_SCALE':self.camera.pix_scale,
'SATUR_LEVEL':self.camera.ADU_high,
'PHOT_APERTURES':10,
'DETECT_MINAREA':7,
'GAIN':self.camera.gain,
'SEEING_FWHM':self.header_seeing,
'BACK_FILTERSIZE':3}
sew = sewpy.SEW(workdir=os.path.join(self.image_directory,'sewpy'),
sexpath=config['sextractor_path'],
params=extractor_parameters,
config=extractor_config)
sewoutput = sew(os.path.join(self.image_directory,self.image_name))
#APPEND JD, ATTEMPTING TO CALIBRATE MAGNITUDES..
catalog=sewoutput['table']
tobs=Time([self.header['DATE-OBS']],format='isot',scale='utc')
calibration_pars={'gp':[1.0281267,29.315002],'ip':[1.0198562,28.13711],'rp':[1.020762,28.854443]}
if self.filter!=None:
calmag=catalog['MAG_APER']*calibration_pars[self.filter][0]+calibration_pars[self.filter][1]
calmag[np.where(catalog['MAG_APER']==99.)]=99.
catalog['MAG_APER_CAL']=calmag
catalog['FILTER']=[self.filter]*len(calmag)
catalog['JD']=np.ones(len(catalog))*tobs.jd
#APPEND JD AND CALIBRATED MAGNITUDES...
#ROUGH CALIBRATION TO VPHAS+
#gmag=instmag*1.0281267+29.315002
#imag=instmag*1.0198562+28.13711
#rmag=instmag*1.020762+28.854443
self.compute_stats_from_catalog(catalog)
self.catalog = catalog
#ascii.write(catalog,os.path.join('./',catname))
self.logger.info('Sextractor catalog successfully produced')
except:
self.logger.error('I cannot produce the Sextractor catalog!')
def create_image_control_region(self):
w = wcs.WCS(self.header)
py,px = w.wcs_world2pix(self.x_center_thumbnail_world,
self.y_center_thumbnail_world,1)
py = int(py)
px = int(px)
try:
self.thumbnail=self.data[px-self.thumbnail_box_size//2:px+self.thumbnail_box_size//2,py-self.thumbnail_box_size//2:py+self.thumbnail_box_size//2]
self.logger.info('Thumbnail successfully produced around the expected position')
except:
self.thumbnail=np.zeros((self.thumbnail_box_size,self.thumbnail_box_size))
self.logger.warning('Could not extract a thumbnail; using an empty placeholder instead')
def compute_stats_from_catalog(self,catalog):
try:
self.sky_level=np.median(catalog['BACKGROUND'])
self.sky_level_std=np.std(catalog['BACKGROUND'])
self.sky_minimum_level=np.percentile(catalog['BACKGROUND'],1)
self.sky_maximum_level=np.max(catalog['BACKGROUND'])
self.number_of_stars=len(catalog)
self.ellipticity=np.median(catalog['ELLIPTICITY'])
self.seeing=np.median(catalog['FWHM_WORLD']*3600)
self.logger.info('Image quality statistics well updated')
except:
self.logger.error('For some reason, I can not update the image quality statistics!')
def extract_header_statistics(self):
desired_quantities = [ key for key,value in self.__dict__.items() if 'header' in key]
for quantity in desired_quantities :
try:
dictionnary_key = quantity.replace('header_','')
setattr(self, quantity, self.header[self.camera.header_dictionnary[dictionnary_key]])
except:
pass
self.logger.info('Successfully obtained image header quality statistics')
def assess_image_quality(self):
try:
self.check_background()
self.check_Moon()
self.check_Nstars()
self.check_ellipticity()
self.check_seeing()
self.logger.info('Quality flags well produced')
except:
self.logger.error('I can not assess the image quality, no quality flags produced!')
def check_background(self):
if self.sky_level:
if self.sky_level > self.quantity_limits.sky_background_median_limit:
self.quality_flags.append('High sky background')
else:
self.quality_flags.append('No sky level measured!')
if self.sky_level_std :
if self.sky_level_std > self.quantity_limits.sky_background_std_limit:
self.quality_flags.append('High sky background variations')
else:
self.quality_flags.append('No sky level variations measured!')
if self.sky_minimum_level:
if self.sky_minimum_level < self.quantity_limits.sky_background_minimum:
self.quality_flags.append('Low minimum background')
else:
self.quality_flags.append('No minimum sky level measured!')
if self.sky_maximum_level:
if self.sky_maximum_level > self.quantity_limits.sky_background_maximum:
self.quality_flags.append('High maximum background')
else:
self.quality_flags.append('No maximum sky level measured!')
def check_Moon(self):
if self.header_moon_distance:
if self.header_moon_distance < self.quantity_limits.minimum_moon_sep:
self.quality_flags.append('Moon too close')
else:
self.quality_flags.append('No Moon distance measured!')
def check_Nstars(self):
if self.number_of_stars:
if self.number_of_stars < self.quantity_limits.minimum_number_of_stars[self.filter]:
self.quality_flags.append('Low number of stars')
else:
self.quality_flags.append('No stars measured!')
def check_ellipticity(self):
if self.ellipticity:
if self.ellipticity > self.quantity_limits.maximum_ellipticity:
self.quality_flags.append('High ellipticity')
else:
self.quality_flags.append('No ellipticity measured!')
def check_seeing(self):
if self.seeing:
if self.seeing > self.quantity_limits.maximum_seeing:
self.quality_flags.append('Bad seeing')
else:
self.quality_flags.append('No seeing measured!')
def check_if_image_in_database(self):
return None
def ingest_the_image_in_the_database(self,config):
quality_flag = ' ; '.join(self.quality_flags)
observing_date = datetime.datetime.strptime(self.header_date_obs,'%Y-%m-%dT%H:%M:%S.%f')
observing_date = observing_date.replace(tzinfo=pytz.UTC)
observing_date = observing_date.strftime("%Y-%m-%dT%H:%M:%S")
try:
telescope = self.header_telescope_site + self.header_dome_id
telescope_name = rome_telescopes_dict.telescope_dict[telescope]
except:
telescope_name = ''
try:
camera_filter = rome_filters_dict.filter_dict[self.filter]
except:
camera_filter = ''
try:
moon_status_dictionnary = {'UP':True,'DOWN':False}
moon_status = moon_status_dictionnary[self.header_moon_status]
except:
moon_status = False
params = {'field_name': self.field_name,
'image_name': self.image_name,
'date_obs': observing_date,
'timestamp': datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S"),
'tel': telescope_name,
'inst': self.camera_name,
'filt': camera_filter,
'grp_id': self.header_group_id,
'track_id': self.header_track_id,
'req_id': self.header_request_id,
'airmass': self.header_airmass,
'avg_fwhm': self.seeing,
'avg_sky': self.sky_level,
'avg_sigsky': self.sky_level_std,
'moon_sep': self.header_moon_distance,
'moon_phase': self.header_moon_fraction,
'moon_up': moon_status,
'elongation': self.ellipticity,
'nstars': self.number_of_stars,
'ztemp': self.header_ccd_temp,
'shift_x': int(self.x_shift),
'shift_y': int(self.y_shift),
'quality': quality_flag}
try :
message = api_tools.check_image_in_db(config,params,
testing=config['testing'],
verbose=config['verbose'])
self.logger.info('Image known to the DB? '+str(message))
if 'true' in str(message).lower():
response = api_tools.submit_image_record(config,params,
testing=config['testing'],
verbose=config['verbose'])
if 'success' in str(response).lower():
ingest_success = True
self.logger.info('Image successfully updated in the DB')
else:
ingest_success = False
self.logger.warning('ERROR during update of image data in the DB:')
self.logger.warning(str(response))
else:
response = api_tools.submit_image_record(config,params,
testing=config['testing'],
verbose=config['verbose'])
if 'success' in str(response).lower():
ingest_success = True
self.logger.info('Image successfully ingested into the DB')
else:
ingest_success = False
self.logger.warning('ERROR while ingesting a new image into the DB:')
self.logger.warning(str(response))
except:
ingest_success = False
self.logger.warning('Image NOT ingested or updated in the DB, something went really wrong!')
return ingest_success
def class_the_image_in_the_directory(self):
#import pdb; pdb.set_trace()
try :
new_hdul = fits.HDUList()
calibrated_image = fits.ImageHDU(self.data, header=self.header, name='calibrated')
thumbnail_image = fits.ImageHDU(self.thumbnail, header=self.header, name='thumbnail')
original_header = fits.PrimaryHDU(header=self.oldheader)
new_hdul.append(calibrated_image)
new_hdul.append(thumbnail_image)
new_hdul.append(original_header)
if self.banzai_catalog:
new_hdul.append(self.banzai_catalog)
if self.banzai_bpm:
new_hdul.append(self.banzai_bpm)
new_hdul.writeto(self.output_directory+self.image_name, overwrite=True)
self.logger.info('Image '+self.image_name+' successfully placed in the directory '+self.output_directory)
sorting_success = True
except :
self.logger.error('Something went wrong when moving the image to the directory!')
sorting_success = False
return sorting_success
def class_the_catalog_in_the_directory(self):
try:
catname=self.image_name.replace('.fits','.cat')
ascii.write(self.catalog,os.path.join(self.catalog_directory,catname),overwrite=True)
self.logger.info('Catalog successfully moved to the catalog directory')
except:
self.logger.error('The catalog cannot be written to the good directory!')
def find_frames_to_process(new_frames_directory, logger):
IncomingList = [i for i in os.listdir(new_frames_directory) if ('.fits' in i) and ('.fz' not in i)]
if len(IncomingList) == 0 :
return
else :
logger.info('I found '+str(len(IncomingList))+' frames to process')
return IncomingList
def read_config():
"""Function to read the XML configuration file for Obs_Control"""
host_name = socket.gethostname()
userid = pwd.getpwuid( os.getuid() ).pw_name
if 'rachel' in str(host_name).lower():
config_file_path = os.path.join('/Users/rstreet/.robonet_site/reception_config.xml')
elif 'einstein' in str(host_name).lower() and userid == 'robouser':
config_file_path = '/data/romerea/configs/reception_config.xml'
else:
config_file_path = os.path.join('/home/',userid,'.robonet_site','reception_config.xml')
if os.path.isfile(config_file_path) == False:
raise IOError('Cannot find configuration file, looking for:'+config_file_path)
config = config_parser.read_config(config_file_path)
for key, value in config.items():
if str(value).lower() == 'true':
config[key] = True
elif str(value).lower() == 'false':
config[key] = False
return config
def process_new_images():
"""Main driver routine for reception_data
Designed to process all incoming images and extract information from the
image header as well as quick photometry of the image, for later use
in quality assessment.
"""
config = read_config()
    logger = log_utilities.start_day_log(config, 'reception', console=False)
logger.info("Testing mode: "+repr(config['testing']))
logger.info("Verbosity mode: "+repr(config['verbose']))
NewFrames = find_frames_to_process(config['new_frames_directory'], logger)
if os.path.isdir(os.path.join(config['new_frames_directory'],'Processed')) == False:
os.makedirs(os.path.join(config['new_frames_directory'],'Processed'))
if NewFrames :
for newframe in NewFrames :
start = time.time()
newframe = newframe.replace(config['new_frames_directory'], '')
logger.info('')
logger.info('Working on frame: '+newframe)
image = Image(config['new_frames_directory'],
config['image_output_origin_directory'],
newframe, logger)
image.extract_header_statistics()
image.find_wcs_template()
image.create_image_control_region()
image.find_WCS_offset()
image.generate_sextractor_catalog(config)
image.assess_image_quality()
image.determine_the_output_directory()
image.find_or_construct_the_output_directory()
success = image.ingest_the_image_in_the_database(config)
if success == True:
image.class_the_catalog_in_the_directory()
sorting_success = image.class_the_image_in_the_directory()
if sorting_success == True:
src = os.path.join(config['new_frames_directory'],newframe)
dest = os.path.join(config['new_frames_directory'],'Processed',newframe)
shutil.move(src,dest)
logger.info('Successfully moved the frame in the Processed directory!')
else:
                    logger.info('Failed to move the frame to the Processed directory!')
pass
if success == False:
                logger.warning('The image could not be updated/ingested in the DB, aborting this frame!')
log_utilities.end_day_log(logger)
else :
logger.info('')
logger.info('No frames to treat, halting!')
log_utilities.end_day_log(logger)
def convolve(image, psf, ft_psf=None, ft_image=None, no_ft=None, correlate=None, auto_correlation=None):
"""
NAME:
CONVOLVE
PURPOSE:
Convolution of an image with a Point Spread Function (PSF)
EXPLANATION:
The default is to compute the convolution using a product of
Fourier transforms (for speed).
CALLING SEQUENCE:
imconv = convolve( image1, psf, FT_PSF = psf_FT )
or:
correl = convolve( image1, image2, /CORREL )
or:
correl = convolve( image, /AUTO )
INPUTS:
image = 2-D np.array (matrix) to be convolved with psf
psf = the Point Spread Function, (size < or = to size of image).
OPTIONAL INPUT KEYWORDS:
FT_PSF = passes out/in the Fourier transform of the PSF,
(so that it can be re-used the next time function is called).
FT_IMAGE = passes out/in the Fourier transform of image.
/CORRELATE uses the np.conjugate of the Fourier transform of PSF,
to compute the cross-correlation of image and PSF,
(equivalent to IDL function convol() with NO rotation of PSF)
/AUTO_CORR computes the auto-correlation function of image using FFT.
/NO_FT overrides the use of FFT, using IDL function convol() instead.
(then PSF is rotated by 180 degrees to give same result)
METHOD:
When using FFT, PSF is centered & expanded to size of image.
HISTORY:
written, Frank Varosi, NASA/GSFC 1992.
Appropriate precision type for result depending on input image
Markus Hundertmark February 2006
Fix the bug causing the recomputation of FFT(psf) and/or FFT(image)
Sergey Koposov December 2006
"""
n_params = 2
psf_ft = ft_psf
imft = ft_image
noft = no_ft
auto = auto_correlation
sp = np.array(np.shape(psf_ft))
sif = np.array(np.shape(imft))
sim = np.array(np.shape(image))
    sc = sim // 2
npix = np.array(image, copy=0).size
    if image.ndim != 2 or noft is not None:
if (auto is not None):
message("auto-correlation only for images with FFT", inf=True)
return image
else:
if (correlate is not None):
return convol(image, psf)
else:
return convol(image, rotate(psf, 2))
    if imft is None or (imft.ndim != 2) or imft.shape != image.shape:  #add the type check
        imft = ifft2(image)
if (auto is not None):
return np.roll(np.roll(npix * np.real(fft2(imft * np.conjugate(imft))), sc[0], 0),sc[1],1)
    if (ft_psf is None or ft_psf.ndim != 2 or ft_psf.shape != image.shape or
ft_psf.dtype!=image.dtype):
sp = np.array(np.shape(psf))
        loc = np.maximum((sc - sp // 2), 0)   #center PSF in new np.array,
        s = np.maximum((sp // 2 - sc), 0)   #handle all cases: smaller or bigger
l = np.minimum((s + sim - 1), (sp - 1))
psf_ft = np.conjugate(image) * 0 #initialise with correct size+type according
#to logic of conj and set values to 0 (type of ft_psf is conserved)
psf_ft[loc[1]:loc[1]+l[1]-s[1]+1,loc[0]:loc[0]+l[0]-s[0]+1] = \
psf[s[1]:(l[1])+1,s[0]:(l[0])+1]
psf_ft = ifft2(psf_ft)
if (correlate is not None):
conv = npix * np.real(fft2(imft * np.conjugate(psf_ft)))
else:
conv = npix * np.real(fft2(imft * psf_ft))
sc = sc + (sim % 2) #shift correction for odd size images.
return np.roll(np.roll(conv, sc[0],0), sc[1],1)
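def _convolve_example():
    """Illustrative sketch only (not called by the pipeline): the
    auto-correlation of an image computed with
    convolve(..., auto_correlation=1) peaks at the image centre, which
    is exactly the zero-shift case that correl_shift() below relies on.
    The 64x64 size is an arbitrary choice for the demo, and the
    module-level fft2/ifft2 imports that convolve() itself uses are
    assumed to be present.
    """
    import numpy as np
    rng = np.random.RandomState(0)
    image = rng.rand(64, 64)
    auto = convolve(image, image, auto_correlation=1)
    # zero lag is rolled to the centre, so the peak sits at (32, 32)
    return np.unravel_index(np.argmax(auto), auto.shape)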
def correl_shift(reference, image):
"""This function calculates the revised central pixel coordinate for
imgdata based on a cross correlation with refdata
"""
    xcen = np.shape(reference)[0] // 2
    ycen = np.shape(reference)[1] // 2
correl = convolve(np.matrix(reference),
np.matrix(image), correlate=1)
xshift, yshift = np.unravel_index(np.argmax(correl), np.shape(correl))
    half = np.shape(correl)[0] // 2
return yshift-ycen,xshift-xcen
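def _correl_shift_example():
    """Illustrative sketch only: correl_shift() should recover a known
    integer pixel offset between a reference and a circularly shifted
    copy of itself (the FFT correlation is circular).  The (3, 5) roll
    is an arbitrary choice; the sign convention of the returned offsets
    is the one xycorr() compensates for with its final negation.
    """
    import numpy as np
    rng = np.random.RandomState(1)
    reference = rng.rand(64, 64)
    shifted = np.roll(np.roll(reference, 3, axis=0), 5, axis=1)
    return correl_shift(reference, shifted)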
def xycorr(pathref, image, edgefraction):
"""
For a given reference image path pathref and a given image path pathimg
the central part with an edge length of edgefraction times full edge
length is used to correlate a revised central pixel position
"""
hduref = fits.open(pathref)
template_data = hduref[0].data
noff = np.shape(template_data)
if noff != np.shape(image):
hduref.close()
return 0, 0, 0, 0
    xcen = np.shape(template_data)[0] // 2
    ycen = np.shape(template_data)[1] // 2
    halfx = int(edgefraction * float(noff[0])) // 2
    halfy = int(edgefraction * float(noff[1])) // 2
reduce_template = template_data[
xcen - halfx:xcen + halfx, ycen - halfy:ycen + halfy]
reduce_image = image[
xcen - halfx:xcen + halfx, ycen - halfy:ycen + halfy]
xc, yc = correl_shift(reduce_template, reduce_image)
hduref.close()
return -xc + xcen , -yc + ycen, xc, yc
if __name__ == '__main__':
process_new_images()
|
ytsapras/robonet_site
|
scripts/reception_data.py
|
Python
|
gpl-2.0
| 34,418 | 0.014614 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright (C) 2016 Oladimeji Fayomi, University of Waikato.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Name: NetlinkProcessor.py
# Author: Oladimeji Fayomi
# Created: 25 May 2016
# Last Modified: 17 August 2016
# Version: 1.0
# Description: Listens to netlink messages and notifies the RheaFlow
# application of important netlink messages.
import socket
from pyroute2 import IPDB
import eventlet
from datetime import datetime
from log import log
try:
import cPickle as pickle
except:
import pickle
server_addr = ('127.0.0.1', 55651)
class NetlinkClient(object):
def __init__(self):
self.neighbours = []
self.unresolvedneighbours = []
self.ip = IPDB(ignore_rtables=[254])
self.ip_uuid = self.ip.register_callback(self.callback)
self.server = eventlet.listen(('127.0.0.1', 55652))
self.socket = None
self.serve = True
self.pool = eventlet.GreenPool()
self.not_connect = True
def callback(self, ipdb, msg, action):
        if action == 'RTM_NEWNEIGH':
            self.add_neighbour(msg)
        if action == 'RTM_DELNEIGH':
            self.remove_neighbour(msg)
        if action == 'RTM_NEWLINK':
            self.notify(['ifaceTable', self.ifaceTable(ipdb)])
        if action == 'RTM_DELLINK':
            self.notify(['ifaceTable', self.ifaceTable(ipdb)])
        if action == 'RTM_NEWADDR':
            log.info("RTM_NEWADDR happened at %s", str(datetime.now()))
            self.notify(['ifaceTable', self.ifaceTable(ipdb)])
        if action == 'RTM_DELADDR':
            log.info("RTM_DELADDR happened at %s", str(datetime.now()))
            self.notify(['ifaceTable', self.ifaceTable(ipdb)])
def add_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
        if attributes[1][0] == 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
if host not in self.neighbours:
self.notify(['add_neigh', host])
self.neighbours.append(host)
if ip_addr in self.unresolvedneighbours:
self.unresolvedneighbours = list(filter(lambda x: x !=
ip_addr,
self.unresolvedneighbours)
)
else:
if ip_addr not in self.unresolvedneighbours:
self.unresolvedneighbours.append(ip_addr)
self.notify(['unresolved', self.unresolvedneighbours])
def remove_neighbour(self, msg):
attributes = msg['attrs']
ip_addr = attributes[0][1]
        if attributes[1][0] == 'NDA_LLADDR':
mac_addr = attributes[1][1]
iface_index = msg['ifindex']
host = {'ipaddr': ip_addr, 'mac_addr': mac_addr,
'ifindex': iface_index}
self.notify(['remove_neigh', host])
self.neighbours = list(filter(
lambda x: x != host, self.neighbours))
def notify(self, rheamsg):
notification = pickle.dumps(rheamsg)
if self.socket is not None:
self.socket.send(notification)
recv = self.socket.recv(8192)
def ifaceTable(self, ipdb):
ifaces = ipdb.by_name.keys()
table = []
for iface in ifaces:
mac_addr = ipdb.interfaces[iface]['address']
ip_addresses = ipdb.interfaces[iface]['ipaddr']
ifindex = ipdb.interfaces[iface]['index']
state = ipdb.interfaces[iface]['operstate']
table.append({'ifname': iface, 'mac-address': mac_addr,
'IP-Addresses': [x for x in ip_addresses],
'ifindex': ifindex,
'state': state})
return table
def neighbourtable(self):
return self.neighbours
def returnunresolvedhost(self):
return self.unresolvedneighbours
def process_requests(self, ipdb, request):
if request[0] == 'ifaceTable':
res = self.ifaceTable(ipdb)
result = ['ifaceTable', res]
return pickle.dumps(result)
if request[0] == 'neighbourtable':
res = self.neighbourtable()
result = ['neighbourtable', res]
return pickle.dumps(result)
if request[0] == 'get_unresolved':
res = self.returnunresolvedhost()
result = ['unresolved', res]
return pickle.dumps(result)
def handle_request(self, sock):
is_active = True
while is_active:
received = sock.recv(8192)
if len(received) != 0:
request = pickle.loads(received)
response = self.process_requests(self.ip, request)
sock.send(response)
if len(received) == 0:
is_active = False
sock.close()
sock.close()
def try_connect(self):
while self.not_connect:
try:
self.socket = eventlet.connect(('127.0.0.1', 55651))
except socket.error as e:
pass
else:
self.not_connect = False
def serve_forever(self):
while self.serve:
nl_sock, address = self.server.accept()
self.pool.spawn_n(self.handle_request, nl_sock)
log.info("Rhea has contacted us")
self.try_connect()
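def _query_example():
    """Illustrative sketch (hypothetical helper, not used by RheaFlow):
    querying a running NetlinkClient for its interface table.  The
    framing mirrors handle_request()/process_requests() above: a pickled
    list whose first element names the requested table, answered with a
    pickled [label, payload] pair.
    """
    sock = eventlet.connect(('127.0.0.1', 55652))
    sock.send(pickle.dumps(['ifaceTable']))
    label, table = pickle.loads(sock.recv(8192))
    sock.close()
    return table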
if __name__ == "__main__":
nlclient = NetlinkClient()
nlclient.serve_forever()
|
wandsdn/RheaFlow
|
RheaFlow/NetlinkProcessor.py
|
Python
|
apache-2.0
| 6,275 | 0.000478 |
# -*- coding: utf-8 -*-
from django.urls import re_path
from django.contrib.auth import views as auth_views
from django.utils.translation import gettext_lazy as _
from django.urls import reverse_lazy
from . import forms
from . import views
app_name = "auth"
urlpatterns = [
# Sign in / sign out
re_path(
_(r"^connexion/$"),
views.LoginView.as_view(
template_name="public/auth/login.html", authentication_form=forms.AuthenticationForm
),
name="login",
),
re_path(_(r"^deconnexion/$"), auth_views.LogoutView.as_view(next_page="/"), name="logout"),
re_path(_(r"^bienvenue/$"), views.UserLoginLandingRedirectView.as_view(), name="landing"),
# Parameters & personal data
re_path(
_(r"^donnees-personnelles/$"),
views.UserPersonalDataUpdateView.as_view(),
name="personal_data",
),
re_path(_(r"^parametres/$"), views.UserParametersUpdateView.as_view(), name="parameters"),
# Password change
re_path(_(r"^mot-de-passe/$"), views.UserPasswordChangeView.as_view(), name="password_change"),
# Password reset
re_path(
_(r"^mot-de-passe/reinitialisation/$"),
auth_views.PasswordResetView.as_view(
template_name="public/auth/password_reset_form.html",
email_template_name="emails/auth/password_reset_registered_email.html",
subject_template_name="emails/auth/password_reset_registered_email_subject.txt",
form_class=forms.PasswordResetForm,
success_url=reverse_lazy("public:auth:password_reset_done"),
),
name="password_reset",
),
re_path(
_(r"^mot-de-passe/reinitialisation/termine/$"),
auth_views.PasswordResetDoneView.as_view(
template_name="public/auth/password_reset_done.html"
),
name="password_reset_done",
),
re_path(
_(r"^reinitialisation/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]+-[0-9A-Za-z]+)/$"),
auth_views.PasswordResetConfirmView.as_view(
template_name="public/auth/password_reset_confirm.html",
success_url=reverse_lazy("public:auth:password_reset_complete"),
),
name="password_reset_confirm",
),
re_path(
_(r"^reinitialisation/termine/$"),
auth_views.PasswordResetCompleteView.as_view(
template_name="public/auth/password_reset_complete.html"
),
name="password_reset_complete",
),
]
|
erudit/eruditorg
|
eruditorg/apps/public/auth/urls.py
|
Python
|
gpl-3.0
| 2,483 | 0.003222 |
# This stores all the dialogue related stuff
import screen
class Dialogue(object):
"""Stores the dialogue tree for an individual NPC"""
def __init__(self, npc):
super(Dialogue, self).__init__()
self.npc = npc
self.game = npc.game
self.root = None
self.currentNode = None
def setRootNode(self, node):
self.root = node
def resetCurrentNode(self):
self.currentNode = self.root
def beginConversation(self):
self.resetCurrentNode()
self.runNextNode()
def runNextNode(self):
if self.currentNode is None:
return
# Grab all the DialogueChoices that should be shown
availableChoices = []
for (choice, predicate, child) in self.currentNode.choices:
if predicate is not None:
if predicate():
availableChoices.append((choice, child))
else:
availableChoices.append((choice, child))
npcName = None
if self.game.player.notebook.isNpcKnown(self.npc):
npcName = self.npc.firstName + " " + self.npc.lastName
choiceTexts = [choice.choiceText for (choice, child) in availableChoices]
screen.printDialogueChoices(self.game.screen, self.game.player,
choiceTexts, npcName)
choiceIdx = self.game.getDialogueChoice(len(choiceTexts)) - 1
self.game.draw()
(choice, nextNode) = availableChoices[choiceIdx]
response = ""
response += choice.response
if choice.responseFunction is not None:
response = choice.responseFunction(self.npc, response)
self.game.printDescription(response, npcName)
self.currentNode = nextNode
self.runNextNode()
class DialogueNode(object):
"""A single node of the dialogue tree"""
def __init__(self):
super(DialogueNode, self).__init__()
self.choices = []
def addChoice(self, choice, choicePredicate=None, childNode=None):
self.choices.append((choice, choicePredicate, childNode))
class DialogueChoice(object):
"""Stores the choice/function pair"""
def __init__(self, choiceText, response, responseFunction=None):
super(DialogueChoice, self).__init__()
self.choiceText = choiceText
self.response = response
self.responseFunction = responseFunction
def callResponseFunction(self, npcArgument, response):
        if self.responseFunction is not None:
            self.responseFunction(npcArgument, response)
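def _example_tree(npc):
    """Illustrative sketch (not part of the game): wiring a two-level
    dialogue tree.  ``npc`` is assumed to be a game NPC object with a
    ``game`` attribute, as Dialogue.__init__ expects.
    """
    dialogue = Dialogue(npc)
    root = DialogueNode()
    farewell = DialogueNode()
    # a greeting that leads on to the farewell node
    root.addChoice(DialogueChoice("Hello!", "Good day to you."),
                   None, farewell)
    # a choice with no child node ends the conversation in runNextNode
    farewell.addChoice(DialogueChoice("Goodbye.", "Farewell!"))
    dialogue.setRootNode(root)
    return dialogue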
|
mjdarby/RogueDetective
|
dialogue.py
|
Python
|
gpl-2.0
| 2,401 | 0.016243 |
import threading
__author__ = "Piotr Gawlowicz"
__copyright__ = "Copyright (c) 2015, Technische Universitat Berlin"
__version__ = "0.1.0"
__email__ = "gawlowicz@tkn.tu-berlin.de"
class Timer(object):
def __init__(self, handler_):
assert callable(handler_)
super().__init__()
self._handler = handler_
self._event = threading.Event()
self._thread = None
def start(self, interval):
"""interval is in seconds"""
if self._thread:
self.cancel()
self._event.clear()
self._thread = threading.Thread(target=self._timer, args=[interval])
self._thread.setDaemon(True)
self._thread.start()
def cancel(self):
if (not self._thread) or (not self._thread.is_alive()):
return
self._event.set()
# self._thread.join()
self._thread = None
def is_running(self):
return self._thread is not None
def _timer(self, interval):
# Avoid cancellation during execution of self._callable()
cancel = self._event.wait(interval)
if cancel:
return
self._handler()
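# Example (illustrative): a Timer fires its handler once, `interval`
# seconds after start(), unless it is cancelled or restarted first.
#
#     t = Timer(lambda: print("tick"))
#     t.start(2.0)   # "tick" printed ~2 s later by the timer thread
#     t.start(5.0)   # restarting discards the pending 2 s shot
#     t.cancel()     # nothing printed if cancelled before it fires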
class TimerEventSender(Timer):
# timeout handler is called by timer thread context.
# So in order to actual execution context to application's event thread,
# post the event to the application
def __init__(self, app, ev_cls):
super(TimerEventSender, self).__init__(self._timeout)
self._app = app
self._ev_cls = ev_cls
def _timeout(self):
self._app.send_event(self._ev_cls())
|
uniflex/uniflex
|
uniflex/core/timer.py
|
Python
|
mit
| 1,583 | 0 |
from flask import Flask
from flask import render_template
import euclid
import queue_constants
import ast
import redis
import threading
REDIS_ADDRESS = "localhost"
REDIS_PORT = 6379
REDIS_DB = 0
app = Flask(__name__)
@app.route("/")
def monitor():
queue = redis.StrictRedis(host=REDIS_ADDRESS, port=REDIS_PORT, db=REDIS_DB)
nstatus = []
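    # The Redis value under NODE_KEY is assumed to hold the repr() of a
    # dict keyed by node name, e.g. {'node1': {'status': 'up'}, ...};
    # it is stored as bytes, hence the decode() before literal_eval().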
status = ast.literal_eval(queue.get(queue_constants.NODE_KEY).decode())
for s in status:
nstatus.append({"name":s, "status":status[s]["status"]})
return render_template('monitor.html', status=nstatus)
if __name__ == "__main__":
euclidThread = threading.Thread(target=euclid.main)
euclidThread.setDaemon(True)
euclidThread.start()
app.run(host='0.0.0.0')
|
tommyp1ckles/hippocrates
|
server.py
|
Python
|
mit
| 742 | 0.006739 |
"""
Core
====
AudioSignals
------------
.. autoclass:: nussl.core.AudioSignal
:members:
:autosummary:
Masks
-----
.. automodule:: nussl.core.masks
:members:
:autosummary:
Constants
------------
.. automodule:: nussl.core.constants
:members:
:autosummary:
External File Zoo
-----------------
.. automodule:: nussl.core.efz_utils
:members:
:autosummary:
General utilities
-----------------
.. automodule:: nussl.core.utils
:members:
:autosummary:
Audio effects
-------------
.. automodule:: nussl.core.effects
:members:
:autosummary:
Mixing
------
.. automodule:: nussl.core.mixing
:members:
:autosummary:
Playing and embedding audio
---------------------------
.. automodule:: nussl.core.play_utils
:members:
:autosummary:
Checkpoint migration (backwards compatability)
----------------------------------------------
.. automodule:: nussl.core.migration
:members:
:autosummary:
"""
from .audio_signal import AudioSignal, STFTParams
from . import constants
from . import efz_utils
from . import play_utils
from . import utils
from . import mixing
from . import masks
__all__ = [
'AudioSignal',
'STFTParams',
'constants',
'efz_utils',
'play_utils',
'utils',
    'mixing',
    'masks',
]
|
interactiveaudiolab/nussl
|
nussl/core/__init__.py
|
Python
|
mit
| 1,293 | 0.000773 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: S.H.
Version: 0.1
Date: 2015-01-17
Description:
Scan ip:
74.125.131.0/24
74.125.131.99-125
74.125.131.201
Only three format above.
    Read IPs from ip.txt, and scan all ports (or a list of ports).
"""
import os
import socket
fileOpen = open("ip.txt", 'r')
fileTemp = open("temp.txt", 'a')
for line in fileOpen.readlines():
if line.find("-") != -1:
list = line[:line.index("-")]
ip = [int(a) for a in list.split(".")]
b = int(line[line.index("-")+1:])
for i in range(ip[3], b+1):
fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
elif line.find("/") != -1:
list = line[:line.index("/")]
ip = [int(a) for a in list.split(".")]
for i in range(256):
fileTemp.write(str(ip[0])+"."+str(ip[1])+"."+str(ip[2])+"."+str(i)+"\n")
else:
fileTemp.write(line)
fileTemp.close()
fileOpen.close()
# print("process is here.")
f = open("temp.txt", 'r')
print("===Scan Staring===")
for line in f.readlines():
    hostIP = socket.gethostbyname(line.strip())
# print(hostIP)
# for port in range(65535):
portList = [80, 8080]
for port in portList:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((hostIP, port))
if result == 0:
print("Port {} is OPEN on:\t\t\t {}".format(port, hostIP))
else:
print("Port {} is NOT open on {}".format(port, hostIP))
sock.close()
f.close()
os.remove("temp.txt")
print("===Scan Complement===")
|
KyoHS/Python
|
SingleThreadPortScan.py
|
Python
|
gpl-2.0
| 1,536 | 0.009115 |
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
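    Examples
    --------
    >>> import numpy as np
    >>> # a 1-D standard-normal model scored on matching data gives
    >>> # minus the differential entropy of N(0, 1)
    >>> float(log_likelihood(np.eye(1), np.eye(1)))  # doctest: +ELLIPSIS
    -1.418938...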
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
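    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[1., 2.], [3., 4.]])
    >>> empirical_covariance(X)
    array([[1., 1.],
           [1., 1.]])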
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : array-like, shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y
not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y
not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
dist : array, shape = [n_samples,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
dist = pairwise_distances(X, self.location_[np.newaxis, :],
metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
chrsrds/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 9,848 | 0 |
"""
LsVarRun - command ``ls -lnL /var/run``
=======================================
The ``ls -lnL /var/run`` command provides information for the listing of the
``/var/run`` directory.
Sample input is shown in the Examples. See ``FileListing`` class for
additional information.
Sample directory list::
total 20
drwx--x---. 2 0 984 40 May 15 09:29 openvpn
drwxr-xr-x. 2 0 0 40 May 15 09:30 plymouth
drwxr-xr-x. 2 0 0 40 May 15 09:29 ppp
drwxr-xr-x. 2 75 75 40 May 15 09:29 radvd
-rw-r--r--. 1 0 0 5 May 15 09:30 rhnsd.pid
drwxr-xr-x. 2 0 0 60 May 30 09:31 rhsm
drwx------. 2 32 32 40 May 15 09:29 rpcbind
-r--r--r--. 1 0 0 0 May 17 16:26 rpcbind.lock
Examples:
>>> "rhnsd.pid" in ls_var_run
False
>>> "/var/run" in ls_var_run
True
>>> ls_var_run.dir_entry('/var/run', 'openvpn')['type']
'd'
"""
from insights.specs import Specs
from .. import FileListing
from .. import parser
@parser(Specs.ls_var_run)
class LsVarRun(FileListing):
"""Parses output of ``ls -lnL /var/run`` command."""
pass
|
RedHatInsights/insights-core
|
insights/parsers/ls_var_run.py
|
Python
|
apache-2.0
| 1,120 | 0 |
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
# (c) 2017 Ansible Project
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import glob
import optparse
import os
import re
import sys
import warnings
from collections import defaultdict
from distutils.version import LooseVersion
from pprint import PrettyPrinter
try:
from html import escape as html_escape
except ImportError:
    # Python 2 fallback (html.escape is available on Python 3.2+)
import cgi
def html_escape(text, quote=True):
return cgi.escape(text, quote)
import jinja2
import yaml
from jinja2 import Environment, FileSystemLoader
from six import iteritems, string_types
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.utils import plugin_docs
from ansible.utils.display import Display
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TOO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = b" (D)"
pp = PrettyPrinter()
display = Display()
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`module_docs/' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
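# For reference, rst_ify("Use M(copy) to set C(dest)") yields
# "Use :ref:`module_docs/copy <copy>` to set ``dest``".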
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = html_escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
def write_data(text, output_dir, outputname, module=None):
''' dumps module output to a file or the screen, as requested '''
if output_dir is not None:
if module:
outputname = outputname % module
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fname = os.path.join(output_dir, outputname)
fname = fname.replace(".py", "")
with open(fname, 'wb') as f:
f.write(to_bytes(text))
else:
print(text)
def get_plugin_info(module_dir, limit_to=None, verbose=False):
'''
Returns information about plugins and the categories that they belong to
:arg module_dir: file system path to the top of the plugin directory
:kwarg limit_to: If given, this is a list of plugin names to
generate information for. All other plugins will be ignored.
:returns: Tuple of two dicts containing module_info, categories, and
aliases and a set listing deprecated modules:
:module_info: mapping of module names to information about them. The fields of the dict are:
:path: filesystem path to the module
:deprecated: boolean. True means the module is deprecated otherwise not.
:aliases: set of aliases to this module name
:metadata: The modules metadata (as recorded in the module)
:doc: The documentation structure for the module
:examples: The module's examples
:returndocs: The module's returndocs
:categories: maps category names to a dict. The dict contains at
least one key, '_modules' which contains a list of module names in
that category. Any other keys in the dict are subcategories with
the same structure.
'''
categories = dict()
module_info = defaultdict(dict)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
# * One glob level for every module level that we're going to traverse
files = (
glob.glob("%s/*.py" % module_dir) +
glob.glob("%s/*/*.py" % module_dir) +
glob.glob("%s/*/*/*.py" % module_dir) +
glob.glob("%s/*/*/*/*.py" % module_dir)
)
for module_path in files:
# Do not list __init__.py files
if module_path.endswith('__init__.py'):
continue
# Do not list blacklisted modules
module = os.path.splitext(os.path.basename(module_path))[0]
if module in plugin_docs.BLACKLIST['MODULE'] or module == 'base':
continue
# If requested, limit module documentation building only to passed-in
# modules.
if limit_to is not None and module.lower() not in limit_to:
continue
deprecated = False
if module.startswith("_"):
if os.path.islink(module_path):
# Handle aliases
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_", "", 1)
aliases = module_info[source].get('aliases', set())
aliases.add(module)
# In case we just created this via get()'s fallback
module_info[source]['aliases'] = aliases
continue
else:
# Handle deprecations
module = module.replace("_", "", 1)
deprecated = True
#
# Regular module to process
#
category = categories
# Start at the second directory because we don't want the "vendor"
mod_path_only = os.path.dirname(module_path[len(module_dir):])
module_categories = []
# build up the categories that this module belongs to
for new_cat in mod_path_only.split('/')[1:]:
if new_cat not in category:
category[new_cat] = dict()
category[new_cat]['_modules'] = []
module_categories.append(new_cat)
category = category[new_cat]
category['_modules'].append(module)
# the category we will use in links (so list_of_all_plugins can point to plugins/action_plugins/*'
if module_categories:
primary_category = module_categories[0]
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs, metadata = plugin_docs.get_docstring(module_path, verbose=verbose)
# save all the information
module_info[module] = {'path': module_path,
'deprecated': deprecated,
'aliases': set(),
'metadata': metadata,
'doc': doc,
'examples': examples,
'returndocs': returndocs,
'categories': module_categories,
'primary_category': primary_category,
}
# keep module tests out of becoming module docs
if 'test' in categories:
del categories['test']
return module_info, categories
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-P", "--plugin-type", action="store", dest="plugin_type", default='module', help="The type of plugin (module, lookup, etc)")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option("-l", "--limit-to-modules", '--limit-to', action="store", dest="limit_to", default=None,
help="Limit building module documentation to comma-separated list of plugins. Specify non-existing plugin name for no plugins.")
p.add_option('-V', action='version', help='Show version number and exit')
p.add_option('-v', '--verbose', dest='verbosity', default=0, action="count", help="verbose mode (increase number of 'v's for more)")
return p
def jinja2_environment(template_dir, typ, plugin_type):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True)
env.globals['xline'] = rst_xline
templates = {}
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
templates['plugin'] = env.get_template('plugin.rst.j2')
if plugin_type == 'module':
name = 'modules'
else:
name = 'plugins'
templates['category_list'] = env.get_template('%s_by_category.rst.j2' % name)
templates['support_list'] = env.get_template('%s_by_support.rst.j2' % name)
templates['list_of_CATEGORY_modules'] = env.get_template('list_of_CATEGORY_%s.rst.j2' % name)
else:
raise Exception("Unsupported format type: %s" % typ)
return templates
def too_old(added):
if not added:
return False
try:
added_tokens = str(added).split(".")
readded = added_tokens[0] + "." + added_tokens[1]
added_float = float(readded)
except ValueError as e:
warnings.warn("Could not parse %s: %s" % (added, str(e)))
return False
return added_float < TOO_OLD_TO_BE_NOTABLE
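# For reference: with TOO_OLD_TO_BE_NOTABLE = 1.3, too_old('1.2') is True,
# while too_old('2.4') and too_old(None) are both False.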
def process_plugins(module_map, templates, outputname, output_dir, ansible_version, plugin_type):
for module in module_map:
display.display("rendering: %s" % module)
fname = module_map[module]['path']
display.vvvvv(pp.pformat(('process_plugins info: ', module_map[module])))
# crash if module is missing documentation and not explicitly hidden from docs index
if module_map[module]['doc'] is None:
display.error("%s MISSING DOCUMENTATION" % (fname,))
_doc = {plugin_type: module,
'version_added': '2.4',
'filename': fname}
module_map[module]['doc'] = _doc
# continue
# Going to reference this heavily so make a short name to reference it by
doc = module_map[module]['doc']
display.vvvvv(pp.pformat(('process_plugins doc: ', doc)))
# add some defaults for plugins that dont have most of the info
doc['module'] = doc.get('module', module)
doc['version_added'] = doc.get('version_added', 'historical')
doc['plugin_type'] = plugin_type
if module_map[module]['deprecated'] and 'deprecated' not in doc:
display.warning("%s PLUGIN MISSING DEPRECATION DOCUMENTATION: %s" % (fname, 'deprecated'))
required_fields = ('short_description',)
for field in required_fields:
if field not in doc:
display.warning("%s PLUGIN MISSING field '%s'" % (fname, field))
not_nullable_fields = ('short_description',)
for field in not_nullable_fields:
if field in doc and doc[field] in (None, ''):
print("%s: WARNING: MODULE field '%s' DOCUMENTATION is null/empty value=%s" % (fname, field, doc[field]))
if 'version_added' not in doc:
display.error("*** ERROR: missing version_added in: %s ***\n" % module)
#
# The present template gets everything from doc so we spend most of this
# function moving data into doc for the template to reference
#
if module_map[module]['aliases']:
doc['aliases'] = module_map[module]['aliases']
# don't show version added information if it's too old to be called out
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# Strip old version_added for the module
if too_old(added):
del doc['version_added']
option_names = []
if 'options' in doc and doc['options']:
for (k, v) in iteritems(doc['options']):
# Error out if there's no description
if 'description' not in doc['options'][k]:
raise AnsibleError("Missing required description for option %s in %s " % (k, module))
# Error out if required isn't a boolean (people have been putting
# information on when something is required in here. Those need
# to go in the description instead).
required_value = doc['options'][k].get('required', False)
if not isinstance(required_value, bool):
raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module))
# Strip old version_added information for options
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
# Make sure description is a list of lines for later formatting
if not isinstance(doc['options'][k]['description'], list):
doc['options'][k]['description'] = [doc['options'][k]['description']]
option_names.append(k)
option_names.sort()
doc['option_keys'] = option_names
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = ansible_version
# check the 'deprecated' field in doc. We expect a dict potentially with 'why', 'version', and 'alternative' fields
# examples = module_map[module]['examples']
# print('\n\n%s: type of examples: %s\n' % (module, type(examples)))
# if examples and not isinstance(examples, (str, unicode, list)):
# raise TypeError('module %s examples is wrong type (%s): %s' % (module, type(examples), examples))
# use 'examples' for 'plainexamples' if 'examples' is a string
if isinstance(module_map[module]['examples'], string_types):
doc['plainexamples'] = module_map[module]['examples'] # plain text
else:
doc['plainexamples'] = ''
doc['metadata'] = module_map[module]['metadata']
display.vvvvv(pp.pformat(module_map[module]))
if module_map[module]['returndocs']:
try:
doc['returndocs'] = yaml.safe_load(module_map[module]['returndocs'])
except Exception as e:
print("%s:%s:yaml error:%s:returndocs=%s" % (fname, module, e, module_map[module]['returndocs']))
doc['returndocs'] = None
else:
doc['returndocs'] = None
doc['author'] = doc.get('author', ['UNKNOWN'])
if isinstance(doc['author'], string_types):
doc['author'] = [doc['author']]
display.v('about to template %s' % module)
display.vvvvv(pp.pformat(doc))
text = templates['plugin'].render(doc)
if LooseVersion(jinja2.__version__) < LooseVersion('2.10'):
# jinja2 < 2.10's indent filter indents blank lines. Cleanup
text = re.sub(' +\n', '\n', text)
write_data(text, output_dir, outputname, module)
def process_categories(plugin_info, categories, templates, output_dir, output_name, plugin_type):
for category in sorted(categories.keys()):
module_map = categories[category]
category_filename = output_name % category
display.display("*** recording category %s in %s ***" % (category, category_filename))
# start a new category file
category_name = category.replace("_", " ")
category_title = category_name.title()
subcategories = dict((k, v) for k, v in module_map.items() if k != '_modules')
template_data = {'title': category_title,
'category_name': category_name,
'category': module_map,
'subcategories': subcategories,
'module_info': plugin_info,
'plugin_type': plugin_type
}
text = templates['list_of_CATEGORY_modules'].render(template_data)
write_data(text, output_dir, category_filename)
def process_support_levels(plugin_info, templates, output_dir, plugin_type):
supported_by = {'Ansible Core Team': {'slug': 'core_supported',
'modules': [],
'output': 'core_maintained.rst',
'blurb': "These are :doc:`modules maintained by the"
" Ansible Core Team<core_maintained>` and will always ship"
" with Ansible itself."},
'Ansible Network Team': {'slug': 'network_supported',
'modules': [],
'output': 'network_maintained.rst',
'blurb': "These are :doc:`modules maintained by the"
" Ansible Network Team<network_maintained>` in"
" a relationship similar to how the Ansible Core Team"
" maintains the Core modules."},
'Ansible Partners': {'slug': 'partner_supported',
'modules': [],
'output': 'partner_maintained.rst',
'blurb': """
Some examples of :doc:`Certified Modules<partner_maintained>` are those submitted by other
companies. Maintainers of these types of modules must watch for any issues reported or pull requests
raised against the module.
The Ansible Core Team will review all modules becoming certified. Core committers will review
proposed changes to existing Certified Modules once the community maintainers of the module have
approved the changes. Core committers will also ensure that any issues that arise due to Ansible
engine changes will be remediated. Also, it is strongly recommended (but not presently required)
for these types of modules to have unit tests.
These modules are currently shipped with Ansible, but might be shipped separately in the future.
"""},
'Ansible Community': {'slug': 'community_supported',
'modules': [],
'output': 'community_maintained.rst',
'blurb': """
These are :doc:`modules maintained by the Ansible Community<community_maintained>`. They **are
not** supported by the Ansible Core Team or by companies/partners associated to the module.
They are still fully usable, but the response rate to issues is purely up to the community. Best
effort support will be provided but is not covered under any support contracts.
These modules are currently shipped with Ansible, but will most likely be shipped separately in the future.
"""},
}
# only gen support pages for modules for now, need to split and namespace templates and generated docs
if plugin_type == 'plugins':
return
# Separate the modules by support_level
for module, info in plugin_info.items():
if not info.get('metadata', None):
display.warning('no metadata for %s' % module)
continue
if info['metadata']['supported_by'] == 'core':
supported_by['Ansible Core Team']['modules'].append(module)
elif info['metadata']['supported_by'] == 'network':
supported_by['Ansible Network Team']['modules'].append(module)
elif info['metadata']['supported_by'] == 'certified':
supported_by['Ansible Partners']['modules'].append(module)
elif info['metadata']['supported_by'] == 'community':
supported_by['Ansible Community']['modules'].append(module)
else:
raise AnsibleError('Unknown supported_by value: %s' % info['metadata']['supported_by'])
# Render the module lists
for maintainers, data in supported_by.items():
template_data = {'maintainers': maintainers,
'modules': data['modules'],
'slug': data['slug'],
'module_info': plugin_info,
}
text = templates['support_list'].render(template_data)
write_data(text, output_dir, data['output'])
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
sys.exit("--module-dir is required", file=sys.stderr)
if not os.path.exists(options.module_dir):
sys.exit("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
if not options.template_dir:
sys.exit("--template-dir must be specified")
def main():
# INIT
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
display.verbosity = options.verbosity
plugin_type = options.plugin_type
# prep templating
templates = jinja2_environment(options.template_dir, options.type, plugin_type)
# set file/directory structure
if plugin_type == 'module':
# trim trailing s off of plugin_type for plugin_type=='modules'. ie 'copy_module.rst'
outputname = '%s_' + '%s.rst' % plugin_type
output_dir = options.output_dir
else:
# for plugins, just use 'ssh.rst' vs 'ssh_module.rst'
outputname = '%s.rst'
output_dir = '%s/plugins/%s' % (options.output_dir, plugin_type)
display.vv('output name: %s' % outputname)
display.vv('output dir: %s' % output_dir)
# Convert passed-in limit_to to None or list of modules.
if options.limit_to is not None:
options.limit_to = [s.lower() for s in options.limit_to.split(",")]
plugin_info, categories = get_plugin_info(options.module_dir, limit_to=options.limit_to, verbose=(options.verbosity > 0))
categories['all'] = {'_modules': plugin_info.keys()}
display.vvv(pp.pformat(categories))
display.vvvvv(pp.pformat(plugin_info))
# Transform the data
if options.type == 'rst':
display.v('Generating rst')
for key, record in plugin_info.items():
display.vv(key)
display.vvvvv(pp.pformat(('record', record)))
if record.get('doc', None):
short_desc = record['doc']['short_description']
if short_desc is None:
display.warning('short_description for %s is None' % key)
short_desc = ''
record['doc']['short_description'] = rst_ify(short_desc)
if plugin_type == 'module':
display.v('Generating Categories')
# Write module master category list
category_list_text = templates['category_list'].render(categories=sorted(categories.keys()))
category_index_name = '%ss_by_category.rst' % plugin_type
write_data(category_list_text, output_dir, category_index_name)
# Render all the individual plugin pages
display.v('Generating plugin pages')
process_plugins(plugin_info, templates, outputname, output_dir, options.ansible_version, plugin_type)
# Render all the categories for modules
if plugin_type == 'module':
display.v('Generating Category lists')
category_list_name_template = 'list_of_%s_' + '%ss.rst' % plugin_type
process_categories(plugin_info, categories, templates, output_dir, category_list_name_template, plugin_type)
# Render all the categories for modules
process_support_levels(plugin_info, templates, output_dir, plugin_type)
if __name__ == '__main__':
main()
|
cryptobanana/ansible
|
docs/bin/plugin_formatter.py
|
Python
|
gpl-3.0
| 26,677 | 0.002774 |
"""tuple sub-class which holds weak references to objects"""
import weakref
class WeakTuple( tuple ):
"""tuple sub-class holding weakrefs to items
The weak reference tuple is intended to allow you
to store references to a list of objects without
needing to manage weak references directly.
For the most part, the WeakTuple operates just
like a tuple object, in that it allows for all
of the standard tuple operations. The difference
is that the WeakTuple class only stores weak
references to its items. As a result, adding
an object to the tuple does not necessarily mean
that it will still be there later on during
execution (if the referent has been garbage
collected).
Because WeakTuple's are static (their membership
doesn't change), they will raise ReferenceError
when a sub-item is missing rather than skipping
missing items as does the WeakList. This can
occur for basically _any_ use of the tuple.
"""
    def __new__( cls, sequence=() ):
        """Create the tuple, storing weak references to the items
        Wrapping must happen in __new__ because tuples are immutable;
        tuple.__init__ ignores its arguments, so wrapping in __init__
        would have no effect on the stored contents.
        """
        wrapped = []
        for item in sequence:
            if isinstance( item, weakref.ReferenceType ):
                item = item()
            wrapped.append( weakref.ref( item ))
        return super( WeakTuple, cls ).__new__( cls, wrapped )
def valid( self ):
"""Explicit validity check for the tuple
Checks whether all references can be resolved,
basically just sees whether calling list(self)
raises a ReferenceError
"""
try:
list( self )
return 1
except weakref.ReferenceError:
return 0
def wrap( self, item ):
"""Wrap an individual item in a weak-reference
If the item is already a weak reference, we store
a reference to the original item. We use approximately
the same weak reference callback mechanism as the
standard weakref.WeakKeyDictionary object.
"""
if isinstance( item, weakref.ReferenceType ):
item = item()
return weakref.ref( item )
def unwrap( self, item ):
"""Unwrap an individual item
This is a fairly trivial operation at the moment,
it merely calls the item with no arguments and
returns the result.
"""
ref = item()
if ref is None:
raise weakref.ReferenceError( """%s instance no longer valid (item %s has been collected)"""%( self.__class__.__name__, item))
return ref
def __iter__( self ):
"""Iterate over the tuple, yielding strong references"""
index = 0
while index < len(self):
yield self[index]
index += 1
def __getitem__( self, index ):
"""Get the item at the given index"""
return self.unwrap(super (WeakTuple,self).__getitem__( index ))
def __getslice__( self, start, stop ):
"""Get the items in the range start to stop"""
return map(
self.unwrap,
super (WeakTuple,self).__getslice__( start, stop)
)
def __contains__( self, item ):
"""Return boolean indicating whether the item is in the tuple"""
for node in self:
if item is node:
return 1
return 0
def count( self, item ):
"""Return integer count of instances of item in tuple"""
count = 0
for node in self:
if item is node:
count += 1
return count
def index( self, item ):
"""Return integer index of item in tuple"""
count = 0
for node in self:
if item is node:
return count
count += 1
return -1
def __add__(self, other):
"""Return a new path with other as tail"""
return tuple(self) + other
def __eq__( self, sequence ):
"""Compare the tuple to another (==)"""
return list(self) == sequence
def __ge__( self, sequence ):
"""Compare the tuple to another (>=)"""
return list(self) >= sequence
def __gt__( self, sequence ):
"""Compare the tuple to another (>)"""
return list(self) > sequence
def __le__( self, sequence ):
"""Compare the tuple to another (<=)"""
return list(self) <= sequence
def __lt__( self, sequence ):
"""Compare the tuple to another (<)"""
return list(self) < sequence
def __ne__( self, sequence ):
"""Compare the tuple to another (!=)"""
return list(self) != sequence
def __repr__( self ):
"""Return a code-like representation of the weak tuple"""
return """%s( %s )"""%( self.__class__.__name__, super(WeakTuple,self).__repr__())
|
menpo/vrml97
|
vrml/weaktuple.py
|
Python
|
bsd-3-clause
| 4,708 | 0.015718 |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
from neutron_lib import exceptions
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import six
import webob.exc
from neutron._i18n import _, _LE, _LI
from neutron.api import api_common
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
exceptions.Conflict: webob.exc.HTTPConflict,
exceptions.InUse: webob.exc.HTTPConflict,
exceptions.BadRequest: webob.exc.HTTPBadRequest,
exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
exceptions.NotAuthorized: webob.exc.HTTPForbidden,
netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
}
class Controller(object):
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
UPDATE = 'update'
DELETE = 'delete'
@property
def plugin(self):
return self._plugin
@property
def resource(self):
return self._resource
@property
def attr_info(self):
return self._attr_info
@property
def member_actions(self):
return self._member_actions
def __init__(self, plugin, collection, resource, attr_info,
allow_bulk=False, member_actions=None, parent=None,
allow_pagination=False, allow_sorting=False):
if member_actions is None:
member_actions = []
self._plugin = plugin
self._collection = collection.replace('-', '_')
self._resource = resource.replace('-', '_')
self._attr_info = attr_info
self._allow_bulk = allow_bulk
self._allow_pagination = allow_pagination
self._allow_sorting = allow_sorting
self._native_bulk = self._is_native_bulk_supported()
self._native_pagination = self._is_native_pagination_supported()
self._native_sorting = self._is_native_sorting_supported()
self._policy_attrs = [name for (name, info) in self._attr_info.items()
if info.get('required_by_policy')]
self._notifier = n_rpc.get_notifier('network')
self._member_actions = member_actions
self._primary_key = self._get_primary_key()
if self._allow_pagination and self._native_pagination:
# Native pagination need native sorting support
if not self._native_sorting:
raise exceptions.Invalid(
_("Native pagination depend on native sorting")
)
if not self._allow_sorting:
LOG.info(_LI("Allow sorting is enabled because native "
"pagination requires native sorting"))
self._allow_sorting = True
self.parent = parent
if parent:
self._parent_id_name = '%s_id' % parent['member_name']
parent_part = '_%s' % parent['member_name']
else:
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in six.iteritems(self._attr_info):
if value.get('primary_key', False):
return key
return default_primary_key
def _is_native_bulk_supported(self):
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._plugin.__class__.__name__)
return getattr(self._plugin, native_bulk_attr_name, False)
def _is_native_pagination_supported(self):
return api_common.is_native_pagination_supported(self._plugin)
def _is_native_sorting_supported(self):
return api_common.is_native_sorting_supported(self._plugin)
def _exclude_attributes_by_policy(self, context, data):
"""Identifies attributes to exclude according to authZ policies.
Return a list of attribute names which should be stripped from the
response returned to the user because the user is not authorized
to see them.
"""
attributes_to_exclude = []
for attr_name in data.keys():
attr_data = self._attr_info.get(attr_name)
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
# this attribute is visible, check next one
continue
# if the code reaches this point then either the policy check
# failed or the attribute was not visible in the first place
attributes_to_exclude.append(attr_name)
return attributes_to_exclude
def _view(self, context, data, fields_to_strip=None):
"""Build a view of an API resource.
:param context: the neutron context
:param data: the object for which a view is being created
:param fields_to_strip: attributes to remove from the view
:returns: a view of the object which includes only attributes
visible according to API resource declaration and authZ policies.
"""
fields_to_strip = ((fields_to_strip or []) +
self._exclude_attributes_by_policy(context, data))
return self._filter_attributes(context, data, fields_to_strip)
def _filter_attributes(self, context, data, fields_to_strip=None):
if not fields_to_strip:
return data
return dict(item for item in six.iteritems(data)
if (item[0] not in fields_to_strip))
def _do_field_list(self, original_fields):
fields_to_add = None
# don't do anything if fields were not specified in the request
if original_fields:
fields_to_add = [attr for attr in self._policy_attrs
if attr not in original_fields]
original_fields.extend(self._policy_attrs)
return original_fields, fields_to_add
def __getattr__(self, name):
if name in self._member_actions:
@db_api.retry_db_errors
def _handle_action(request, id, **kwargs):
arg_list = [request.context, id]
# Ensure policy engine is initialized
policy.init()
# Fetch the resource and verify if the user can access it
try:
parent_id = kwargs.get(self._parent_id_name)
resource = self._item(request,
id,
do_authz=True,
field_list=None,
parent_id=parent_id)
except oslo_policy.PolicyNotAuthorized:
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
body = kwargs.pop('body', None)
# Explicit comparison with None to distinguish from {}
if body is not None:
arg_list.append(body)
# It is ok to raise a 403 because accessibility to the
# object was checked earlier in this method
policy.enforce(request.context,
name,
resource,
pluralized=self._collection)
ret_value = getattr(self._plugin, name)(*arg_list, **kwargs)
                # It is simply impossible to predict whether one of these
                # actions alters resource usage. For instance a tenant port
# is created when a router interface is added. Therefore it is
# important to mark as dirty resources whose counters have
# been altered by this operation
resource_registry.set_resources_dirty(request.context)
return ret_value
return _handle_action
else:
raise AttributeError()
def _get_pagination_helper(self, request):
if self._allow_pagination and self._native_pagination:
return api_common.PaginationNativeHelper(request,
self._primary_key)
elif self._allow_pagination:
return api_common.PaginationEmulatedHelper(request,
self._primary_key)
return api_common.NoPaginationHelper(request, self._primary_key)
def _get_sorting_helper(self, request):
if self._allow_sorting and self._native_sorting:
return api_common.SortingNativeHelper(request, self._attr_info)
elif self._allow_sorting:
return api_common.SortingEmulatedHelper(request, self._attr_info)
return api_common.NoSortingHelper(request, self._attr_info)
def _items(self, request, do_authz=False, parent_id=None):
"""Retrieves and formats a list of elements of the requested entity."""
# NOTE(salvatore-orlando): The following ensures that fields which
# are needed for authZ policy validation are not stripped away by the
# plugin before returning.
original_fields, fields_to_add = self._do_field_list(
api_common.list_args(request, 'fields'))
filters = api_common.get_filters(request, self._attr_info,
['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'])
kwargs = {'filters': filters,
'fields': original_fields}
sorting_helper = self._get_sorting_helper(request)
pagination_helper = self._get_pagination_helper(request)
sorting_helper.update_args(kwargs)
sorting_helper.update_fields(original_fields, fields_to_add)
pagination_helper.update_args(kwargs)
pagination_helper.update_fields(original_fields, fields_to_add)
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
obj_list = obj_getter(request.context, **kwargs)
obj_list = sorting_helper.sort(obj_list)
obj_list = pagination_helper.paginate(obj_list)
# Check authz
if do_authz:
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
# Omit items from list that should not be visible
obj_list = [obj for obj in obj_list
if policy.check(request.context,
self._plugin_handlers[self.SHOW],
obj,
plugin=self._plugin,
pluralized=self._collection)]
# Use the first element in the list for discriminating which attributes
# should be filtered out because of authZ policies
# fields_to_add contains a list of attributes added for request policy
# checks but that were not required by the user. They should be
# therefore stripped
fields_to_strip = fields_to_add or []
if obj_list:
fields_to_strip += self._exclude_attributes_by_policy(
request.context, obj_list[0])
collection = {self._collection:
[self._filter_attributes(
request.context, obj,
fields_to_strip=fields_to_strip)
for obj in obj_list]}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
collection[self._collection + "_links"] = pagination_links
# Synchronize usage trackers, if needed
resource_registry.resync_resource(
request.context, self._resource, request.context.tenant_id)
return collection
def _item(self, request, id, do_authz=False, field_list=None,
parent_id=None):
"""Retrieves and formats a single element of the requested entity."""
kwargs = {'fields': field_list}
action = self._plugin_handlers[self.SHOW]
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj_getter = getattr(self._plugin, action)
obj = obj_getter(request.context, id, **kwargs)
# Check authz
# FIXME(salvatore-orlando): obj_getter might return references to
# other resources. Must check authZ on them too.
if do_authz:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
return obj
@db_api.retry_db_errors
def index(self, request, **kwargs):
"""Returns a list of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return self._items(request, True, parent_id)
@db_api.retry_db_errors
def show(self, request, id, **kwargs):
"""Returns detailed information about the requested entity."""
try:
# NOTE(salvatore-orlando): The following ensures that fields
# which are needed for authZ policy validation are not stripped
# away by the plugin before returning.
field_list, added_fields = self._do_field_list(
api_common.list_args(request, "fields"))
parent_id = kwargs.get(self._parent_id_name)
# Ensure policy engine is initialized
policy.init()
return {self._resource:
self._view(request.context,
self._item(request,
id,
do_authz=True,
field_list=field_list,
parent_id=parent_id),
fields_to_strip=added_fields)}
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
objs = []
try:
for item in body[self._collection]:
kwargs = {self._resource: item}
if parent_id:
kwargs[self._parent_id_name] = parent_id
fields_to_strip = self._exclude_attributes_by_policy(
request.context, item)
objs.append(self._filter_attributes(
request.context,
obj_creator(request.context, **kwargs),
fields_to_strip=fields_to_strip))
return objs
# Note(salvatore-orlando): broad catch as in theory a plugin
# could raise any kind of exception
except Exception:
with excutils.save_and_reraise_exception():
for obj in objs:
obj_deleter = getattr(self._plugin,
self._plugin_handlers[self.DELETE])
try:
kwargs = ({self._parent_id_name: parent_id}
if parent_id else {})
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the
# exception
LOG.exception(_LE("Unable to undo add for "
"%(resource)s %(id)s"),
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
# plugin raised might have been created or not in the db.
# We need a way for ensuring that if it has been created,
# it is then deleted
def create(self, request, body=None, **kwargs):
self._notifier.info(request.context,
self._resource + '.create.start',
body)
return self._create(request, body, **kwargs)
@db_api.retry_db_errors
def _create(self, request, body, **kwargs):
"""Creates a new instance of the requested entity."""
parent_id = kwargs.get(self._parent_id_name)
body = Controller.prepare_request_body(request.context,
body, True,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.CREATE]
# Check authz
if self._collection in body:
# Have to account for bulk create
items = body[self._collection]
else:
items = [body]
# Ensure policy engine is initialized
policy.init()
# Store requested resource amounts grouping them by tenant
# This won't work with multiple resources. However because of the
# current structure of this controller there will hardly be more than
# one resource for which reservations are being made
request_deltas = collections.defaultdict(int)
for item in items:
self._validate_network_tenant_ownership(request,
item[self._resource])
policy.enforce(request.context,
action,
item[self._resource],
pluralized=self._collection)
if 'tenant_id' not in item[self._resource]:
# no tenant_id - no quota check
continue
tenant_id = item[self._resource]['tenant_id']
request_deltas[tenant_id] += 1
# Quota enforcement
reservations = []
try:
for (tenant, delta) in request_deltas.items():
reservation = quota.QUOTAS.make_reservation(
request.context,
tenant,
{self._resource: delta},
self._plugin)
reservations.append(reservation)
except n_exc.QuotaResourceUnknown as e:
# We don't want to quota this resource
LOG.debug(e)
def notify(create_result):
# Ensure usage trackers for all resources affected by this API
# operation are marked as dirty
with request.context.session.begin():
# Commit the reservation(s)
for reservation in reservations:
quota.QUOTAS.commit_reservation(
request.context, reservation.reservation_id)
resource_registry.set_resources_dirty(request.context)
notifier_method = self._resource + '.create.end'
self._notifier.info(request.context,
notifier_method,
create_result)
registry.notify(self._resource, events.BEFORE_RESPONSE, self,
context=request.context, data=create_result,
method_name=notifier_method,
collection=self._collection,
action=action, original={})
return create_result
def do_create(body, bulk=False, emulated=False):
kwargs = {self._parent_id_name: parent_id} if parent_id else {}
if bulk and not emulated:
obj_creator = getattr(self._plugin, "%s_bulk" % action)
else:
obj_creator = getattr(self._plugin, action)
try:
if emulated:
return self._emulate_bulk_create(obj_creator, request,
body, parent_id)
else:
if self._collection in body:
# This is weird but fixing it requires changes to the
# plugin interface
kwargs.update({self._collection: body})
else:
kwargs.update({self._resource: body})
return obj_creator(request.context, **kwargs)
except Exception:
# In case of failure the plugin will always raise an
# exception. Cancel the reservation
with excutils.save_and_reraise_exception():
for reservation in reservations:
quota.QUOTAS.cancel_reservation(
request.context, reservation.reservation_id)
if self._collection in body and self._native_bulk:
# plugin does atomic bulk create operations
objs = do_create(body, bulk=True)
# Use first element of list to discriminate attributes which
# should be removed because of authZ policies
fields_to_strip = self._exclude_attributes_by_policy(
request.context, objs[0])
return notify({self._collection: [self._filter_attributes(
request.context, obj, fields_to_strip=fields_to_strip)
for obj in objs]})
else:
if self._collection in body:
# Emulate atomic bulk behavior
objs = do_create(body, bulk=True, emulated=True)
return notify({self._collection: objs})
else:
obj = do_create(body)
return notify({self._resource: self._view(request.context,
obj)})
def delete(self, request, id, **kwargs):
"""Deletes the specified entity."""
if request.body:
msg = _('Request body is not supported in DELETE.')
raise webob.exc.HTTPBadRequest(msg)
self._notifier.info(request.context,
self._resource + '.delete.start',
{self._resource + '_id': id})
return self._delete(request, id, **kwargs)
@db_api.retry_db_errors
def _delete(self, request, id, **kwargs):
action = self._plugin_handlers[self.DELETE]
# Check authz
policy.init()
parent_id = kwargs.get(self._parent_id_name)
obj = self._item(request, id, parent_id=parent_id)
try:
policy.enforce(request.context,
action,
obj,
pluralized=self._collection)
except oslo_policy.PolicyNotAuthorized:
# To avoid giving away information, pretend that it
# doesn't exist
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_deleter = getattr(self._plugin, action)
obj_deleter(request.context, id, **kwargs)
# A delete operation usually alters resource usage, so mark affected
# usage trackers as dirty
resource_registry.set_resources_dirty(request.context)
notifier_method = self._resource + '.delete.end'
result = {self._resource: self._view(request.context, obj)}
notifier_payload = {self._resource + '_id': id}
notifier_payload.update(result)
self._notifier.info(request.context,
notifier_method,
notifier_payload)
registry.notify(self._resource, events.BEFORE_RESPONSE, self,
context=request.context, data=result,
method_name=notifier_method, action=action,
original={})
def update(self, request, id, body=None, **kwargs):
"""Updates the specified entity's attributes."""
try:
payload = body.copy()
except AttributeError:
msg = _("Invalid format: %s") % request.body
raise exceptions.BadRequest(resource='body', msg=msg)
payload['id'] = id
self._notifier.info(request.context,
self._resource + '.update.start',
payload)
return self._update(request, id, body, **kwargs)
@db_api.retry_db_errors
def _update(self, request, id, body, **kwargs):
body = Controller.prepare_request_body(request.context,
body, False,
self._resource, self._attr_info,
allow_bulk=self._allow_bulk)
action = self._plugin_handlers[self.UPDATE]
# Load object to check authz
# but pass only attributes in the original body and required
# by the policy engine to the policy 'brain'
field_list = [name for (name, value) in six.iteritems(self._attr_info)
if (value.get('required_by_policy') or
value.get('primary_key') or
'default' not in value)]
# Ensure policy engine is initialized
policy.init()
parent_id = kwargs.get(self._parent_id_name)
orig_obj = self._item(request, id, field_list=field_list,
parent_id=parent_id)
orig_object_copy = copy.copy(orig_obj)
orig_obj.update(body[self._resource])
# Make a list of attributes to be updated to inform the policy engine
# which attributes are set explicitly so that it can distinguish them
# from the ones that are set to their default values.
orig_obj[n_const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
try:
policy.enforce(request.context,
action,
orig_obj,
pluralized=self._collection)
except oslo_policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception() as ctxt:
# If a tenant is modifying its own object, it's safe to return
# a 403. Otherwise, pretend that it doesn't exist to avoid
# giving away information.
orig_obj_tenant_id = orig_obj.get("tenant_id")
if (request.context.tenant_id != orig_obj_tenant_id or
orig_obj_tenant_id is None):
ctxt.reraise = False
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
obj_updater = getattr(self._plugin, action)
kwargs = {self._resource: body}
if parent_id:
kwargs[self._parent_id_name] = parent_id
obj = obj_updater(request.context, id, **kwargs)
# Usually an update operation does not alter resource usage, but as
# there might be side effects it might be worth checking for changes
# in resource usage here as well (e.g: a tenant port is created when a
# router interface is added)
resource_registry.set_resources_dirty(request.context)
result = {self._resource: self._view(request.context, obj)}
notifier_method = self._resource + '.update.end'
self._notifier.info(request.context, notifier_method, result)
registry.notify(self._resource, events.BEFORE_RESPONSE, self,
context=request.context, data=result,
method_name=notifier_method, action=action,
original=orig_object_copy)
return result
@staticmethod
def prepare_request_body(context, body, is_create, resource, attr_info,
allow_bulk=False):
"""Verifies required attributes are in request body.
        Also checks that an attribute is only specified if it is allowed
        for the given operation (create/update).
        Attributes with default values are considered optional.
        The body argument must be the deserialized body.
"""
collection = resource + "s"
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
LOG.debug("Request body: %(body)s", {'body': body})
try:
if collection in body:
if not allow_bulk:
raise webob.exc.HTTPBadRequest(_("Bulk operation "
"not supported"))
if not body[collection]:
raise webob.exc.HTTPBadRequest(_("Resources required"))
bulk_body = [
Controller.prepare_request_body(
context, item if resource in item
else {resource: item}, is_create, resource, attr_info,
allow_bulk) for item in body[collection]
]
return {collection: bulk_body}
res_dict = body.get(resource)
except (AttributeError, TypeError):
msg = _("Body contains invalid data")
raise webob.exc.HTTPBadRequest(msg)
if res_dict is None:
msg = _("Unable to find '%s' in request body") % resource
raise webob.exc.HTTPBadRequest(msg)
attributes.populate_tenant_id(context, res_dict, attr_info, is_create)
attributes.verify_attributes(res_dict, attr_info)
if is_create: # POST
attributes.fill_default_value(attr_info, res_dict,
webob.exc.HTTPBadRequest)
else: # PUT
for attr, attr_vals in six.iteritems(attr_info):
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest)
return body
def _validate_network_tenant_ownership(self, request, resource_item):
# TODO(salvatore-orlando): consider whether this check can be folded
# in the policy engine
if (request.context.is_admin or request.context.is_advsvc or
self._resource not in ('port', 'subnet')):
return
network = self._plugin.get_network(
request.context,
resource_item['network_id'])
# do not perform the check on shared networks
if network.get('shared'):
return
network_owner = network['tenant_id']
if network_owner != resource_item['tenant_id']:
# NOTE(kevinbenton): we raise a 404 to hide the existence of the
# network from the tenant since they don't have access to it.
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
def create_resource(collection, resource, plugin, params, allow_bulk=False,
member_actions=None, parent=None, allow_pagination=False,
allow_sorting=False):
controller = Controller(plugin, collection, resource, params, allow_bulk,
member_actions=member_actions, parent=parent,
allow_pagination=allow_pagination,
allow_sorting=allow_sorting)
return wsgi_resource.Resource(controller, FAULT_MAP)
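# Hedged usage sketch (resource names and the attribute map are
# illustrative, not from this module):
#     resource = create_resource('widgets', 'widget', plugin,
#                                params=ATTRIBUTE_MAP['widgets'],
#                                allow_bulk=True, allow_pagination=True,
#                                allow_sorting=True)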
|
igor-toga/local-snat
|
neutron/api/v2/base.py
|
Python
|
apache-2.0
| 33,385 | 0.00012 |
# -*-coding: utf-8 -*-
import colander
from . import (
SelectInteger,
ResourceSchema,
BaseForm,
BaseSearchForm,
)
from ..resources.leads_offers import LeadsOffersResource
from ..models.lead_offer import LeadOffer
from ..models.currency import Currency
from ..models.supplier import Supplier
from ..models.service import Service
from ..lib.qb.leads_offers import LeadsOffersQueryBuilder
from ..lib.utils.security_utils import get_auth_employee
class _LeadOfferSchema(ResourceSchema):
service_id = colander.SchemaNode(
SelectInteger(Service),
)
supplier_id = colander.SchemaNode(
SelectInteger(Supplier),
)
currency_id = colander.SchemaNode(
SelectInteger(Currency),
)
price = colander.SchemaNode(
colander.Money(),
)
status = colander.SchemaNode(
colander.String(),
)
descr = colander.SchemaNode(
colander.String(),
validator=colander.Length(max=255),
)
class LeadOfferForm(BaseForm):
_schema = _LeadOfferSchema
def submit(self, lead_offer=None):
if not lead_offer:
lead_offer = LeadOffer(
resource=LeadsOffersResource.create_resource(
get_auth_employee(self.request)
)
)
lead_offer.service_id = self._controls.get('service_id')
lead_offer.currency_id = self._controls.get('currency_id')
lead_offer.supplier_id = self._controls.get('supplier_id')
lead_offer.price = self._controls.get('price')
lead_offer.status = self._controls.get('status')
lead_offer.descr = self._controls.get('descr')
return lead_offer
class LeadOfferSearchForm(BaseSearchForm):
_qb = LeadsOffersQueryBuilder
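# Hedged usage sketch (request handling is illustrative; BaseForm's exact
# validate/submit flow is assumed from the method names above):
#     form = LeadOfferForm(request)
#     if form.validate():
#         lead_offer = form.submit()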
|
mazvv/travelcrm
|
travelcrm/forms/leads_offers.py
|
Python
|
gpl-3.0
| 1,762 | 0 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from behave import given
from behave import when
from steps.util import create_consumer_group
from steps.util import create_random_group_id
from steps.util import create_random_topic
from steps.util import initialize_kafka_offsets_topic
from steps.util import produce_example_msg
PRODUCED_MSG_COUNT = 82
CONSUMED_MSG_COUNT = 39
@given(u'we have an existing kafka cluster with a topic')
def step_impl1(context):
context.topic = create_random_topic(1, 1)
@given(u'we have a kafka consumer group')
def step_impl2(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
)
@when(u'we produce some number of messages into the topic')
def step_impl3(context):
produce_example_msg(context.topic, num_messages=PRODUCED_MSG_COUNT)
context.msgs_produced = PRODUCED_MSG_COUNT
@when(u'we consume some number of messages from the topic')
def step_impl4(context):
context.group = create_random_group_id()
context.client = create_consumer_group(
context.topic,
context.group,
num_messages=CONSUMED_MSG_COUNT,
)
context.msgs_consumed = CONSUMED_MSG_COUNT
@given(u'we have initialized kafka offsets storage')
def step_impl5(context):
initialize_kafka_offsets_topic()
@given(u'we have an existing kafka cluster')
def step_impl6(context):
pass
@given(u'we have an existing kafka cluster with multiple topics')
def step_impl7(context):
context.topic = []
context.topic.append(create_random_topic(1, 1, 'abcde'))
context.topic.append(create_random_topic(1, 1, 'abcd'))
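# Hedged sketch of a scenario these steps could back (the step wording must
# match the decorators above; the scenario itself is illustrative):
#     Scenario: produce and consume messages
#       Given we have an existing kafka cluster with a topic
#       When we produce some number of messages into the topic
#       And we consume some number of messages from the topic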
|
Yelp/kafka-utils
|
tests/acceptance/steps/common.py
|
Python
|
apache-2.0
| 2,265 | 0 |
'''This file will take arguments from the command line; if none are found it
will look for a .bot file, and if that isn't found it will prompt the user for
auth tokens. With this information the masterbot will connect to its own
twitch channel and await a !connect command.'''
# Author MrYevral
# Check for a .bot file in the current directory
import os
import sys
def getBotInfo():
if len(sys.argv) > 2:
if sys.argv[2] == '-c':
newBotFile()
else:
print "incorrect use of flags please use -c for creating a bot"
'''for file in os.listdir("."):
if file.endswith(".bot"):
print file'''
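def newBotFile():
    # Hedged stub: the real newBotFile() is not defined in this file; it
    # presumably creates a .bot file holding the auth tokens. This stub only
    # keeps the module importable standalone.
    raise NotImplementedError("newBotFile() is defined elsewhere")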
|
MrYevral/YevBot
|
Alternate/MasterBot.py
|
Python
|
gpl-3.0
| 586 | 0.020478 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example deactivates all active placements. To determine which
placements exist, run get_all_placements.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
placement_service = client.GetService('PlacementService', version='v201306')
# Create query.
values = [{
'key': 'status',
'value': {
'xsi_type': 'TextValue',
'value': 'ACTIVE'
}
}]
query = 'WHERE status = :status'
# Get placements by statement.
placements = DfpUtils.GetAllEntitiesByStatementWithService(
placement_service, query=query, bind_vars=values)
for placement in placements:
print ('Placement with id \'%s\', name \'%s\', and status \'%s\' will be '
'deactivated.' % (placement['id'], placement['name'],
placement['status']))
print 'Number of placements to be deactivated: %s' % len(placements)
# Perform action.
result = placement_service.PerformPlacementAction(
{'type': 'DeactivatePlacements'}, {'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print 'Number of placements deactivated: %s' % result['numChanges']
else:
print 'No placements were deactivated.'
|
caioserra/apiAdwords
|
examples/adspygoogle/dfp/v201306/deactivate_placements.py
|
Python
|
apache-2.0
| 2,253 | 0.003107 |
# -*- coding: utf-8 *-*
from pyglet.window import key
from pyglet import clock
from . import util, physicalobject
from . import resources
class Ship(physicalobject.PhysicalObject):
"""A class for the player"""
def __init__(self, thrust_image=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set some easy-to-tweak constants
# play values
self.rotate_speed = 170.0
self.bullet_speed = 500.0
self.thrust_acc = 500
self.friction = 0.95
self.bullet_duration = 0.6
self.thrust = False
self.thrust_image = thrust_image
self.normal_image = self.image
self.bullets = set() # FIXME: bullet by OOT
def on_key_press(self, symbol, modifiers):
if symbol == key.SPACE:
self.shoot()
elif symbol == key.LEFT:
self.turn(-1)
elif symbol == key.RIGHT:
self.turn(1)
elif symbol == key.UP:
self.set_thrust(True)
def on_key_release(self, symbol, modifiers):
if symbol in (key.LEFT, key.RIGHT):
self.turn(0)
elif symbol == key.UP:
self.set_thrust(False)
def update(self, dt):
super().update(dt)
if self.thrust and self.thrust_image:
self.image = self.thrust_image
else:
self.image = self.normal_image
# update velocity
if self.thrust:
acc = util.angle_to_vector(self.rotation)
for i in (0,1):
self.vel[i] += acc[i] * self.thrust_acc * dt
# add friction
for i in (0,1):
self.vel[i] *= (1 - self.friction * dt)
        for bullet in set(self.bullets):
            # drop any bullet whose lifespan has expired
            if bullet.update(dt):
                self.bullets.remove(bullet)
        # returning inside the loop would skip the remaining bullets;
        # the ship itself never expires
        return False
def set_thrust(self, on):
self.thrust = on
if on:
resources.thrust_sound.seek(0)
resources.thrust_sound.play()
else:
resources.thrust_sound.pause()
def turn(self, clockwise):
self.rotation_speed = clockwise * self.rotate_speed
def shoot(self):
resources.bullet_sound.play()
forward = util.angle_to_vector(self.rotation)
bullet_pos = [self.x + self.radius * forward[0], self.y + self.radius * forward[1]]
bullet_vel = [self.vel[0] + self.bullet_speed * forward[0], self.vel[1] + self.bullet_speed * forward[1]]
        bullet = physicalobject.PhysicalObject(
            lifespan=self.bullet_duration, vel=bullet_vel,
            x=bullet_pos[0], y=bullet_pos[1], img=resources.shot_image,
            batch=self.batch, group=self.group, screensize=self.screensize)
self.bullets.add(bullet)
def destroy(self):
# check invulnerability
if self.opacity != 255:
return
explosion = super().destroy()
self.rotation = -90
self.x = self.screensize[0] / 2
self.y = self.screensize[1] / 2
self.vel = [0, 0]
self.set_thrust(False)
self.visible = True
return explosion
def normal_mode(self, dt):
self.opacity = 255
def invulnerable(self, time):
# be invulnerable for a brief time
self.opacity = 128
clock.schedule_once(self.normal_mode, time)
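# Hedged sketch of the helper assumed above; util.angle_to_vector is not
# shown in this file, but a degree-based version consistent with how
# update() and shoot() use self.rotation could look like:
#     import math
#     def angle_to_vector(angle_deg):
#         rad = math.radians(angle_deg)
#         return [math.cos(rad), math.sin(rad)]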
|
mammadori/asteroids
|
game/ship.py
|
Python
|
bsd-3-clause
| 3,293 | 0.004555 |
from django.contrib import admin
from freemix.exhibit import models
class CanvasAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
search_fields = ('title', 'description',)
admin.site.register(models.Canvas, CanvasAdmin)
class ExhibitAdmin(admin.ModelAdmin):
list_display = ('slug', 'owner',)
search_fields = ('slug', 'title', 'description', 'owner__username')
admin.site.register(models.Exhibit, ExhibitAdmin)
class ThemeAdmin(admin.ModelAdmin):
list_display = ('title', 'description')
search_fields = ('title', 'description',)
admin.site.register(models.Theme, ThemeAdmin)
|
zepheira/freemix
|
freemix/exhibit/admin.py
|
Python
|
apache-2.0
| 632 | 0.011076 |
import subprocess
import sys
requirements = ["numpy", "scipy", "pandas",
                "matplotlib", "peakutils", "uncertainties",
                "pyqtgraph"]
for package in requirements:
    # install into the interpreter running this script; check_call raises
    # CalledProcessError on failure instead of silently continuing
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
|
laserkelvin/FTSpecViewer
|
setup.py
|
Python
|
gpl-3.0
| 214 | 0 |
"""
This core.py module is part of the Jaide (Junos Aide) package.
It is free software for use in manipulating junos devices. To immediately get
started, take a look at the example files for implementation
guidelines. More information can be found at the github page found here:
https://github.com/NetworkAutomation/jaide
"""
# This is for modifying printed output (used for --scp to rewrite the same line
# multiple times.) It is required to be at the top of the file.
from __future__ import print_function
# standard modules.
from os import path
import time
import difflib
# from lxml import etree, objectify
# needed to parse strings into xml for cases when ncclient doesn't handle
# it (commit, validate, etc)
import xml.etree.ElementTree as ET
import logging # logging needed for disabling paramiko logging output
# intra-Jaide imports
from errors import InvalidCommandError
from utils import clean_lines, xpath
# network modules for device connections
try:
from ncclient import manager
from scp import SCPClient
import paramiko
except ImportError as e:
print("FAILED TO IMPORT ONE OR MORE PACKAGES.\n"
"NCCLIENT\thttps://github.com/leopoul/ncclient/\n"
"PARAMIKO\thttps://github.com/paramiko/paramiko\n"
"SCP\t\thttps://pypi.python.org/pypi/scp/0.8.0")
print('\nImport Error:\n')
raise e
class Jaide():
""" Purpose: An object for manipulating a Junos device.
Methods include copying files, running show commands,
shell commands, commit configuration changes, finding
interface errors, and getting device status/information.
All of the methods listed below that touch Junos are wrapped by a
decorator function @check_instance, which handles ensuring the correct
connection is used to perform the requested operation.
"""
def __init__(self, host, username, password, connect_timeout=5,
session_timeout=300, connect="paramiko", port=22):
""" Initialize the Jaide object.
Purpose: This is the initialization function for the Jaide class,
| which creates a connection to a junos device. It will
| return a Jaide object, which can then be used to actually
| send commands to the device. This function establishes the
| connection to the device via a NCClient manager object.
| > **NOTE:** The connect parameter should be ignored under most
| > circumstances. Changing it only affects how Jaide first
| > connects to the device. The decorator function
| > @check_instance will handle moving between session
| > types for you.
@param host: The IP or hostname of the device to connect to.
@type host: str
@param username: The username for the connection
@type username: str
@param password: The password for the connection
@type password: str
@param connect_timeout: The timeout value, in seconds, for attempting
| to connect to the device.
@type connect_timeout: int
@param session_timeout: The timeout value, in seconds, for the
| session. If a command is sent and nothing
| is heard back from the device in this
| timeframe, the session is declared dead,
| and times out.
@type session_timeout: int
@param connect: **NOTE: We default to 'paramiko', but this
| parameter can be set to False to prevent connecting
| on object instantiation. The @check_instance
| decorator function will handle sliding between
| session types depending on what function is being
| called, meaning generally self.conn_type and this
| connect parameter should be ignored.**
|
| The connection type that should be made. Several
| options are available: 'ncclient', 'scp', and
| 'paramiko', 'shell' and 'root'.
|
| 'paramiko' : is used for operational commands
| (couldn't use ncclient because of lack of pipes `|`
| support.
|
| 'scp' : is used for copying files to/from
| the device, and uses an SCP connection.
|
| 'shell' : is for sending shell commands.
|
| 'root' : is when the user is doing operational
| commands, but is logged in as root, (requires
| handling separately, since this puts the session
| into a shell prompt)
|
| 'ncclient' : is used for all other commands.
@type connect: str
@param port: The destination port on the device to attempt the
| connection.
@type port: int
@returns: an instance of the Jaide class
@rtype: jaide.Jaide object
"""
# store object properties and set initial values.
self.host = host.strip()
self.port = port
self.username = username
self.password = password
self.session_timeout = session_timeout
self.connect_timeout = connect_timeout
self._shell = ""
self._scp = ""
self.conn_type = connect
self._in_cli = False
self._filename = None
# make the connection to the device
if connect:
self.connect()
def check_instance(function):
""" Wrapper that tests the type of _session.
Purpose: This decorator function is used by all functions within
| the Jaide class that interact with a device to ensure the
| proper session type is in use. If it is not, it will
| attempt to migrate _session to that type before moving
| to the originally requested function.
| > **NOTE:** This function is a decorator, and should not be
| > used directly. All other methods in this class that touch
| > the Junos device are wrapped by this function to ensure the
| > proper connection type is used.
@param function: the function that is being wrapped around
@type function: function
@returns: the originally requested function
@rtype: function
"""
def wrapper(self, *args, **kwargs):
func_trans = {
"commit": manager.Manager,
"compare_config": manager.Manager,
"commit_check": manager.Manager,
"device_info": manager.Manager,
"diff_config": manager.Manager,
"health_check": manager.Manager,
"interface_errors": manager.Manager,
"op_cmd": paramiko.client.SSHClient,
"shell_cmd": paramiko.client.SSHClient,
"scp_pull": paramiko.client.SSHClient,
"scp_push": paramiko.client.SSHClient
}
# when doing an operational command, logging in as root
# brings you to shell, so we need to enter the device as a shell
# connection, and move to cli to perform the command
# this is a one-off because the isinstance() check will be bypassed
if self.username == "root" and function.__name__ == "op_cmd":
if not self._session:
self.conn_type = "paramiko"
self.connect()
if not self._shell:
self.conn_type = "root"
self.connect()
self.shell_to_cli() # check if we're in the cli
# Have to call shell command separately, since we are using _shell
# for comparison, not _session.
elif function.__name__ == 'shell_cmd':
if not self._shell:
self.conn_type = "shell"
self.connect()
self.cli_to_shell() # check if we're in shell.
if isinstance(self._session, func_trans[function.__name__]):
# If they're doing SCP, we have to check for both _session and
# _scp
if function.__name__ in ['scp_pull', 'scp_push']:
if not isinstance(self._scp, SCPClient):
self.conn_type = "scp"
self.connect()
else:
self.disconnect()
if function.__name__ == "op_cmd":
self.conn_type = "paramiko"
elif function.__name__ in ["scp_pull", "scp_push"]:
self.conn_type = "scp"
else:
self.conn_type = "ncclient"
self.connect()
return function(self, *args, **kwargs)
return wrapper
def cli_to_shell(self):
""" Move _shell to the shell from the command line interface (CLI). """
if self._in_cli:
self._shell.send("start shell\n")
time.sleep(2)
self._shell.recv(9999)
self._in_cli = False
return True
return False
@check_instance
def commit(self, commands="", confirmed=None, comment=None,
at_time=None, synchronize=False, req_format='text'):
""" Perform a commit operation.
Purpose: Executes a commit operation. All parameters are optional.
| commit confirm and commit at are mutually exclusive. All
| the others can be used with each other and commit confirm/at.
@param commands: A string or list of multiple commands
| that the device will compare with.
| If a string, it can be a single command,
| multiple commands separated by commas, or
| a filepath location of a file with multiple
| commands, each on its own line.
@type commands: str or list
@param confirmed: integer value of the number of **seconds** to
| confirm the commit for, if requested.
@type confirmed: int
@param comment: string that the user wants to comment the commit
| with. Will show up in the 'show system commit' log.
@type comment: str
@param at_time: string designating the time at which the commit
| should happen. Can be in one of two Junos approved
| formats.
@type comment: str
@param synchronize: boolean set to true if desiring a commit
| synchronize operation.
@type synchronize: bool
@param req_format: string to specify the response format. Accepts
| either 'text' or 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
# ncclient doesn't support a truly blank commit, so if nothing is
# passed, use 'annotate system' to make a blank commit
if not commands:
commands = 'annotate system ""'
clean_cmds = []
for cmd in clean_lines(commands):
clean_cmds.append(cmd)
# try to lock the candidate config so we can make changes.
self.lock()
        # pass the cleaned command list built above; the raw 'commands'
        # argument may be a comma-separated string or a filepath
        self._session.load_configuration(action='set', config=clean_cmds)
results = ""
# confirmed and commit at are mutually exclusive. commit confirm
# takes precedence.
if confirmed:
results = self._session.commit(confirmed=True,
timeout=str(confirmed),
comment=comment,
synchronize=synchronize)
else:
results = self._session.commit(comment=comment, at_time=at_time,
synchronize=synchronize)
self.unlock()
if results:
if req_format == 'xml':
return results
# commit() DOES NOT return a parse-able xml tree, so we
# convert it to an ElementTree xml tree.
results = ET.fromstring(results.tostring)
out = ''
for i in results.iter():
# the success message is just a tag, so we need to get it
# specifically.
if i.tag == 'commit-check-success':
out += 'configuration check succeeds\n'
elif i.tag == 'commit-success':
out += 'commit complete\n'
elif i.tag == 'ok':
out += 'commit complete\n'
# this is for normal output with a tag and inner text, it will
# strip the inner text and add it to the output.
elif i.text is not None:
if i.text.strip() + '\n' != '\n':
out += i.text.strip() + '\n'
# this is for elements that don't have inner text,
# it will add the tag to the output.
elif i.text is None:
if i.tag + '\n' != '\n':
out += i.tag + '\n'
return out
return False
@check_instance
def commit_check(self, commands="", req_format="text"):
""" Execute a commit check operation.
        Purpose: This method will take in a string of multiple commands,
                 | and perform a 'commit check' on the device to ensure
| the commands are syntactically correct. The response can
| be formatted as text or as xml.
@param commands: A string, filepath, or list of multiple commands
| that the device will compare with.
@type commands: str or list
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not commands:
raise InvalidCommandError('No commands specified')
clean_cmds = []
for cmd in clean_lines(commands):
clean_cmds.append(cmd)
self.lock()
self._session.load_configuration(action='set', config=clean_cmds)
# conn.validate() DOES NOT return a parse-able xml tree, so we
# convert it to an ElementTree xml tree.
results = ET.fromstring(self._session.validate(
source='candidate').tostring)
# release the candidate configuration
self.unlock()
if req_format == "xml":
return ET.tostring(results)
out = ""
# we have to parse the elementTree object, and get the text
# from the xml.
for i in results.iter():
# the success message is just a tag, so we need to get it
# specifically.
if i.tag == 'commit-check-success':
out += 'configuration check succeeds\n'
# this is for normal output with a tag and inner text, it will
# strip the inner text and add it to the output.
elif i.text is not None:
if i.text.strip() + '\n' != '\n':
out += i.text.strip() + '\n'
# this is for elements that don't have inner text, it will add the
# tag to the output.
elif i.text is None:
if i.tag + '\n' != '\n':
out += i.tag + '\n'
return out
@check_instance
def compare_config(self, commands="", req_format="text"):
""" Execute a 'show | compare' against the specified commands.
        Purpose: This method will take in a string of multiple commands,
                 | and perform a 'show | compare' on the device to show the
| differences between the active running configuration and
| the changes proposed by the passed commands parameter.
@param commands: A string, filepath, or list of multiple commands
| that the device will compare with.
@type commands: str or list
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not commands:
raise InvalidCommandError('No commands specified')
clean_cmds = [cmd for cmd in clean_lines(commands)]
self.lock()
self._session.load_configuration(action='set', config=clean_cmds)
out = self._session.compare_configuration()
self.unlock()
if req_format.lower() == "xml":
return out
return out.xpath(
'configuration-information/configuration-output')[0].text
def connect(self):
""" Establish a connection to the device.
Purpose: This method is used to make a connection to the junos
| device. The internal property conn_type is what
| determines the type of connection we make to the device.
| - 'paramiko' is used for operational commands (to allow
| pipes in commands)
| - 'scp' is used for copying files
| - 'shell' is used for to send shell commands
| - 'root' is used when logging into the device as root, and
| wanting to send operational commands
| - 'ncclient' is used for the rest (commit, compare_config,
| commit_check)
@returns: None
@rtype: None
"""
if self.conn_type == 'paramiko':
self._session = paramiko.SSHClient()
# These two lines set the paramiko logging to Critical to
# remove extra messages from being sent to the user output.
logger = logging.Logger.manager.getLogger('paramiko.transport')
logger.setLevel(logging.CRITICAL)
self._session.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
self._session.connect(hostname=self.host,
username=self.username,
password=self.password,
port=self.port,
timeout=self.connect_timeout)
if self.conn_type == 'scp':
self._scp_session = paramiko.SSHClient()
logger = logging.Logger.manager.getLogger('paramiko.transport')
logger.setLevel(logging.CRITICAL)
self._scp_session.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
self._scp_session.connect(hostname=self.host,
username=self.username,
password=self.password,
port=self.port,
timeout=self.connect_timeout)
self._scp = SCPClient(self._scp_session.get_transport())
elif self.conn_type == "ncclient":
self._session = manager.connect(
host=self.host,
port=self.port,
username=self.username,
password=self.password,
timeout=self.connect_timeout,
device_params={'name': 'junos'},
hostkey_verify=False
)
elif self.conn_type == 'shell':
if not self._session:
self.conn_type = 'paramiko'
self.connect()
self.conn_type = 'shell'
if not self._shell:
self._shell = self._session.invoke_shell()
time.sleep(2)
if self.username != 'root' and not self._in_cli:
self._in_cli = True
if not self.cli_to_shell():
self._shell.recv(9999)
elif self.conn_type == 'root':
# open the shell if necessary, and move into CLI
if not self._shell:
self._shell = self._session.invoke_shell()
time.sleep(2)
if not self.shell_to_cli():
self._shell.recv(9999)
self._update_timeout(self.session_timeout)
def _copy_status(self, filename, size, sent):
""" Echo status of an SCP operation.
Purpose: Callback function for an SCP operation. Used to show
| the progress of an actively running copy. This directly
| prints to stdout, one line for each file as it's copied.
| The parameters received by this function are those received
| from the scp.put or scp.get function, as explained in the
| python scp module docs.
@param filename: The filename of file being copied.
@type filename: str
@param size: The total size of the current file being copied.
@type size: str or float
@param sent: The amount of data sent for the current file being copied.
@type sent: str or float
@returns: None
"""
output = "Transferred %.0f%% of the file %s" % (
(float(sent) / float(size) * 100), path.normpath(filename))
output += (' ' * (120 - len(output)))
if filename != self._filename:
if self._filename is not None:
print('')
self._filename = filename
print(output, end='\r')
@check_instance
def device_info(self):
""" Pull basic device information.
Purpose: This function grabs the hostname, model, running version, and
| serial number of the device.
@returns: The output that should be shown to the user.
@rtype: str
"""
# get hostname, model, and version from 'show version'
resp = self._session.get_software_information(format='xml')
hostname = resp.xpath('//software-information/host-name')[0].text
model = resp.xpath('//software-information/product-model')[0].text
version = 'Unknown'
if resp.xpath('//junos-version'):
""" case:
<junos-version>15.1</junos-version>
"""
try:
version = resp.xpath('//junos-version')[0].text
except IndexError:
pass
elif resp.xpath("//package-information[name = 'junos-version']"):
""" case:
<package-information>
<name>junos-version</name>
<comment>Junos: 14.2R4</comment>
</package-information>
"""
try:
version = (resp.xpath(
"//package-information[name = 'junos-version']/comment"
)[0].text).split()[1]
except IndexError:
pass
else:
""" case:
<package-information>
<name>junos</name>
<comment>JUNOS Base OS boot [12.3R5]</comment>
</package-information>
"""
try:
version = ((resp.xpath(
'//software-information/package-information/comment'
)[0].text.split('[')[1].split(']')[0]))
except IndexError:
pass
# get uptime from 'show system uptime'
resp = self._session.get_system_uptime_information(format='xml')
try:
current_time = resp.xpath('//current-time/date-time')[0].text
except IndexError:
current_time = 'Unknown'
try:
uptime = resp.xpath('//uptime-information/up-time')[0].text
except IndexError:
uptime = 'Unknown'
# get serial number from 'show chassis hardware'
show_hardware = self._session.get_chassis_inventory(format='xml')
# If we're hitting an EX, grab each Routing Engine Serial number
# to get all RE SNs in a VC
try:
chassis_module = show_hardware.xpath(
'//chassis-inventory/chassis/chassis-module/description'
)[0].text
except IndexError:
chassis_module = 'Unknown'
        # note: ('EX' or 'ex' or 'Ex') would evaluate to just 'EX', so test
        # each casing explicitly
        if any(token in chassis_module for token in ('EX', 'ex', 'Ex')):
serial_num = ''
for eng in show_hardware.xpath(
'//chassis-inventory/chassis/chassis-module'):
if 'Routing Engine' in eng.xpath('name')[0].text:
serial_num += (eng.xpath('name')[0].text + ' Serial #: ' +
eng.xpath('serial-number')[0].text)
else: # Any other device type, just grab chassis SN
try:
serial_num = ('Chassis Serial Number: ' + show_hardware.xpath(
'//chassis-inventory/chassis/serial-number')[0].text)
except IndexError:
serial_num = 'Chassis Serial Number: ' \
+ 'Unknown (virtual machine?)'
return ('Hostname: %s\nModel: %s\nJunos Version: %s\n%s\nCurrent Time:'
' %s\nUptime: %s\n' %
(hostname, model, version, serial_num, current_time, uptime))
# TODO: [2.1] @rfe optional different username/password.
@check_instance
def diff_config(self, second_host, mode='stanza'):
""" Generate configuration differences with a second device.
Purpose: Open a second ncclient.manager.Manager with second_host, and
| and pull the configuration from it. We then use difflib to
| get the delta between the two, and yield the results.
@param second_host: the IP or hostname of the second device to
| compare against.
@type second_host: str
@param mode: string to signify 'set' mode or 'stanza' mode.
@type mode: str
@returns: iterable of strings
@rtype: str
"""
second_conn = manager.connect(
host=second_host,
port=self.port,
username=self.username,
password=self.password,
timeout=self.connect_timeout,
device_params={'name': 'junos'},
hostkey_verify=False
)
command = 'show configuration'
if mode == 'set':
command += ' | display set'
# get the raw xml config
config1 = self._session.command(command, format='text')
# for each /configuration-output snippet, turn it to text and join them
config1 = ''.join([snippet.text.lstrip('\n') for snippet in
config1.xpath('//configuration-output')])
config2 = second_conn.command(command, format='text')
config2 = ''.join([snippet.text.lstrip('\n') for snippet in
config2.xpath('//configuration-output')])
return difflib.unified_diff(config1.splitlines(), config2.splitlines(),
self.host, second_host)
def disconnect(self):
""" Close the connection(s) to the device.
Purpose: Closes the current connection(s) to the device, no matter
| what types exist.
@returns: None
@rtype: None
"""
if self._shell:
self._shell.close()
self._shell = ""
if isinstance(self._session, manager.Manager):
self._session.close_session()
elif isinstance(self._session, paramiko.client.SSHClient):
self._session.close()
self._session = ""
elif isinstance(self._session, SCPClient):
self._session.close()
self._session = ""
self._scp = ""
def _error_parse(self, interface, face):
""" Parse the extensive xml output of an interface and yield errors.
Purpose: Takes the xml output of 'show interfaces extensive' for a
| given interface and yields the error types that have a
| significant number of errors.
@param interface: The xml output of the 'sh int ext' command for
| the desired interface.
@type interface: lxml.etree._Element object
@param face: The direction of the errors we're wanting. Either 'input'
| or 'output' is accepted.
@type face: str
@returns: Yields each error that has a significant number
@rtype: iterable of strings.
"""
try:
error_list = interface.xpath(face + '-error-list')[0].getchildren()
except IndexError: # no error list on this interface
pass
else:
for x in range(len(error_list)):
if error_list[x].tag == "carrier-transitions":
if int(error_list[x].text.strip()) > 50:
yield " has greater than 50 flaps."
elif int(error_list[x].text.strip()) > 0:
yield " has %s of %s." % (error_list[x].text.strip(),
error_list[x].tag.strip())
@check_instance
def health_check(self):
""" Pull health and alarm information from the device.
Purpose: Grab the cpu/mem usage, system/chassis alarms, top 5
| processes, and states if the primary/backup partitions are on
| different versions.
@returns: The output that should be shown to the user.
@rtype: str
"""
output = 'Chassis Alarms:\n\t'
# Grab chassis alarms, system alarms, show chassis routing-engine,
# 'show system processes extensive', and also xpath to the
# relevant nodes on each.
chassis_alarms = self._session.command("show chassis alarms")
chassis_alarms = chassis_alarms.xpath('//alarm-detail')
system_alarms = self._session.command("show system alarms")
system_alarms = system_alarms.xpath('//alarm-detail')
chass = self._session.command(command="show chassis routing-engine",
format='text').xpath('//output')[0].text
proc = self._session.command("show system processes extensive")
proc = proc.xpath('output')[0].text.split('\n')
if chassis_alarms == []: # Chassis Alarms
output += 'No chassis alarms active.\n'
else:
for i in chassis_alarms:
output += (i.xpath('alarm-class')[0].text.strip() + ' Alarm \t'
'\t' + i.xpath('alarm-time')[0].text.strip() +
'\n\t' +
i.xpath('alarm-description')[0].text.strip() + '\n')
output += '\nSystem Alarms: \n\t'
if system_alarms == []: # System Alarms
output += 'No system alarms active.\n'
else:
for i in system_alarms:
output += (i.xpath('alarm-class')[0].text.strip() + ' Alarm '
'\t\t' + i.xpath('alarm-time')[0].text.strip() +
'\n\t' +
i.xpath('alarm-description')[0].text.strip() + '\n')
# add the output of the show chassis routing-engine to the command.
output += '\n' + chass
# Grabs the top 5 processes and the header line.
output += ('\n\nTop 5 busiest processes (high mgd values likely from '
'script execution):\n')
for line_number in range(8, 14):
output += proc[line_number] + '\n'
return output
@check_instance
def interface_errors(self):
""" Parse 'show interfaces extensive' and return interfaces with errors.
Purpose: This function is called for the -e flag. It will let the user
| know if there are any interfaces with errors, and what those
| interfaces are.
@returns: The output that should be shown to the user.
@rtype: str
"""
output = [] # used to store the list of interfaces with errors.
# get a string of each physical and logical interface element
dev_response = self._session.command('sh interfaces extensive')
ints = dev_response.xpath('//physical-interface')
ints += dev_response.xpath('//logical-interface')
for i in ints:
# Grab the interface name for user output.
int_name = i.xpath('name')[0].text.strip()
# Only check certain interface types.
            # note: `('ge' or 'fe' or ...)` always evaluates to 'ge', so the
            # original test only ever matched 'ge'; check each type explicitly.
            if any(int_type in int_name for int_type in
                   ('ge', 'fe', 'ae', 'xe', 'so', 'et', 'vlan', 'lo0',
                    'irb')):
try:
status = (i.xpath('admin-status')[0].text.strip() +
'/' + i.xpath('oper-status')[0].text.strip())
except IndexError:
pass
else:
for error in self._error_parse(i, "input"):
output.append("%s (%s)%s" % (int_name, status,
error))
for error in self._error_parse(i, "output"):
output.append("%s (%s)%s" % (int_name, status,
error))
if output == []:
output.append('No interface errors were detected on this device.')
return '\n'.join(output) + '\n'
def lock(self):
""" Lock the candidate config. Requires ncclient.manager.Manager. """
if isinstance(self._session, manager.Manager):
self._session.lock()
@check_instance
def op_cmd(self, command, req_format='text', xpath_expr=""):
""" Execute an operational mode command.
Purpose: Used to send an operational mode command to the connected
| device. This requires and uses a paramiko.SSHClient() as
| the handler so that we can easily pass and allow all pipe
| commands to be used.
|
| We indiscriminately attach ' | no-more' on the end of
| every command so the device doesn't hold output. The
| req_format parameter can be set to 'xml' to force raw
| xml output in the reply.
        @param command: The single command used to retrieve output from the
                      | device. Any pipes will be taken into account.
@type command: str
@param req_format: The desired format of the response, defaults to
| 'text', but also accepts 'xml'. **NOTE**: 'xml'
| will still return a string, not a libxml ElementTree
@type req_format: str
@returns: The reply from the device.
@rtype: str
"""
if not command:
raise InvalidCommandError("Parameter 'command' cannot be empty")
if req_format.lower() == 'xml' or xpath_expr:
command = command.strip() + ' | display xml'
command = command.strip() + ' | no-more\n'
out = ''
# when logging in as root, we use _shell to get the response.
if self.username == 'root':
self._shell.send(command)
time.sleep(3)
while self._shell.recv_ready():
out += self._shell.recv(999999)
time.sleep(.75)
# take off the command being sent and the prompt at the end.
out = '\n'.join(out.split('\n')[1:-2])
# not logging in as root, and can grab the output as normal.
else:
stdin, stdout, stderr = self._session.exec_command(command=command,
timeout=float(self.session_timeout))
stdin.close()
# read normal output
while not stdout.channel.exit_status_ready():
out += stdout.read()
stdout.close()
# read errors
while not stderr.channel.exit_status_ready():
out += stderr.read()
stderr.close()
return out if not xpath_expr else xpath(out, xpath_expr)
@check_instance
def scp_pull(self, src, dest, progress=False, preserve_times=True):
""" Makes an SCP pull request for the specified file(s)/dir.
Purpose: By leveraging the _scp private variable, we make an scp pull
| request to retrieve file(s) from a Junos device.
@param src: string containing the source file or directory
@type src: str
@param dest: destination string of where to put the file(s)/dir
@type dest: str
@param progress: set to `True` to have the progress callback be
| printed as the operation is copying. Can also pass
| a function pointer to handoff the progress callback
| elsewhere.
@type progress: bool or function pointer
@param preserve_times: Set to false to have the times of the copied
| files set at the time of copy.
@type preserve_times: bool
@returns: `True` if the copy succeeds.
@rtype: bool
"""
# set up the progress callback if they want to see the process
if progress is True:
self._scp._progress = self._copy_status
# redirect to another function
elif hasattr(progress, '__call__'):
self._scp._progress = progress
else: # no progress callback
self._scp._progress = None
# retrieve the file(s)
self._scp.get(src, dest, recursive=True, preserve_times=preserve_times)
self._filename = None
        return True
@check_instance
def scp_push(self, src, dest, progress=False, preserve_times=True):
""" Purpose: Makes an SCP push request for the specified file(s)/dir.
@param src: string containing the source file or directory
@type src: str
@param dest: destination string of where to put the file(s)/dir
@type dest: str
@param progress: set to `True` to have the progress callback be
| printed as the operation is copying. Can also pass
| a function pointer to handoff the progress callback
| elsewhere.
@type progress: bool or function pointer
@param preserve_times: Set to false to have the times of the copied
| files set at the time of copy.
@type preserve_times: bool
@returns: `True` if the copy succeeds.
@rtype: bool
"""
# set up the progress callback if they want to see the process
if progress is True:
self._scp._progress = self._copy_status
# redirect to another function
elif hasattr(progress, '__call__'):
self._scp._progress = progress
else: # no progress callback
self._scp._progress = None
# push the file(s)
self._scp.put(src, dest, recursive=True, preserve_times=preserve_times)
self._filename = None
        return True
@check_instance
def shell_cmd(self, command=""):
""" Execute a shell command.
Purpose: Used to send a shell command to the connected device.
| This uses the self._shell instance, which should be a
| paramiko.Channel object, instead of a SSHClient.
| This is because we cannot send shell commands to the
| device using a SSHClient.
        @param command: The single command used to retrieve output from the
                      | device.
@type command: str
@returns: The reply from the device.
@rtype: str
"""
if not command:
raise InvalidCommandError("Parameter 'command' must not be empty.")
command = command.strip() + '\n'
self._shell.send(command)
time.sleep(2)
out = ''
while self._shell.recv_ready():
out += self._shell.recv(999999)
time.sleep(.75)
# take off the command being sent and the prompt at the end.
return '\n'.join(out.split('\n')[1:-1])
def shell_to_cli(self):
""" Move _shell to the command line interface (CLI). """
if not self._in_cli:
self._shell.send("cli\n")
time.sleep(4)
self._shell.recv(9999)
self._in_cli = True
return True
return False
def unlock(self):
""" Unlock the candidate config.
Purpose: Unlocks the candidate configuration, so that other people can
| edit the device. Requires the _session private variable to be
| a type of a ncclient.manager.Manager.
"""
if isinstance(self._session, manager.Manager):
self._session.unlock()
def _update_timeout(self, value):
if isinstance(self._session, manager.Manager):
self._session.timeout = value
if self._shell:
self._shell.settimeout(value)
# SSHClient not here because timeout is sent with each command.
    # backing values live on underscore-prefixed attributes; a property that
    # returned self.host from host() would recurse forever.
    @property
    def host(self):
        return self._host
    @host.setter
    def host(self, value):
        self._host = value
    @property
    def conn_type(self):
        return self._conn_type
    @conn_type.setter
    def conn_type(self, value):
        self._conn_type = value
    @property
    def username(self):
        return self._username
    @username.setter
    def username(self, value):
        self._username = value
    @property
    def password(self):
        return self._password
    @password.setter
    def password(self, value):
        self._password = value
    @property
    def port(self):
        return self._port
    @port.setter
    def port(self, value):
        self._port = value
    @property
    def connect_timeout(self):
        return self._connect_timeout
    @connect_timeout.setter
    def connect_timeout(self, value):
        self._connect_timeout = value
    @property
    def session_timeout(self):
        return self._session_timeout
    @session_timeout.setter
    def session_timeout(self, value):
        self._session_timeout = value
        self._update_timeout(value)
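# --- illustrative usage (editor's sketch): assumes this class is exported as
# --- `Jaide` and that a reachable device and valid credentials exist ---
#   from jaide.core import Jaide
#   conn = Jaide('10.0.0.1', 'user', 'password')
#   print(conn.op_cmd('show version'))        # operational-mode command
#   print(conn.health_check())                # alarms / CPU / top processes
#   conn.scp_pull('/var/log/messages', './')  # copy a file off the device
#   conn.disconnect()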
|
NetworkAutomation/jaide
|
jaide/core.py
|
Python
|
gpl-2.0
| 43,240 | 0.000046 |
from iliasCorrector import db
def _split_ident(ident):
data = ident.split('_')
matr = int(data[-1])
last = data[0]
first = ' '.join(data[1:-2])
return first, last, matr
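# illustrative (editor's note): the slicing above implies idents shaped like
# "<last>_<first>_<login>_<matriculation>", e.g.
#   _split_ident('Doe_Jane_jdoe_12345') -> ('Jane', 'Doe', 12345)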
class Exercise(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True)
path = db.Column(db.String(256), unique=True)
submissions = db.relationship('Submission', backref='exercise', lazy='dynamic')
@property
def num_corrected(self):
return len(self.submissions.filter(Submission.grade != None).all())
@property
def num_submissions(self):
return len(self.submissions.all())
@property
def num_to_correct(self):
return len(self.submissions.filter_by(grade=None).all())
def __repr__(self):
return '<Exercise {}>'.format(self.name)
class Submission(db.Model):
id = db.Column(db.Integer, primary_key=True)
grade = db.Column(db.Float)
exercise_id = db.Column(db.Integer, db.ForeignKey('exercise.id'))
student_ident = db.Column(db.String(256))
files = db.relationship('File', backref='submission', lazy='dynamic')
remarks = db.Column(db.Text)
def __repr__(self):
return '<Submission of {} for exercise {}>'.format(self.student_ident,
self.exercise)
@property
def first_name(self):
return _split_ident(self.student_ident)[0]
@property
def last_name(self):
return _split_ident(self.student_ident)[1]
@property
def student(self):
return '{}, {}'.format(self.last_name, self.first_name)
@property
def matriculation_number(self):
return _split_ident(self.student_ident)[2]
class File(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
path = db.Column(db.String(256))
submission_id = db.Column(db.Integer, db.ForeignKey('submission.id'))
def __repr__(self):
return '<File {}>'.format(self.name)
|
simonsmiley/iliasCorrector
|
iliasCorrector/models.py
|
Python
|
mit
| 2,030 | 0.002463 |
import numpy as np
tansig = lambda n: 2 / (1 + np.exp(-2 * n)) - 1
sigmoid = lambda n: 1 / (1 + np.exp(-n))
hardlim = lambda n: 1 if n >= 0 else 0
purelin = lambda n: n
relu = lambda n: np.fmax(0, n)
square_error = lambda x, y: np.sum(0.5 * (x - y)**2)
sig_prime = lambda z: sigmoid(z) * (1 - sigmoid(z))
relu_prime = lambda z: np.where(z > 0, 1.0, 0.0)  # derivative of ReLU: 1 where z > 0, else 0
softmax = lambda n: np.exp(n)/np.sum(np.exp(n))
softmax_prime = lambda n: softmax(n) * (1 - softmax(n))
cross_entropy = lambda x, y: -np.dot(x, np.log(y))
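# --- illustrative usage (editor's sketch, not part of the original file) ---
if __name__ == '__main__':
    logits = np.array([2.0, 1.0, 0.1])
    probs = softmax(logits)              # normalized so the entries sum to 1
    target = np.array([1.0, 0.0, 0.0])   # one-hot target for class 0
    print(probs.sum())                   # -> 1.0
    print(cross_entropy(target, probs))  # ~0.417, the -log prob of class 0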
|
alexvlis/shape
|
nnmath.py
|
Python
|
gpl-3.0
| 518 | 0.021236 |
import pytest
def test_zero_division():
with pytest.raises(ZeroDivisionError):
1 / 0
def test_recursion_depth():
with pytest.raises(RuntimeError) as excinfo:
def f():
f()
f()
assert 'maximum recursion depth exceeded' in str(excinfo.value)
|
jskksj/cv2stuff
|
cv2stuff/tests/test_exception.py
|
Python
|
isc
| 307 | 0.006515 |
from __future__ import unicode_literals
from django.db import models
from db.models import Bin
class CollectionEntry(models.Model):
bin_obj = models.ForeignKey(Bin, related_name='requested_bins')
fullness = models.IntegerField()
date_added = models.DateTimeField(auto_now_add=True)
|
bath-hacker/binny
|
binny/collector/models.py
|
Python
|
mit
| 297 | 0.006734 |
# -*- coding: utf-8 -*-
import unittest
from cwr.grammar.factory.config import rule_options
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestConfigOptions(unittest.TestCase):
def setUp(self):
self._rule = rule_options
def test_zero_options(self):
line = '()'
result = self._rule.parseString(line)
self.assertEqual(1, len(result))
self.assertEqual('', result[0])
def test_one_options(self):
line = '(option2)'
result = self._rule.parseString(line)
self.assertEqual(1, len(result))
self.assertEqual('option2', result[0])
def test_two_options(self):
line = '(option1, option2)'
result = self._rule.parseString(line)
self.assertEqual(2, len(result))
self.assertEqual('option1', result[0])
self.assertEqual('option2', result[1])
|
weso/CWR-DataApi
|
tests/grammar/config/test_options.py
|
Python
|
mit
| 919 | 0 |
"""
Operations:
P - login
G - list - list the dir's content
G - read - reads a file
G - info - infos about a file
P - write - writes a file
P - mkdir - makes a dir
P - copy - to=...
P - move - to=...
P - delete - DELETE
P - logout
"""
import canister
import bottle
import os.path
import json
import sys
import shutil
import time
# a callback that can be replaced to determine whether a resource is allowed access or not
# here everything is allowed; swap in the commented-out line below to make
# reads authorized and writes forbidden
def authorized(write, path):
#return not write
return True
root = os.getcwd()
app = bottle.Bottle()
app.install(canister.Canister())
def fullpath(path):
global root
fp = os.path.join(root, path.strip('/'))
fp = os.path.abspath(fp)
if not fp.startswith(root):
raise Exception('Path forbidden: ' + path)
else:
return fp
@app.get('<path:path>')
def get(path='', hidden=False, jstree=False):
path = path.strip('/')
if not authorized(False, path):
return bottle.Response(status=401) # TODO
raise Exception('Unauthorized path: ' + path)
global root
fpath = fullpath(path)
print(fpath)
if os.path.isfile(fpath):
return bottle.static_file(path, root=root)
elif os.path.isdir(fpath):
files = os.listdir(fpath)
listing = []
for name in files:
if not hidden and name[0] == '.':
continue
p = os.path.join(fpath, name)
item = {
'name': name,
'type': 'dir' if os.path.isdir(p) else 'file',
'is_directory': os.path.isdir(p),
'size': os.path.getsize(p),
'last_modified': time.ctime(os.path.getmtime(p))
}
listing.append( item )
listing.sort(key=lambda x: x['name'])
bottle.response.content_type = 'application/json'
return json.dumps(listing)
else:
raise Exception('No such path: ' + path)
@app.post('<path:path>')
def post(path, cmd, to=None):
if not authorized(True, path):
return bottle.Response(status=401)
raise Exception('Unauthorized path: ' + path)
# TODO: exceptions might reveal real paths
fpath = fullpath(path)
app.log.debug('Full path: %s' % fpath)
cmd = cmd.lower()
if cmd == 'set':
content = bottle.request.body.readall()
file = open(fpath, mode='w')
file.write(content)
file.close()
elif cmd == 'upload':
for name, up in bottle.request.files.items():
file = open(os.path.join(fpath, name), mode='w')
file.write(up)
file.close()
elif cmd == 'mkdir':
# os.mkdir
# build dirs recursively
os.makedirs(fpath, exist_ok=True)
elif cmd == 'move':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.move(fpath, fto)
elif cmd == 'rename':
if not to:
raise Exception('Missing destination ("to=...")')
        # resolve the destination inside the served root, as move/copy do
        os.rename(fpath, fullpath(to))
elif cmd == 'copy':
if not to:
raise Exception('Missing destination ("to=...")')
fto = fullpath(to)
shutil.copy(fpath, fto)
else:
raise Exception('Unknown command: %s' % cmd)
@app.delete('<path:path>')
def delete(path):
if not authorized(True, path):
raise Exception('Unauthorized path: ' + path) # TODO: return response instead
fpath = fullpath(path)
shutil.rmtree(fpath)
if __name__ == '__main__':
print(sys.argv)
args = sys.argv
#if len(args) != 2:
# print('Usage: %s <path-to-serve>' % os.path.basename(args[0]))
#root = os.path.abspath(args[1])
#root = os.getcwd()
import webfs
app.mount('@admin', webfs.app)
print('Serving: ' + root)
app.run(debug=True, host='0.0.0.0')
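# --- illustrative requests (editor's sketch; assumes bottle's default port
# --- 8080 and that canister maps query parameters onto handler arguments) ---
#   curl http://localhost:8080/                        # JSON dir listing
#   curl http://localhost:8080/some/file.txt           # raw file contents
#   curl -X POST 'http://localhost:8080/newdir?cmd=mkdir'
#   curl -X POST 'http://localhost:8080/a.txt?cmd=copy&to=b.txt'
#   curl -X DELETE http://localhost:8080/newdir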
|
dagnelies/restfs
|
old/restfs.py
|
Python
|
mit
| 4,068 | 0.004916 |
"""
-------------------------------------------------------------------------
AIOpening - special.py
useful functions
created: 2017/09/01 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
import numpy as np
import tensorflow as tf
def weighted_sample(weights, objects):
"""
Return a random item from objects, with the weighting defined by weights (which must sum to 1).
"""
# An array of the weights, cumulatively summed.
cs = np.cumsum(weights)
# Find the index of the first weight over a random value.
idx = sum(cs < np.random.rand())
return objects[min(idx, len(objects) - 1)]
def to_one_hot(ind, dim):
ret = np.zeros(dim)
ret[ind] = 1
return ret
def to_one_hot_batch(inds, dim):
ret = np.zeros((len(inds), dim))
ret[np.arange(len(inds)), inds] = 1
return ret
def from_one_hot(v):
return np.nonzero(v)[0][0]
def from_one_hot_batch(v):
if len(v) == 0:
return []
return np.nonzero(v)[1]
def new_tensor(name, n_dim, dtype):
return tf.placeholder(dtype=dtype, shape=[None] * n_dim, name=name)
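# --- illustrative usage (editor's sketch, not part of the original file) ---
if __name__ == '__main__':
    print(weighted_sample([0.1, 0.9], ['rare', 'common']))  # usually 'common'
    print(to_one_hot(2, 4))                # -> [0. 0. 1. 0.]
    print(from_one_hot(to_one_hot(2, 4)))  # -> 2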
|
ducandu/aiopening
|
aiopening/misc/special.py
|
Python
|
mit
| 1,167 | 0.003428 |
#
# LMirror is Copyright (C) 2010 Robert Collins <robertc@robertcollins.net>
#
# LMirror is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# In the LMirror source tree the file COPYING.txt contains the GNU General Public
# License version 3.
#
"""Tests for the commands command."""
from l_mirror.commands import commands
from l_mirror.ui.model import UI
from l_mirror.tests import ResourcedTestCase
class TestCommandCommands(ResourcedTestCase):
def get_test_ui_and_cmd(self):
ui = UI()
cmd = commands.commands(ui)
ui.set_command(cmd)
return ui, cmd
def test_shows_a_table_of_commands(self):
ui, cmd = self.get_test_ui_and_cmd()
cmd.execute()
self.assertEqual(1, len(ui.outputs))
self.assertEqual('table', ui.outputs[0][0])
self.assertEqual(('command', 'description'), ui.outputs[0][1][0])
command_names = [row[0] for row in ui.outputs[0][1]]
summaries = [row[1] for row in ui.outputs[0][1]]
self.assertTrue('help' in command_names)
self.assertTrue(
'Get help on a command.' in summaries)
|
rbtcollins/lmirror
|
l_mirror/tests/commands/test_commands.py
|
Python
|
gpl-3.0
| 1,692 | 0.004728 |
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import DropoutWrapper, RNNCell, LSTMStateTuple
from my.tensorflow import exp_mask, flatten
from my.tensorflow.nn import linear, softsel, double_linear_logits
class SwitchableDropoutWrapper(DropoutWrapper):
def __init__(self, cell, is_train, input_keep_prob=1.0, output_keep_prob=1.0,
seed=None):
super(SwitchableDropoutWrapper, self).__init__(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob,
seed=seed)
self.is_train = is_train
def __call__(self, inputs, state, scope=None):
outputs_do, new_state_do = super(SwitchableDropoutWrapper, self).__call__(inputs, state, scope=scope)
tf.get_variable_scope().reuse_variables()
outputs, new_state = self._cell(inputs, state, scope)
outputs = tf.cond(self.is_train, lambda: outputs_do, lambda: outputs)
if isinstance(state, tuple):
new_state = state.__class__(*[tf.cond(self.is_train, lambda: new_state_do_i, lambda: new_state_i)
for new_state_do_i, new_state_i in zip(new_state_do, new_state)])
else:
new_state = tf.cond(self.is_train, lambda: new_state_do, lambda: new_state)
return outputs, new_state
class TreeRNNCell(RNNCell):
def __init__(self, cell, input_size, reduce_func):
self._cell = cell
self._input_size = input_size
self._reduce_func = reduce_func
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N*B, I + B]
:param state: [N*B, d]
:param scope:
:return: [N*B, d]
"""
with tf.variable_scope(scope or self.__class__.__name__):
d = self.state_size
x = tf.slice(inputs, [0, 0], [-1, self._input_size]) # [N*B, I]
mask = tf.slice(inputs, [0, self._input_size], [-1, -1]) # [N*B, B]
B = tf.shape(mask)[1]
prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1) # [N, B, d] -> [N, 1, B, d]
mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d]) # [N, B, B, d]
# prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
prev_state = self._reduce_func(exp_mask(prev_state, mask), 2) # [N, B, d]
prev_state = tf.reshape(prev_state, [-1, d]) # [N*B, d]
return self._cell(x, prev_state)
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
class NoOpCell(RNNCell):
def __init__(self, num_units):
self._num_units = num_units
def __call__(self, inputs, state, scope=None):
return state, state
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
class MatchCell(RNNCell):
def __init__(self, cell, input_size, q_len):
self._cell = cell
self._input_size = input_size
# FIXME : This won't be needed with good shape guessing
self._q_len = q_len
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
"""
:param inputs: [N, d + JQ + JQ * d]
:param state: [N, d]
:param scope:
:return:
"""
with tf.variable_scope(scope or self.__class__.__name__):
c_prev, h_prev = state
x = tf.slice(inputs, [0, 0], [-1, self._input_size])
q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len]) # [N, JQ]
qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
qs = tf.reshape(qs, [-1, self._q_len, self._input_size]) # [N, JQ, d]
x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1]) # [N, JQ, d]
h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1]) # [N, JQ, d]
f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f')) # [N, JQ, d]
a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask)) # [N, JQ]
q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
z = tf.concat(1, [x, q]) # [N, 2d]
return self._cell(z, state)
class AttentionCell(RNNCell):
def __init__(self, cell, memory, mask=None, controller=None, mapper=None, input_keep_prob=1.0, is_train=None):
"""
Early fusion attention cell: uses the (inputs, state) to control the current attention.
:param cell:
:param memory: [N, M, m]
:param mask:
:param controller: (inputs, prev_state, memory) -> memory_logits
"""
self._cell = cell
self._memory = memory
self._mask = mask
self._flat_memory = flatten(memory, 2)
self._flat_mask = flatten(mask, 1)
if controller is None:
controller = AttentionCell.get_linear_controller(True, is_train=is_train)
self._controller = controller
if mapper is None:
mapper = AttentionCell.get_concat_mapper()
elif mapper == 'sim':
mapper = AttentionCell.get_sim_mapper()
self._mapper = mapper
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or "AttentionCell"):
memory_logits = self._controller(inputs, state, self._flat_memory)
sel_mem = softsel(self._flat_memory, memory_logits, mask=self._flat_mask) # [N, m]
new_inputs, new_state = self._mapper(inputs, state, sel_mem)
return self._cell(new_inputs, state)
@staticmethod
def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
def double_linear_controller(inputs, state, memory):
"""
:param inputs: [N, i]
:param state: [N, d]
:param memory: [N, M, m]
:return: [N, M]
"""
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
is_train=is_train)
return out
return double_linear_controller
@staticmethod
def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
def linear_controller(inputs, state, memory):
rank = len(memory.get_shape())
_memory_size = tf.shape(memory)[rank-2]
tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
if isinstance(state, tuple):
tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
for each in state]
else:
tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]
# [N, M, d]
in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob, is_train=is_train)
return out
return linear_controller
@staticmethod
def get_concat_mapper():
def concat_mapper(inputs, state, sel_mem):
"""
:param inputs: [N, i]
:param state: [N, d]
:param sel_mem: [N, m]
:return: (new_inputs, new_state) tuple
"""
return tf.concat(1, [inputs, sel_mem]), state
return concat_mapper
@staticmethod
def get_sim_mapper():
def sim_mapper(inputs, state, sel_mem):
"""
Assume that inputs and sel_mem are the same size
:param inputs: [N, i]
:param state: [N, d]
:param sel_mem: [N, i]
:return: (new_inputs, new_state) tuple
"""
return tf.concat(1, [inputs, sel_mem, inputs * sel_mem, tf.abs(inputs - sel_mem)]), state
return sim_mapper
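# --- illustrative wiring (editor's sketch; assumes the TF 0.x-era rnn_cell
# --- API these classes target, e.g. a BasicLSTMCell from the same module) ---
#   cell = BasicLSTMCell(hidden_size)
#   cell = SwitchableDropoutWrapper(cell, is_train, input_keep_prob=0.8)
#   cell = AttentionCell(cell, memory, mask=memory_mask, mapper='sim',
#                        is_train=is_train)
# Dropout fires only while the `is_train` bool tensor is true, so the same
# graph serves both training and inference.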
|
Incogniiito/Chrononaut
|
my/tensorflow/rnn_cell.py
|
Python
|
apache-2.0
| 9,075 | 0.003857 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an fake image service"""
import copy
import datetime
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
# NOTE(bcwaldon): was image '123456'
image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64'}}
# NOTE(bcwaldon): was image 'fake'
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '2'
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '1'
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
# NOTE(bcwaldon): was image '3'
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id':
'155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None}}
# NOTE(sirp): was image '6'
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'False'}}
# NOTE(sirp): was image '7'
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'is_public': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel,
'architecture': 'x86_64',
'auto_disk_config': 'True'}}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
super(_FakeImageService, self).__init__()
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def index(self, context, **kwargs):
"""Returns list of images."""
retval = []
for img in self.images.values():
retval += [dict([(k, v) for k, v in img.iteritems()
if k in ['id', 'name']])]
return retval
#TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def get(self, context, image_id, data):
metadata = self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
return metadata
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
images = copy.deepcopy(self.images.values())
for image in images:
if name == image.get('name'):
return image
raise exception.ImageNotFound(image_id=name)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
image_id = str(metadata.get('id', utils.gen_uuid()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
self.images[image_id] = copy.deepcopy(metadata)
def delete(self, context, image_id):
"""Delete the given image.
:raises: ImageNotFound if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images."""
self.images.clear()
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
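# --- illustrative usage (editor's sketch, not part of the original module) ---
# FakeImageService() hands back a module-level singleton, so all callers share
# one fake store:
#   service = FakeImageService()
#   images = service.detail(None)        # the seven images seeded in __init__
#   one = service.show(None, images[0]['id'])
#   FakeImageService_reset()             # swap in a pristine store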
|
rcbops/nova-buildpackage
|
nova/image/fake.py
|
Python
|
apache-2.0
| 9,011 | 0.008323 |
"""Internal module for Python 2 backwards compatibility."""
import sys
if sys.version_info[0] < 3:
from urlparse import parse_qs, urlparse
from itertools import imap, izip
from string import letters as ascii_letters
from Queue import Queue
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
iteritems = lambda x: x.iteritems()
iterkeys = lambda x: x.iterkeys()
itervalues = lambda x: x.itervalues()
nativestr = lambda x: \
x if isinstance(x, str) else x.encode('utf-8', 'replace')
u = lambda x: x.decode()
b = lambda x: x
next = lambda x: x.next()
byte_to_chr = lambda x: x
unichr = unichr
xrange = xrange
basestring = basestring
unicode = unicode
bytes = str
long = long
else:
from urllib.parse import parse_qs, urlparse
from io import BytesIO
from string import ascii_letters
from queue import Queue
iteritems = lambda x: iter(x.items())
iterkeys = lambda x: iter(x.keys())
itervalues = lambda x: iter(x.values())
byte_to_chr = lambda x: chr(x)
nativestr = lambda x: \
x if isinstance(x, str) else x.decode('utf-8', 'replace')
u = lambda x: x
b = lambda x: x.encode('iso-8859-1') if not isinstance(x, bytes) else x
next = next
unichr = chr
imap = map
izip = zip
xrange = range
basestring = str
unicode = str
bytes = bytes
long = int
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import Empty, Full
try: # Python 2.6 - 2.7
from Queue import LifoQueue
except ImportError: # Python 2.5
from Queue import Queue
# From the Python 2.7 lib. Python 2.5 already extracted the core
    # methods to aid implementing different queue organisations.
class LifoQueue(Queue):
"Override queue methods to implement a last-in first-out queue."
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _qsize(self, len=len):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
|
holys/ledis-py
|
ledis/_compat.py
|
Python
|
mit
| 2,327 | 0.006446 |
import platform
import urllib
import subprocess
from progressbar import ProgressBar
class Downloader(object):
WINDOWS_DOWNLOAD_URL = "http://cache.lego.com/downloads/ldd2.0/installer/setupLDD-PC-4_3_8.exe"
MAC_DOWNLOAD_URL = "http://cache.lego.com/downloads/ldd2.0/installer/setupLDD-MAC-4_3_8.zip"
PB = None
@classmethod
def download_ldd(cls):
if platform.system() == "Darwin":
urllib.urlretrieve(cls.MAC_DOWNLOAD_URL, "ldd.zip", reporthook=cls.download_progress)
elif platform.system() == "Windows":
urllib.urlretrieve(cls.WINDOWS_DOWNLOAD_URL, "ldd.exe")
# subprocess.Popen("ldd.exe")
@classmethod
def download_progress(cls, count, block_size, total_size):
if not cls.PB:
cls.PB = ProgressBar(maxval=total_size).start()
cls.PB.update(count * block_size)
class Installer(object):
@classmethod
def install(cls):
pass
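# --- illustrative usage (editor's sketch, not part of the original file) ---
#   Downloader.download_ldd()   # fetches the platform-specific LDD installer,
#                               # with a progress bar on the macOS download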
|
cbrentharris/bricklayer
|
bricklayer/utils/downloader.py
|
Python
|
mit
| 951 | 0.005258 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from nose.tools import ok_
from crontabber.app import CronTabber
from socorro.unittest.cron.jobs.base import IntegrationTestBase
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from socorro.schemas import CRASH_REPORT_JSON_SCHEMA_AS_STRING
class TestUploadCrashReportJSONSchemaCronApp(IntegrationTestBase):
def _setup_config_manager(self):
return get_config_manager_for_crontabber(
jobs='socorro.cron.jobs.upload_crash_report_json_schema.'
'UploadCrashReportJSONSchemaCronApp|30d',
)
@mock.patch('boto.connect_s3')
def test_run(self, connect_s3):
key = mock.MagicMock()
connect_s3().get_bucket().get_key.return_value = None
connect_s3().get_bucket().new_key.return_value = key
with self._setup_config_manager().context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
app_name = 'upload-crash-report-json-schema'
ok_(information[app_name])
ok_(not information[app_name]['last_error'])
ok_(information[app_name]['last_success'])
key.set_contents_from_string.assert_called_with(
CRASH_REPORT_JSON_SCHEMA_AS_STRING
)
|
m8ttyB/socorro
|
socorro/unittest/cron/jobs/test_upload_crash_report_json_schema.py
|
Python
|
mpl-2.0
| 1,523 | 0 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
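# --- illustrative usage (editor's sketch, not part of the original module) ---
#   init_log()                      # install the colorized stream handler
#   info('building %s', 'target')   # routed through the module-level logger
#   pprint('GREEN', 'ok', label='build finished')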
|
Gnurou/glmark2
|
waflib/Logs.py
|
Python
|
gpl-3.0
| 5,584 | 0.068947 |
#!/usr/bin/python
#
# $Id: gen-data-queryperf.py,v 1.1.10.1 2003/05/15 05:07:21 marka Exp $
#
# Contributed by Stephane Bortzmeyer <bortzmeyer@nic.fr>
#
# "A small tool which may be useful with contrib/queryperf. This script
# can generate files of queries, both with random names (to test the
# behaviour with NXdomain) and with domains from a real zone file."
#
import sys
import getopt
import random
import re
ldh = []
# Letters
for i in range(97, 122):
ldh.append(chr(i))
# Digits
for i in range(48, 57):
ldh.append(chr(i))
# Hyphen
ldh.append('-')
maxsize=10
tld='org'
num=4
percent_random = 0.3
gen = None
zone_file = None
domains = {}
domain_ns = "^([a-z0-9-]+)(\.([a-z0-9-\.]+|)|)( +IN|) +NS"
domain_ns_re = re.compile(domain_ns, re.IGNORECASE)
def gen_random_label():
label = ""
for i in range(gen.randint(1, maxsize)):
label = label + gen.choice(ldh)
return label
def make_domain(label):
return "www." + label + "." + tld + " A"
def usage():
sys.stdout.write("Usage: " + sys.argv[0] + " [-n number] " + \
"[-p percent-random] [-t TLD]\n")
sys.stdout.write(" [-m MAXSIZE] [-f zone-file]\n")
try:
optlist, args = getopt.getopt(sys.argv[1:], "hp:f:n:t:m:",
["help", "percentrandom=", "zonefile=",
"num=", "tld=",
"maxsize="])
for option, value in optlist:
if option == "--help" or option == "-h":
usage()
sys.exit(0)
elif option == "--number" or option == "-n":
num = int(value)
elif option == "--maxsize" or option == "-m":
maxsize = int(value)
elif option == "--percentrandom" or option == "-p":
percent_random = float(value)
elif option == "--tld" or option == "-t":
tld = str(value)
elif option == "--zonefile" or option == "-f":
zone_file = str(value)
else:
error("Unknown option " + option)
except getopt.error, reason:
sys.stderr.write(sys.argv[0] + ": " + str(reason) + "\n")
usage()
sys.exit(1)
if len(args) <> 0:
usage()
sys.exit(1)
gen = random.Random()
if zone_file:
file = open(zone_file)
line = file.readline()
while line:
domain_line = domain_ns_re.match(line)
if domain_line:
domain = domain_line.group(1)
domains[domain] = 1
line = file.readline()
file.close()
for i in range(num):
if zone_file:
if gen.random() < percent_random:
print make_domain(gen_random_label())
else:
print make_domain(gen.choice(domains.keys()))
else:
print make_domain(gen_random_label())
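# Illustrative invocations (editor's note, not part of the original script):
#   ./gen-data-queryperf.py -n 1000 -t org > random-queries.txt
#   ./gen-data-queryperf.py -f example.org.zone -p 0.3 > mixed-queries.txt
# The second form draws ~70% of names from the zone file and ~30% at random,
# exercising both positive answers and NXDOMAIN handling in queryperf.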
|
atmark-techno/atmark-dist
|
user/bind/contrib/queryperf/utils/gen-data-queryperf.py
|
Python
|
gpl-2.0
| 2,783 | 0.004671 |
import numpy as np
from gpaw import KohnShamConvergenceError
class SCFLoop:
"""Self-consistent field loop.
converged: Do we have a self-consistent solution?
"""
def __init__(self, eigenstates=0.1, energy=0.1, density=0.1, maxiter=100,
fixdensity=False, niter_fixdensity=None):
self.max_eigenstates_error = max(eigenstates, 1e-20)
self.max_energy_error = energy
self.max_density_error = max(density, 1e-20)
self.maxiter = maxiter
self.fixdensity = fixdensity
if niter_fixdensity is None:
niter_fixdensity = 2
self.niter_fixdensity = niter_fixdensity
if fixdensity:
self.fix_density()
self.reset()
def fix_density(self):
self.fixdensity = True
self.niter_fixdensity = 10000000
self.max_density_error = np.inf
def reset(self):
self.energies = []
self.eigenstates_error = None
self.energy_error = None
self.density_error = None
self.converged = False
def run(self, wfs, hamiltonian, density, occupations):
if self.converged:
return
for iter in range(1, self.maxiter + 1):
wfs.eigensolver.iterate(hamiltonian, wfs)
occupations.calculate(wfs)
# XXX ortho, dens, wfs?
energy = hamiltonian.get_energy(occupations)
self.energies.append(energy)
self.check_convergence(density, wfs.eigensolver)
yield iter
if self.converged:
break
if iter > self.niter_fixdensity:
density.update(wfs)
hamiltonian.update(density)
else:
hamiltonian.npoisson = 0
# Don't fix the density in the next step:
self.niter_fixdensity = 0
def check_convergence(self, density, eigensolver):
"""Check convergence of eigenstates, energy and density."""
if self.converged:
return True
self.eigenstates_error = eigensolver.error
if len(self.energies) < 3:
self.energy_error = self.max_energy_error
else:
self.energy_error = np.ptp(self.energies[-3:])
self.density_error = density.mixer.get_charge_sloshing()
if self.density_error is None:
self.density_error = 1000000.0
self.converged = (
self.eigenstates_error < self.max_eigenstates_error and
self.energy_error < self.max_energy_error and
self.density_error < self.max_density_error)
return self.converged
|
qsnake/gpaw
|
gpaw/scf.py
|
Python
|
gpl-3.0
| 2,689 | 0.002975 |
from netfilterqueue import NetfilterQueue
from dpkt import ip, icmp, tcp, udp
from scapy.all import *
import socket
def print_and_accept(pkt):
data=pkt.get_payload()
res = ip.IP(data)
res2 = IP(data)
i = ICMP(data)
t = TCP(data)
u = UDP(data)
print "SOURCE IP: %s\tDESTINATION IP: %s" % (socket.inet_ntoa(res.src),socket.inet_ntoa(res.dst))
print res2.show2()
resp=srp1(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst='192.168.0.34'),iface="eth0",timeout=2)
print resp.dst
eth_dst = resp.src
eth_src = resp.dst
eth = Ether(src=eth_src, dst=eth_dst)
eth.type = 2048
sendp(eth/res2/res2,iface="eth0")
pkt.accept()
nfqueue = NetfilterQueue()
nfqueue.bind(6, print_and_accept)
try:
nfqueue.run()
except KeyboardInterrupt, ex:
print ex
|
rafaelsilvag/pyNFRouter
|
test/teste.py
|
Python
|
gpl-2.0
| 798 | 0.012531 |
#!/usr/bin/env python
import csv
# create an empty list that will be filled with the rows of data from the CSV as dictionaries
csv_content = []
# open and loop through each line of the csv file to populate our data file
with open('aaj1945_DataS1_Egg_shape_by_species_v2.csv') as csv_file:
csv_reader = csv.DictReader(csv_file)
lineNo = 0
for row in csv_reader: # process each row of the csv file
csv_content.append(row)
if lineNo < 3: # print out a few lines of data for our inspection
print(row)
lineNo += 1
# create some empty lists that we will fill with values for each column of data
order = []
family = []
species = []
asymmetry = []
ellipticity = []
avglength = []
# for each row of data in our dataset write a set of values into the lists of column values
for item in csv_content:
    # the first column header carries the file's UTF-8 BOM, hence '\ufeff'
    order.append(item['\ufeffOrder'])
family.append(item['Family'])
species.append(item['Species'])
# deal with issues
try:
asymmetry.append(float(item['Asymmetry']))
except:
asymmetry.append(-9999)
try:
ellipticity.append(float(item['Ellipticity']))
except:
ellipticity.append(-9999)
try:
avglength.append(float(item['AvgLength (cm)']))
except:
avglength.append(-9999)
print()
print()
# Calculate and print some statistics
mean_asymmetry = sum(asymmetry)/len(asymmetry)
print("Mean Asymmetry: ", str(mean_asymmetry))
mean_ellipticity = sum(ellipticity)/len(ellipticity)
print("Mean Ellipticity: ", str(mean_ellipticity))
mean_avglength = sum(avglength)/len(avglength)
print("Mean Average Length: ", str(mean_avglength))
# What's wrong with these results? What would you do next to fix the problem?
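# One possible next step (editor's sketch): the -9999 sentinels stored for
# unparseable values drag every mean far negative; filter them before
# averaging, e.g.:
#   valid = [v for v in asymmetry if v != -9999]
#   print("Mean Asymmetry (valid rows only):", sum(valid) / len(valid))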
|
unmrds/cc-python
|
.ipynb_checkpoints/eggs-checkpoint.py
|
Python
|
apache-2.0
| 1,783 | 0.00673 |
################################################################################
# Copyright 2015 Samuel Gongora Garcia (s.gongoragarcia@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
# Author: s.gongoragarcia[at]gmail.com
################################################################################
class Read_predict_data:
def __init__(self, index_satellite):
from os import getcwd, chdir
index_satellite = index_satellite + 1
directorio_script = getcwd()
# predict routine
self.open_predict(directorio_script)
self.open_files_predict(index_satellite)
chdir(directorio_script)
def open_predict(self, directorio_script):
from os import chdir, listdir, getcwd
chdir(directorio_script + '/results/predict')
self.files_predict = listdir(getcwd())
self.files_predict.sort()
def open_files_predict(self, index_satellite):
for i in range(index_satellite):
self.open_file_predict(self.files_predict[i])
def open_file_predict(self, name):
self.predict_simulation_time = []
self.predict_alt_satellite = []
self.predict_az_satellite = []
import csv
with open(name) as tsv:
for line in csv.reader(tsv, delimiter = "\t"):
if float(line[1]) >= 0:
linea0 = float(line[0])
self.predict_simulation_time.append(linea0)
self.predict_alt_satellite.append(float(line[1]))
self.predict_az_satellite.append(float(line[2]))
|
satnet-project/propagators
|
output_predict.py
|
Python
|
apache-2.0
| 2,281 | 0.003069 |
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^external/', include('external.urls')),
url(r'^dev/', include('dev.urls')),
]
|
SequencingDOTcom/App-Market-API-integration
|
python/bootstrap/urls.py
|
Python
|
mit
| 231 | 0 |
# Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discover environment and server configuration, initialize PyMongo client."""
import os
import socket
import sys
from functools import wraps
from test.utils import create_user
from test.version import Version
from unittest import SkipTest
import pymongo.errors
HAVE_SSL = True
try:
import ssl
except ImportError:
HAVE_SSL = False
ssl = None
HAVE_TORNADO = True
try:
import tornado
except ImportError:
HAVE_TORNADO = False
tornado = None
HAVE_ASYNCIO = True
try:
import asyncio
except ImportError:
HAVE_ASYNCIO = False
asyncio = None
HAVE_AIOHTTP = True
try:
import aiohttp
except ImportError:
HAVE_AIOHTTP = False
aiohttp = None
# Copied from PyMongo.
def partition_node(node):
"""Split a host:port string into (host, int(port)) pair."""
host = node
port = 27017
idx = node.rfind(":")
if idx != -1:
host, port = node[:idx], int(node[idx + 1 :])
if host.startswith("["):
host = host[1:-1]
return host, port
def connected(client):
"""Convenience, wait for a new PyMongo MongoClient to connect."""
client.admin.command("ping") # Force connection.
return client
# If these are set to the empty string, substitute None.
db_user = os.environ.get("DB_USER") or None
db_password = os.environ.get("DB_PASSWORD") or None
CERT_PATH = os.environ.get(
"CERT_DIR", os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates")
)
CLIENT_PEM = os.path.join(CERT_PATH, "client.pem")
CA_PEM = os.path.join(CERT_PATH, "ca.pem")
MONGODB_X509_USERNAME = "CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US"
def is_server_resolvable():
"""Returns True if 'server' is resolvable."""
socket_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1)
try:
socket.gethostbyname("server")
return True
except socket.error:
return False
finally:
socket.setdefaulttimeout(socket_timeout)
class TestEnvironment(object):
def __init__(self):
self.initialized = False
self.host = None
self.port = None
self.mongod_started_with_ssl = False
self.mongod_validates_client_cert = False
self.server_is_resolvable = is_server_resolvable()
self.sync_cx = None
self.is_standalone = False
self.is_mongos = False
self.is_replica_set = False
self.rs_name = None
self.w = 1
self.hosts = None
self.arbiters = None
self.primary = None
self.secondaries = None
self.v8 = False
self.auth = False
self.uri = None
self.rs_uri = None
self.version = None
self.sessions_enabled = False
self.fake_hostname_uri = None
self.server_status = None
def setup(self):
assert not self.initialized
self.setup_sync_cx()
self.setup_auth_and_uri()
self.setup_version()
self.setup_v8()
self.server_status = self.sync_cx.admin.command("serverStatus")
self.initialized = True
def setup_sync_cx(self):
"""Get a synchronous PyMongo MongoClient and determine SSL config."""
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
connectTimeoutMS = 100
serverSelectionTimeoutMS = 100
socketTimeoutMS = 10000
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
ssl=True,
)
)
self.mongod_started_with_ssl = True
except pymongo.errors.ServerSelectionTimeoutError:
try:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
self.mongod_started_with_ssl = True
self.mongod_validates_client_cert = True
except pymongo.errors.ServerSelectionTimeoutError:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
connectTimeoutMS=connectTimeoutMS,
socketTimeoutMS=socketTimeoutMS,
serverSelectionTimeoutMS=serverSelectionTimeoutMS,
)
)
response = client.admin.command("ismaster")
self.sessions_enabled = "logicalSessionTimeoutMinutes" in response
self.is_mongos = response.get("msg") == "isdbgrid"
if "setName" in response:
self.is_replica_set = True
self.rs_name = str(response["setName"])
self.w = len(response["hosts"])
self.hosts = set([partition_node(h) for h in response["hosts"]])
host, port = self.primary = partition_node(response["primary"])
self.arbiters = set([partition_node(h) for h in response.get("arbiters", [])])
self.secondaries = [
partition_node(m)
for m in response["hosts"]
if m != self.primary and m not in self.arbiters
]
elif not self.is_mongos:
self.is_standalone = True
# Reconnect to found primary, without short timeouts.
if self.mongod_started_with_ssl:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
tlsCAFile=CA_PEM,
tlsCertificateKeyFile=CLIENT_PEM,
)
)
else:
client = connected(
pymongo.MongoClient(
host,
port,
username=db_user,
password=db_password,
directConnection=True,
ssl=False,
)
)
self.sync_cx = client
self.host = host
self.port = port
def setup_auth_and_uri(self):
"""Set self.auth and self.uri."""
if db_user or db_password:
if not (db_user and db_password):
sys.stderr.write("You must set both DB_USER and DB_PASSWORD, or neither\n")
sys.exit(1)
self.auth = True
uri_template = "mongodb://%s:%s@%s:%s/admin"
self.uri = uri_template % (db_user, db_password, self.host, self.port)
# If the hostname 'server' is resolvable, this URI lets us use it
# to test SSL hostname validation with auth.
self.fake_hostname_uri = uri_template % (db_user, db_password, "server", self.port)
else:
self.uri = "mongodb://%s:%s/admin" % (self.host, self.port)
self.fake_hostname_uri = "mongodb://%s:%s/admin" % ("server", self.port)
if self.rs_name:
self.rs_uri = self.uri + "?replicaSet=" + self.rs_name
def setup_version(self):
"""Set self.version to the server's version."""
self.version = Version.from_client(self.sync_cx)
def setup_v8(self):
"""Determine if server is running SpiderMonkey or V8."""
if self.sync_cx.server_info().get("javascriptEngine") == "V8":
self.v8 = True
@property
def storage_engine(self):
try:
return self.server_status.get("storageEngine", {}).get("name")
except AttributeError:
# Raised if self.server_status is None.
return None
def supports_transactions(self):
if self.storage_engine == "mmapv1":
return False
if self.version.at_least(4, 1, 8):
return self.is_mongos or self.is_replica_set
if self.version.at_least(4, 0):
return self.is_replica_set
return False
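    # Illustrative outcomes of the checks above: a 4.2 mongos or a 4.2
    # replica set supports transactions, as does a 4.0 replica set, while
    # any standalone or any mmapv1 deployment does not.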
def require(self, condition, msg, func=None):
def make_wrapper(f):
@wraps(f)
def wrap(*args, **kwargs):
if condition():
return f(*args, **kwargs)
raise SkipTest(msg)
return wrap
if func is None:
def decorate(f):
return make_wrapper(f)
return decorate
return make_wrapper(func)
def require_auth(self, func):
"""Run a test only if the server is started with auth."""
        return self.require(lambda: self.auth, "Server must be started with auth", func=func)
def require_version_min(self, *ver):
"""Run a test only if the server version is at least ``version``."""
other_version = Version(*ver)
return self.require(
lambda: self.version >= other_version,
"Server version must be at least %s" % str(other_version),
)
def require_version_max(self, *ver):
"""Run a test only if the server version is at most ``version``."""
other_version = Version(*ver)
return self.require(
lambda: self.version <= other_version,
"Server version must be at most %s" % str(other_version),
)
def require_replica_set(self, func):
"""Run a test only if the client is connected to a replica set."""
return self.require(
lambda: self.is_replica_set, "Not connected to a replica set", func=func
)
def require_transactions(self, func):
"""Run a test only if the deployment might support transactions.
*Might* because this does not test the FCV.
"""
return self.require(self.supports_transactions, "Transactions are not supported", func=func)
def create_user(self, dbname, user, pwd=None, roles=None, **kwargs):
kwargs["writeConcern"] = {"w": self.w}
return create_user(self.sync_cx[dbname], user, pwd, roles, **kwargs)
def drop_user(self, dbname, user):
self.sync_cx[dbname].command("dropUser", user, writeConcern={"w": self.w})
env = TestEnvironment()
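# Usage sketch (assumed test-suite pattern, not defined in this module): the
# ``require_*`` helpers decorate test methods so they raise SkipTest when the
# deployment lacks a feature, e.g.:
#
#     class TestReplicaSetOnly(unittest.TestCase):
#         @env.require_replica_set
#         def test_reads_from_secondaries(self):
#             self.assertTrue(env.secondaries)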
|
mongodb/motor
|
test/test_environment.py
|
Python
|
apache-2.0
| 11,605 | 0.001034 |
import os
import random
import string
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
TEST_APP_URL = os.getenv('TEST_APP_URL')
TEST_CLIENT_URL = os.getenv('TEST_CLIENT_URL')
E2E_ARGS = os.getenv('E2E_ARGS')
TEST_URL = TEST_CLIENT_URL if E2E_ARGS == 'client' else TEST_APP_URL
def random_string(length=8):
return ''.join(random.choice(string.ascii_letters) for x in range(length))
def register(selenium, user):
selenium.get(f'{TEST_URL}/register')
selenium.implicitly_wait(1)
username = selenium.find_element_by_id('username')
username.send_keys(user.get('username'))
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
password_conf = selenium.find_element_by_id('confirm-password')
password_conf.send_keys(user.get('password_conf'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def login(selenium, user):
selenium.get(f'{TEST_URL}/login')
selenium.implicitly_wait(1)
email = selenium.find_element_by_id('email')
email.send_keys(user.get('email'))
password = selenium.find_element_by_id('password')
password.send_keys(user.get('password'))
submit_button = selenium.find_element_by_tag_name('button')
submit_button.click()
def register_valid_user(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
return user
def register_valid_user_and_logout(selenium):
user_name = random_string()
user = {
'username': user_name,
'email': f'{user_name}@example.com',
'password': 'p@ssw0rd',
'password_conf': 'p@ssw0rd',
}
register(selenium, user)
WebDriverWait(selenium, 15).until(EC.url_changes(f"{TEST_URL}/register"))
user_menu = selenium.find_element_by_class_name('nav-items-user-menu')
logout_link = user_menu.find_elements_by_class_name('nav-item')[2]
logout_link.click()
selenium.implicitly_wait(1)
return user
def login_valid_user(selenium, user):
login(selenium, user)
WebDriverWait(selenium, 10).until(EC.url_changes(f"{TEST_URL}/login"))
return user
def assert_navbar(selenium, user):
nav = selenium.find_element_by_id('nav').text
assert 'Register' not in nav
assert 'Login' not in nav
assert 'Dashboard' in nav
assert 'Workouts' in nav
assert 'Statistics' in nav
assert 'Add a workout' in nav
assert user['username'] in nav
assert 'Logout' in nav
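# Usage sketch (hypothetical test; assumes the pytest-selenium ``selenium``
# fixture these helpers are written for):
#
#     def test_registration_shows_navbar(selenium):
#         user = register_valid_user(selenium)
#         assert_navbar(selenium, user)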
|
SamR1/FitTrackee
|
e2e/utils.py
|
Python
|
agpl-3.0
| 2,863 | 0 |
UL_CATEGORY_LI = '//ul[@class="category"]/li'
H2_A_TITLELINK = './h2/a[@class="titlelink"]'
SPAN_A_TITLELINK = './span/a[@class="titlelink"]'
DIV_BODYFIELD_P = '//div[contains(@class,"bodyfield")]/p'
CATEGORY_H2_XPATH = [ UL_CATEGORY_LI, H2_A_TITLELINK ]
BODYFIELD_SPAN_XPATH = [ DIV_BODYFIELD_P, SPAN_A_TITLELINK ]
"""Mapping of relative URL (for EOPSS pages) to the xpath needed
to extract documents (1st xpath for section, 2nd xpath for document link)
"""
MASSGOV_DICT = {
'homeland-sec/grants/docs/':
[
UL_CATEGORY_LI,
'./h2/span/a[@class="titlelink"]'
],
'homeland-sec/grants/hs-grant-guidance-and-policies.html':
BODYFIELD_SPAN_XPATH,
'homeland-sec/grants/standard-documents.html':
[
'//div[contains(@class,"bodyfield")]/ul/li',
SPAN_A_TITLELINK
],
'law-enforce/grants/': CATEGORY_H2_XPATH,
'law-enforce/grants/2017-muni-public-safety-staffing-grant.html':
BODYFIELD_SPAN_XPATH,
'law-enforce/grants/le-grants-public-records.html':
BODYFIELD_SPAN_XPATH,
'justice-and-prev/grants/': CATEGORY_H2_XPATH,
'justice-and-prev/grants/bgp/': CATEGORY_H2_XPATH,
'hwy-safety/grants/': CATEGORY_H2_XPATH,
'hwy-safety/grants/ffy-2017-traffic-enforcement-grant-program.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy2017-hsd-grant-opportunities.html':
BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/ffy-2017-step.html': BODYFIELD_SPAN_XPATH,
'hwy-safety/grants/highway-safety-grants-public-records.html':
BODYFIELD_SPAN_XPATH
}
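# Consumption sketch (hypothetical; ``fetch_tree`` stands in for whatever
# lxml-based page loader the real scrapers use, and the base URL is assumed):
#
#     for rel_url, (section_xpath, link_xpath) in MASSGOV_DICT.items():
#         tree = fetch_tree('http://www.mass.gov/eopss/' + rel_url)
#         for section in tree.xpath(section_xpath):
#             for link in section.xpath(link_xpath):
#                 print(link.get('href'))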
|
RagtagOpen/bidwire
|
bidwire/scrapers/massgov/url_scraper_dict.py
|
Python
|
mit
| 1,717 | 0.008154 |
# orm/loading.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
import collections
from . import attributes
from . import exc as orm_exc
from . import path_registry
from . import strategy_options
from .base import _DEFER_FOR_STATE
from .base import _RAISE_FOR_STATE
from .base import _SET_DEFERRED_EXPIRED
from .util import _none_set
from .util import aliased
from .util import state_str
from .. import exc as sa_exc
from .. import util
from ..engine import result_tuple
from ..sql import util as sql_util
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
context.post_load_paths = {}
filtered = query._has_mapper_entities
single_entity = query.is_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item) if ent.use_id_for_hash else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels, extra) = list(
zip(
*[
query_entity.row_processor(query, context, cursor)
for query_entity in query._entities
]
)
)
if not single_entity:
keyed_tuple = result_tuple(labels, extra)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [
keyed_tuple([proc(row) for proc in process])
for row in fetch
]
for path, post_load in context.post_load_paths.items():
post_load.invoke(context, path)
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception:
with util.safe_reraise():
cursor.close()
@util.preload_module("sqlalchemy.orm.query")
def merge_result(query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
querylib = util.preloaded.orm_query
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [
session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load,
_recursive={},
_resolve_conflict_map={},
)
for instance in iterator
]
else:
result = list(iterator)
else:
mapped_entities = [
i
for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)
]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = result_tuple(
keys, [ent.entities for ent in query._entities]
)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load,
_recursive={},
_resolve_conflict_map={},
)
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
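# Note (sketch of the public entry point): this function backs
# ``Query.merge_result()``, e.g. ``q2.merge_result(q1.all())`` merges rows
# loaded by one query into the current Session through another query.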
def get_from_identity(session, mapper, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
if mapper.inherits and not state.mapper.isa(mapper):
return attributes.PASSIVE_CLASS_MISMATCH
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(
query,
key,
refresh_state=None,
with_for_update=None,
only_load_props=None,
no_autoflush=False,
):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
identity_token = key[2]
else:
ident = identity_token = None
if no_autoflush:
query = query.autoflush(False)
return load_on_pk_identity(
query,
ident,
refresh_state=refresh_state,
with_for_update=with_for_update,
only_load_props=only_load_props,
identity_token=identity_token,
)
def load_on_pk_identity(
query,
primary_key_identity,
refresh_state=None,
with_for_update=None,
only_load_props=None,
identity_token=None,
):
"""Load the given primary key identity from the database."""
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if primary_key_identity is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = set(
[
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
]
)
_get_clause = sql_util.adapt_criterion_to_null(_get_clause, nones)
if len(nones) == len(primary_key_identity):
util.warn(
"fully NULL primary key identity cannot load any "
"object. This condition may raise an error in a future "
"release."
)
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict(
[
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
]
)
q._params = params
# with_for_update needs to be query.LockmodeArg()
if with_for_update is not None:
version_check = True
q._for_update_arg = with_for_update
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
if refresh_state and refresh_state.load_options:
q = q._with_current_path(refresh_state.load_path.parent)
q = q._conditional_options(refresh_state.load_options)
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state,
identity_token=identity_token,
)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def _setup_entity_query(
context,
mapper,
query_entity,
path,
adapter,
column_collection,
with_polymorphic=None,
only_load_props=None,
polymorphic_discriminator=None,
**kw
):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic
)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(context.attributes, "memoized_setups", quick_populators)
for value in poly_properties:
if only_load_props and value.key not in only_load_props:
continue
value.setup(
context,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
**kw
)
if (
polymorphic_discriminator is not None
and polymorphic_discriminator is not mapper.polymorphic_on
):
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _warn_for_runid_changed(state):
util.warn(
"Loading context for %s has changed within a load/refresh "
"handler, suggesting a row refresh operation took place. If this "
"event handler is expected to be "
"emitting row refresh operations within an existing load or refresh "
"operation, set restore_load_context=True when establishing the "
"listener to ensure the context remains unchanged when the event "
"handler completes." % (state_str(state),)
)
def _instance_processor(
mapper,
context,
result,
path,
adapter,
only_load_props=None,
refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None,
):
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
identity_class = mapper._identity_class
populators = collections.defaultdict(list)
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(mapper._props[k] for k in only_load_props)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set
)
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
populators["new"].append(
(prop.key, prop._deferred_column_loader)
)
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
populators["expire"].append((prop.key, False))
elif col is _RAISE_FOR_STATE:
populators["new"].append((prop.key, prop._raise_column_loader))
else:
getter = None
# the "adapter" can be here via different paths,
# e.g. via adapter present at setup_query or adapter
# applied to the query afterwards via eager load subquery.
# If the column here
# were already a product of this adapter, sending it through
# the adapter again can return a totally new expression that
# won't be recognized in the result, and the ColumnAdapter
            # currently does not accommodate this. OTOH, if the
# column were never applied through this adapter, we may get
# None back, in which case we still won't get our "getter".
# so try both against result._getter(). See issue #4048
if adapter:
adapted_col = adapter.columns[col]
if adapted_col is not None:
getter = result._getter(adapted_col, False)
if not getter:
getter = result._getter(col, False)
if getter:
populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
else:
prop.create_row_processor(
context, path, mapper, result, adapter, populators
)
propagate_options = context.propagate_options
load_path = (
context.query._current_path + path
if context.query._current_path.path
else path
)
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
runid = context.runid
identity_token = context.identity_token
version_check = context.version_check
if version_check:
version_id_col = mapper.version_id_col
if version_id_col is not None:
if adapter:
version_id_col = adapter.columns[version_id_col]
version_id_getter = result._getter(version_id_col)
else:
version_id_getter = None
if not refresh_state and _polymorphic_from is not None:
key = ("loader", path.path)
if key in context.attributes and context.attributes[key].strategy == (
("selectinload_polymorphic", True),
):
selectin_load_via = mapper._should_selectin_load(
context.attributes[key].local_opts["entities"],
_polymorphic_from,
)
else:
selectin_load_via = mapper._should_selectin_load(
None, _polymorphic_from
)
if selectin_load_via and selectin_load_via is not _polymorphic_from:
# only_load_props goes w/ refresh_state only, and in a refresh
# we are a single row query for the exact entity; polymorphic
# loading does not apply
assert only_load_props is None
callable_ = _load_subclass_via_in(context, path, selectin_load_via)
PostLoad.callable_for_path(
context,
load_path,
selectin_load_via.mapper,
selectin_load_via,
callable_,
selectin_load_via,
)
post_load = PostLoad.for_context(context, load_path, only_load_props)
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = mapper._identity_key_from_state(
refresh_state
)
else:
refresh_identity_key = None
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
tuple_getter = result._tuple_getter(pk_cols, True)
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (identity_class, tuple_getter(row), identity_token)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and version_id_getter and not currentload:
_validate_version_id(
mapper, state, dict_, row, version_id_getter
)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
state.identity_token = identity_token
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (propagate_options or not populate_existing):
state.load_options = propagate_options
state.load_path = load_path
_populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
)
if isnew:
# state.runid should be equal to context.runid / runid
# here, however for event checks we are being more conservative
# and checking against existing run id
# assert state.runid == runid
existing_runid = state.runid
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
if persistent_evt:
loaded_as_persistent(context.session, state)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props
)
if state.runid != runid:
_warn_for_runid_changed(state)
if populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
if post_load:
post_load.add_state(state, True)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context,
row,
state,
dict_,
isnew,
load_path,
unloaded,
populators,
)
if isnew:
if refresh_evt:
existing_runid = state.runid
state.manager.dispatch.refresh(state, context, to_load)
if state.runid != existing_runid:
_warn_for_runid_changed(state)
state._commit(dict_, to_load)
if post_load and context.invoke_all_eagers:
post_load.add_state(state, False)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
def ensure_no_pk(row):
identitykey = (
identity_class,
tuple_getter(row),
identity_token,
)
if not is_not_primary_key(identitykey[1]):
return identitykey
else:
return None
_instance = _decorate_polymorphic_switch(
_instance,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
)
return _instance
def _load_subclass_via_in(context, path, entity):
mapper = entity.mapper
zero_idx = len(mapper.base_mapper.primary_key) == 1
if entity.is_aliased_class:
q, enable_opt, disable_opt = mapper._subclass_load_via_in(entity)
else:
q, enable_opt, disable_opt = mapper._subclass_load_via_in_mapper
def do_load(context, path, states, load_only, effective_entity):
orig_query = context.query
q2 = q._with_lazyload_options(
(enable_opt,) + orig_query._with_options + (disable_opt,),
path.parent,
cache_path=path,
)
if orig_query._populate_existing:
q2.add_criteria(lambda q: q.populate_existing())
q2(context.session).params(
primary_keys=[
state.key[1][0] if zero_idx else state.key[1]
for state, load_attrs in states
]
).all()
return do_load
def _populate_full(
context,
row,
state,
dict_,
isnew,
load_path,
loaded_instance,
populate_existing,
populators,
):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path, unloaded, populators
):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, getter):
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
) != getter(row):
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (
state_str(state),
mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col
),
getter(row),
)
)
def _decorate_polymorphic_switch(
instance_fn,
context,
mapper,
result,
path,
polymorphic_discriminator,
adapter,
ensure_no_pk,
):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" % discriminator
)
else:
if sub_mapper is mapper:
return None
elif not sub_mapper.isa(mapper):
return False
return _instance_processor(
sub_mapper,
context,
result,
path,
adapter,
_polymorphic_from=mapper,
)
polymorphic_instances = util.PopulateDict(configure_subclass_mapper)
getter = result._getter(polymorphic_on)
def polymorphic_instance(row):
discriminator = getter(row)
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
elif _instance is False:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' "
"refers to %s, which is not a sub-mapper of "
"the requested %s"
% (
identitykey,
polymorphic_on,
mapper.polymorphic_map[discriminator],
mapper,
)
)
else:
return None
else:
return instance_fn(row)
else:
identitykey = ensure_no_pk(row)
if identitykey:
raise sa_exc.InvalidRequestError(
"Row with identity key %s can't be loaded into an "
"object; the polymorphic discriminator column '%s' is "
"NULL" % (identitykey, polymorphic_on)
)
else:
return None
return polymorphic_instance
class PostLoad(object):
"""Track loaders and states for "post load" operations.
"""
__slots__ = "loaders", "states", "load_keys"
def __init__(self):
self.loaders = {}
self.states = util.OrderedDict()
self.load_keys = None
def add_state(self, state, overwrite):
# the states for a polymorphic load here are all shared
# within a single PostLoad object among multiple subtypes.
# Filtering of callables on a per-subclass basis needs to be done at
# the invocation level
self.states[state] = overwrite
def invoke(self, context, path):
if not self.states:
return
path = path_registry.PathRegistry.coerce(path)
for token, limit_to_mapper, loader, arg, kw in self.loaders.values():
states = [
(state, overwrite)
for state, overwrite in self.states.items()
if state.manager.mapper.isa(limit_to_mapper)
]
if states:
loader(context, path, states, self.load_keys, *arg, **kw)
self.states.clear()
@classmethod
def for_context(cls, context, path, only_load_props):
pl = context.post_load_paths.get(path.path)
if pl is not None and only_load_props:
pl.load_keys = only_load_props
return pl
@classmethod
    def path_exists(cls, context, path, key):
return (
path.path in context.post_load_paths
and key in context.post_load_paths[path.path].loaders
)
@classmethod
def callable_for_path(
cls, context, path, limit_to_mapper, token, loader_callable, *arg, **kw
):
if path.path in context.post_load_paths:
pl = context.post_load_paths[path.path]
else:
pl = context.post_load_paths[path.path] = PostLoad()
pl.loaders[token] = (token, limit_to_mapper, loader_callable, arg, kw)
def load_scalar_attributes(mapper, state, attribute_names, passive):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" % (state_str(state))
)
has_key = bool(state.key)
result = False
no_autoflush = passive & attributes.NO_AUTOFLUSH
# in the case of inheritance, particularly concrete and abstract
# concrete inheritance, the class manager might have some keys
# of attributes on the superclass that we didn't actually map.
# These could be mapped as "concrete, dont load" or could be completely
# exluded from the mapping and we know nothing about them. Filter them
# here to prevent them from coming through.
if attribute_names:
attribute_names = attribute_names.intersection(mapper.attrs.keys())
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
wp = aliased(mapper, statement)
result = load_on_ident(
session.query(wp)
.options(strategy_options.Load(wp).undefer("*"))
.from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state,
no_autoflush=no_autoflush,
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [
mapper._columntoproperty[col].key for col in mapper.primary_key
]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state)
)
identity_key = mapper._identity_key_from_state(state)
if (
_none_set.issubset(identity_key) and not mapper.allow_partial_pks
) or _none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state),
)
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names,
no_autoflush=no_autoflush,
)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
|
graingert/sqlalchemy
|
lib/sqlalchemy/orm/loading.py
|
Python
|
mit
| 36,424 | 0 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from datetime import (
datetime,
timedelta,
)
from dateutil.relativedelta import relativedelta
from base.tests.model_maker import clean_and_save
from booking.models import (
Booking,
Category,
Location,
)
def get_alpe_d_huez():
return Booking.objects.get(title='Alpe D Huez')
def make_booking(start_date, end_date, title, **kwargs):
defaults = dict(
start_date=start_date,
end_date=end_date,
title=title,
)
defaults.update(kwargs)
return clean_and_save(Booking(**defaults))
def make_booking_in_past(start_date, end_date, title):
"""Save a booking without cleaning (validating) the data."""
b = Booking(**dict(
start_date=start_date,
end_date=end_date,
title=title,
))
b.save()
return b
def next_weekday(d, weekday):
"""Find the date for the next weekday.
Copied from:
http://stackoverflow.com/questions/6558535/python-find-the-date-for-the-first-monday-after-a-given-a-date
"""
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return d + timedelta(days_ahead)
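# Worked example: if ``d`` is a Wednesday (weekday() == 2) and ``weekday`` is
# 5 (Saturday), then days_ahead = 5 - 2 = 3 and the Saturday three days later
# is returned. Asking for the same weekday as ``d`` gives days_ahead = 0,
# which is bumped by 7 -- i.e. the *next* such day, never ``d`` itself.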
def demo_data():
# set-up some dates
today = datetime.today().date()
# 1st week last month starting Saturday
first_prev_month = today + relativedelta(months=-1, day=1)
start_date = next_weekday(first_prev_month, 5)
end_date = start_date + timedelta(days=7)
make_booking_in_past(start_date, end_date, 'Tignes')
# 2nd week last month
make_booking_in_past(end_date, end_date + timedelta(days=7), 'Meribel')
# 1st week this month starting Saturday
first_this_month = today + relativedelta(day=1)
start_date = next_weekday(first_this_month, 5)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Whistler')
# later this month starting Tuesday
start_date = next_weekday(first_this_month + timedelta(days=10), 1)
make_booking_in_past(start_date, start_date + timedelta(days=3), 'Dorset')
# span this and next month
start_date = datetime(today.year, today.month, 27).date()
first_next_month = today + relativedelta(months=+1, day=1)
end_date = datetime(first_next_month.year, first_next_month.month, 2).date()
make_booking_in_past(start_date, end_date, 'Devon')
# next month
start_date = next_weekday(first_next_month + timedelta(days=3), 2)
end_date = next_weekday(start_date, 5)
make_booking(start_date, end_date, 'Alpe D Huez')
make_booking(end_date, end_date + timedelta(days=4), 'Cornwall')
# misc
Category.objects.create_category('Meeting')
Location.objects.create_location('Community Centre')
|
pkimber/booking
|
booking/tests/scenario.py
|
Python
|
apache-2.0
| 2,769 | 0.001083 |
# Rest Imports
from rest_framework import status
# Local Imports
from ava_core.abstract.test_data import AvaCoreTestData
from ava_core.integration.integration_ldap.models import LDAPIntegrationAdapter
# Implementation
class LDAPIntegrationAdapterTestData(AvaCoreTestData):
"""
Test data for LDAPIntegrationAdapter
"""
@staticmethod
def init_requirements():
# Import the required model and data
from ava_core.gather.gather_ldap.models import LDAPGatherHistory
from ava_core.gather.gather_ldap.test_data import LDAPGatherHistoryTestData
        # Check that the requirements haven't already been created;
        # if they haven't, create them.
if LDAPGatherHistory.objects.count() == 0:
LDAPGatherHistoryTestData.init_requirements()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('standard'))
model.save()
model = LDAPGatherHistory.objects.create(**LDAPGatherHistoryTestData.get_data('unique'))
model.save()
# Import the required model and data
from ava_core.integration.integration_abstract.models import IntegrationAdapter
from ava_core.integration.integration_abstract.test_data import IntegrationAdapterTestData
        # Check that the requirements haven't already been created;
        # if they haven't, create them.
if IntegrationAdapter.objects.count() == 0:
IntegrationAdapterTestData.init_requirements()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('standard'))
model.save()
model = IntegrationAdapter.objects.create(**IntegrationAdapterTestData.get_data('unique'))
model.save()
# Store self information
model = LDAPIntegrationAdapter
url = '/example'
standard = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
unique = {
'ldap_password': 'unique_char',
'salt': 'unique_char',
'dump_dn': 'unique_char',
'ldap_user': 'unique_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'unique_char',
}
missing_ldap_password = {
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_password = {
'ldap_password': 'modified_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_salt = {
'ldap_password': 'standard_char',
'salt': 'modified_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_salt = {
'ldap_password': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'modified_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_dump_dn = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_user = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'modified_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_ldap_integration_history = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/2/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_ldap_integration_history = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
modified_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'standard_char',
}
missing_integrationadapter_ptr = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'server': 'standard_char',
}
missing_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
}
modified_server = {
'ldap_password': 'standard_char',
'salt': 'standard_char',
'dump_dn': 'standard_char',
'ldap_user': 'standard_char',
'ldap_integration_history': '/example/1/',
'integrationadapter_ptr': 'default',
'server': 'modified_char',
}
|
alzeih/ava
|
ava_core/integration/integration_ldap/test_data.py
|
Python
|
gpl-3.0
| 6,603 | 0.001212 |
"""Group Anagrams
Given an array of strings, group anagrams together.
Example:
Input:
["eat", "tea", "tan", "ate", "nat", "bat"]
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
from typing import List
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
store = {}
for item in strs:
key = ''.join(sorted(item))
if key in store:
store[key].append(item)
else:
store[key] = [item]
        return list(store.values())
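# Complexity note: with n strings of average length k, sorting each key costs
# O(k log k), so grouping runs in O(n * k log k). A character-count key keeps
# it at O(n * k) -- sketch, assuming the all-lowercase input the problem
# guarantees:
#
#     counts = [0] * 26
#     for ch in item:
#         counts[ord(ch) - ord('a')] += 1
#     key = tuple(counts)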
if __name__ == '__main__':
cases = [
(
["eat", "tea", "tan", "ate", "nat", "bat"],
[
["ate", "eat", "tea"],
["nat", "tan"],
["bat"]
]
),
] # yapf: disable
for case in cases:
for S in [Solution]:
result = S().groupAnagrams(case[0])
for l in case[1]:
for item in l:
found = False
for ll in result:
if item in ll:
found = True
assert found
|
aiden0z/snippets
|
leetcode/049_group_anagrams.py
|
Python
|
mit
| 1,315 | 0.00076 |
class Content(object):
def __init__(self, type_=None, value=None):
self._type = None
self._value = None
if type_ is not None:
self.type = type_
if value is not None:
self.value = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def get(self):
content = {}
if self.type is not None:
content["type"] = self.type
if self.value is not None:
content["value"] = self.value
return content
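# Usage sketch (illustrative values): the helper mirrors the JSON shape the
# SendGrid v3 mail endpoint expects for a content object:
#
#     content = Content("text/plain", "Hello, World!")
#     assert content.get() == {"type": "text/plain", "value": "Hello, World!"}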
|
galihmelon/sendgrid-python
|
sendgrid/helpers/mail/content.py
|
Python
|
mit
| 735 | 0 |
"""
Implement the command-line tool interface.
"""
from __future__ import unicode_literals
import argparse
import os
import sys
import diff_cover
from diff_cover.diff_reporter import GitDiffReporter
from diff_cover.git_diff import GitDiffTool
from diff_cover.git_path import GitPathTool
from diff_cover.violations_reporter import (
XmlCoverageReporter, Pep8QualityReporter,
PyflakesQualityReporter, PylintQualityReporter
)
from diff_cover.report_generator import (
HtmlReportGenerator, StringReportGenerator,
HtmlQualityReportGenerator, StringQualityReportGenerator
)
from lxml import etree
import six
COVERAGE_XML_HELP = "XML coverage report"
HTML_REPORT_HELP = "Diff coverage HTML output"
COMPARE_BRANCH_HELP = "Branch to compare"
VIOLATION_CMD_HELP = "Which code quality tool to use"
INPUT_REPORTS_HELP = "Pep8, pyflakes or pylint reports to use"
OPTIONS_HELP = "Options to be passed to the violations tool"
QUALITY_REPORTERS = {
'pep8': Pep8QualityReporter,
'pyflakes': PyflakesQualityReporter,
'pylint': PylintQualityReporter
}
import logging
LOGGER = logging.getLogger(__name__)
def parse_coverage_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'coverage_xml': COVERAGE_XML,
'html_report': None | HTML_REPORT
}
    where `COVERAGE_XML` is a list of paths and `HTML_REPORT` is a path.
The path strings may or may not exist.
"""
parser = argparse.ArgumentParser(description=diff_cover.DESCRIPTION)
parser.add_argument(
'coverage_xml',
type=str,
help=COVERAGE_XML_HELP,
nargs='+'
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
return vars(parser.parse_args(argv))
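# Example (derived from the parser above):
#
#     parse_coverage_args(['coverage.xml', '--html-report', 'diff.html'])
#     # -> {'coverage_xml': ['coverage.xml'],
#     #     'html_report': 'diff.html',
#     #     'compare_branch': 'origin/master'}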
def parse_quality_args(argv):
"""
Parse command line arguments, returning a dict of
valid options:
{
'violations': pep8 | pyflakes | pylint
'html_report': None | HTML_REPORT
}
where `HTML_REPORT` is a path.
"""
parser = argparse.ArgumentParser(
description=diff_cover.QUALITY_DESCRIPTION
)
parser.add_argument(
'--violations',
type=str,
help=VIOLATION_CMD_HELP,
required=True
)
parser.add_argument(
'--html-report',
type=str,
default=None,
help=HTML_REPORT_HELP
)
parser.add_argument(
'--compare-branch',
type=str,
default='origin/master',
help=COMPARE_BRANCH_HELP
)
parser.add_argument(
'input_reports',
type=str,
nargs="*",
default=[],
help=INPUT_REPORTS_HELP
)
parser.add_argument(
'--options',
type=str,
nargs='?',
default=None,
help=OPTIONS_HELP
)
return vars(parser.parse_args(argv))
def generate_coverage_report(coverage_xml, compare_branch, html_report=None):
"""
Generate the diff coverage report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
xml_roots = [etree.parse(xml_root) for xml_root in coverage_xml]
coverage = XmlCoverageReporter(xml_roots)
# Build a report generator
if html_report is not None:
reporter = HtmlReportGenerator(coverage, diff)
with open(html_report, "wb") as output_file:
reporter.generate_report(output_file)
reporter = StringReportGenerator(coverage, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
# Generate the report
reporter.generate_report(output_file)
def generate_quality_report(tool, compare_branch, html_report=None):
"""
Generate the quality report, using kwargs from `parse_args()`.
"""
diff = GitDiffReporter(compare_branch, git_diff=GitDiffTool())
if html_report is not None:
reporter = HtmlQualityReportGenerator(tool, diff)
output_file = open(html_report, "wb")
else:
reporter = StringQualityReportGenerator(tool, diff)
output_file = sys.stdout if six.PY2 else sys.stdout.buffer
reporter.generate_report(output_file)
def main():
"""
Main entry point for the tool, used by setup.py
"""
progname = sys.argv[0]
# Init the path tool to work with the current directory
try:
cwd = os.getcwdu()
except AttributeError:
cwd = os.getcwd()
GitPathTool.set_cwd(cwd)
if progname.endswith('diff-cover'):
arg_dict = parse_coverage_args(sys.argv[1:])
generate_coverage_report(
arg_dict['coverage_xml'],
arg_dict['compare_branch'],
html_report=arg_dict['html_report'],
)
elif progname.endswith('diff-quality'):
arg_dict = parse_quality_args(sys.argv[1:])
tool = arg_dict['violations']
user_options = arg_dict.get('options')
if user_options:
user_options = user_options[1:-1] # Strip quotes
reporter_class = QUALITY_REPORTERS.get(tool)
if reporter_class is not None:
# If we've been given pre-generated reports,
# try to open the files
input_reports = []
for path in arg_dict['input_reports']:
try:
input_reports.append(open(path, 'rb'))
except IOError:
LOGGER.warning("Could not load '{0}'".format(path))
try:
reporter = reporter_class(tool, input_reports, user_options=user_options)
generate_quality_report(
reporter,
arg_dict['compare_branch'],
arg_dict['html_report']
)
# Close any reports we opened
finally:
for file_handle in input_reports:
file_handle.close()
else:
LOGGER.error("Quality tool not recognized: '{0}'".format(tool))
exit(1)
if __name__ == "__main__":
main()
|
hugovk/diff-cover
|
diff_cover/tool.py
|
Python
|
agpl-3.0
| 6,230 | 0.000482 |
"""
Support the OwnTracks platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import threading
from collections import defaultdict
import homeassistant.components.mqtt as mqtt
from homeassistant.const import STATE_HOME
from homeassistant.util import convert, slugify
DEPENDENCIES = ['mqtt']
REGIONS_ENTERED = defaultdict(list)
MOBILE_BEACONS_ACTIVE = defaultdict(list)
BEACON_DEV_ID = 'beacon'
LOCATION_TOPIC = 'owntracks/+/+'
EVENT_TOPIC = 'owntracks/+/+/event'
_LOGGER = logging.getLogger(__name__)
LOCK = threading.Lock()
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
def setup_scanner(hass, config, see):
"""Setup an OwnTracks tracker."""
max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
def validate_payload(payload, data_type):
"""Validate OwnTracks payload."""
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
_LOGGER.error('Unable to parse payload as JSON: %s', payload)
return None
if not isinstance(data, dict) or data.get('_type') != data_type:
_LOGGER.debug('Skipping %s update for following data '
                          'because of missing or malformed data: %s',
data_type, data)
return None
if max_gps_accuracy is not None and \
convert(data.get('acc'), float, 0.0) > max_gps_accuracy:
_LOGGER.debug('Skipping %s update because expected GPS '
'accuracy %s is not met: %s',
data_type, max_gps_accuracy, data)
return None
if convert(data.get('acc'), float, 1.0) == 0.0:
            _LOGGER.debug('Skipping %s update because GPS accuracy '
'is zero',
data_type)
return None
return data
def owntracks_location_update(topic, payload, qos):
"""MQTT message received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
data = validate_payload(payload, 'location')
if not data:
return
dev_id, kwargs = _parse_see_args(topic, data)
# Block updates if we're in a region
with LOCK:
if REGIONS_ENTERED[dev_id]:
_LOGGER.debug(
"location update ignored - inside region %s",
                    REGIONS_ENTERED[dev_id][-1])
return
see(**kwargs)
see_beacons(dev_id, kwargs)
def owntracks_event_update(topic, payload, qos):
"""MQTT event (geofences) received."""
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typetransition
data = validate_payload(payload, 'transition')
if not data:
return
if data.get('desc') is None:
_LOGGER.error(
"Location missing from `Entering/Leaving` message - "
"please turn `Share` on in OwnTracks app")
return
# OwnTracks uses - at the start of a beacon zone
# to switch on 'hold mode' - ignore this
location = slugify(data['desc'].lstrip("-"))
if location.lower() == 'home':
location = STATE_HOME
dev_id, kwargs = _parse_see_args(topic, data)
def enter_event():
"""Execute enter event."""
zone = hass.states.get("zone.{}".format(location))
with LOCK:
if zone is None and data.get('t') == 'b':
# Not a HA zone, and a beacon so assume mobile
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
if location not in beacons:
beacons.append(location)
_LOGGER.info("Added beacon %s", location)
else:
# Normal region
regions = REGIONS_ENTERED[dev_id]
if location not in regions:
regions.append(location)
_LOGGER.info("Enter region %s", location)
_set_gps_from_zone(kwargs, location, zone)
see(**kwargs)
see_beacons(dev_id, kwargs)
def leave_event():
"""Execute leave event."""
with LOCK:
regions = REGIONS_ENTERED[dev_id]
if location in regions:
regions.remove(location)
new_region = regions[-1] if regions else None
if new_region:
# Exit to previous region
zone = hass.states.get("zone.{}".format(new_region))
_set_gps_from_zone(kwargs, new_region, zone)
_LOGGER.info("Exit to %s", new_region)
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Exit to GPS")
# Check for GPS accuracy
if not ('acc' in data and
max_gps_accuracy is not None and
data['acc'] > max_gps_accuracy):
see(**kwargs)
see_beacons(dev_id, kwargs)
else:
_LOGGER.info("Inaccurate GPS reported")
beacons = MOBILE_BEACONS_ACTIVE[dev_id]
if location in beacons:
beacons.remove(location)
_LOGGER.info("Remove beacon %s", location)
if data['event'] == 'enter':
enter_event()
elif data['event'] == 'leave':
leave_event()
else:
_LOGGER.error(
                'Malformed MQTT message, _type=transition, event=%s',
data['event'])
return
def see_beacons(dev_id, kwargs_param):
"""Set active beacons to the current location."""
kwargs = kwargs_param.copy()
# the battery state applies to the tracking device, not the beacon
kwargs.pop('battery', None)
for beacon in MOBILE_BEACONS_ACTIVE[dev_id]:
kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
kwargs['host_name'] = beacon
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
mqtt.subscribe(hass, EVENT_TOPIC, owntracks_event_update, 1)
return True
def _parse_see_args(topic, data):
"""Parse the OwnTracks location parameters, into the format see expects."""
parts = topic.split('/')
dev_id = slugify('{}_{}'.format(parts[1], parts[2]))
host_name = parts[1]
kwargs = {
'dev_id': dev_id,
'host_name': host_name,
'gps': (data['lat'], data['lon'])
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
return dev_id, kwargs
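# Example (illustrative topic and payload): a message on
# 'owntracks/paulus/phone' with {'lat': 1.0, 'lon': 2.0, 'acc': 60} parses to
# dev_id 'paulus_phone' and kwargs {'dev_id': 'paulus_phone',
# 'host_name': 'paulus', 'gps': (1.0, 2.0), 'gps_accuracy': 60}.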
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters."""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
|
mikaelboman/home-assistant
|
homeassistant/components/device_tracker/owntracks.py
|
Python
|
mit
| 7,427 | 0 |
# Consider a row of n coins of values v1 . . . vn, where n is even.
# We play a game against an opponent by alternating turns. In each turn,
# a player selects either the first or last coin from the row, removes it
# from the row permanently, and receives the value of the coin. Determine the
# maximum possible amount of money we can definitely win if we move first.
# Note: The opponent is as clever as the user.
# http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
def find_max_val_recur(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
if l == r:
        return coins[l]
left_choose = coins[l] + min(find_max_val_recur(coins,l+1,r - 1),find_max_val_recur(coins,l+2,r))
right_choose = coins[r] + min(find_max_val_recur(coins,l + 1,r-1),find_max_val_recur(coins,l,r-2))
return max(left_choose,right_choose)
coin_map = {}
def find_max_val_memo(coins,l,r):
if l + 1 == r:
return max(coins[l],coins[r])
if l == r:
        return coins[l]
if (l,r) in coin_map:
return coin_map[(l,r)]
left_choose = coins[l] + min(find_max_val_memo(coins,l+1,r - 1),find_max_val_memo(coins,l+2,r))
right_choose = coins[r] + min(find_max_val_memo(coins,l + 1,r-1),find_max_val_memo(coins,l,r-2))
max_val = max(left_choose,right_choose)
coin_map[(l,r)] = max_val
return max_val
def find_max_val_bottom_up(coins):
coins_len = len(coins)
table = [[0] * coins_len for i in range(coins_len + 1)]
for gap in range(coins_len):
i = 0
for j in range(gap,coins_len):
# Here x is value of F(i+2, j), y is F(i+1, j-1) and
# z is F(i, j-2) in above recursive formula
x = table[i+2][j] if (i+2) <= j else 0
y = table[i+1][j-1] if (i+1) <= (j-1) else 0
z = table[i][j-2] if i <= (j-2) else 0
table[i][j] = max(coins[i] + min(x,y),coins[j] + min(y,z))
i += 1
return table[0][coins_len - 1]
if __name__=="__main__":
coins = [8,15,3,7]
print(find_max_val_bottom_up(coins))
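    # Expected output: 22. Taking the 7 first leaves [8, 15, 3]; the opponent
    # can then secure at most 11 of the remaining 26, so we finish with
    # 7 + 15 = 22.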
|
bkpathak/Algorithms-collections
|
src/DP/coin_play.py
|
Python
|
apache-2.0
| 2,084 | 0.024952 |
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <alejandro.b.e@gmail.com>
# Copyright (C) 2012-2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import logging
from deform import Button, Form, ValidationFailure
from pyramid.i18n import get_locale_name
from pyramid.httpexceptions import HTTPFound
from pyramid.renderers import render_to_response
from pyramid.view import view_config
from yithlibraryserver.email import send_email_to_admins
from yithlibraryserver.i18n import TranslationString as _
from yithlibraryserver.schemas import ContactSchema
log = logging.getLogger(__name__)
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
return {}
@view_config(route_name='contact', renderer='templates/contact.pt')
def contact(request):
button1 = Button('submit', _('Send message'))
button1.css_class = 'btn-primary'
button2 = Button('cancel', _('Cancel'))
button2.css_class = 'btn-default'
form = Form(ContactSchema(), buttons=(button1, button2))
if 'submit' in request.POST:
controls = request.POST.items()
try:
appstruct = form.validate(controls)
except ValidationFailure as e:
return {'form': e.render()}
context = {'link': request.route_url('contact')}
context.update(appstruct)
subject = ("%s sent a message from Yith's contact form"
% appstruct['name'])
result = send_email_to_admins(
request,
'yithlibraryserver:templates/email_contact',
context,
subject,
extra_headers={'Reply-To': appstruct['email']},
)
if result is None:
log.error(
'%s <%s> tried to send a message from the contact form but no '
'admin emails were configured. Message: %s' % (
appstruct['name'],
appstruct['email'],
appstruct['message'],
)
)
request.session.flash(
_('Thank you very much for sharing your opinion'),
'info',
)
return HTTPFound(location=request.route_path('home'))
elif 'cancel' in request.POST:
return HTTPFound(location=request.route_path('home'))
initial = {}
if request.user is not None:
initial['name'] = request.user.first_name
if request.user.email_verified:
initial['email'] = request.user.email
return {'form': form.render(initial)}
@view_config(route_name='tos', renderer='templates/tos.pt')
def tos(request):
return {}
@view_config(route_name='faq', renderer='string')
def faq(request):
# We don't want to mess up the gettext .po file
# with a lot of strings which don't belong to the
# application interface.
#
# We consider the FAQ as application content
    # so we simply use a different template for each
    # language. When a new locale is added to the
    # application, this template needs to be translated
    # along with the .po files
locale_name = get_locale_name(request)
template = 'yithlibraryserver:templates/faq-%s.pt' % locale_name
return render_to_response(template, {}, request=request)
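# Hedged illustration of the per-locale FAQ templates above: with the request
# locale 'es', the view renders 'yithlibraryserver:templates/faq-es.pt', so
# adding a locale means adding a matching faq-<locale>.pt template.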
@view_config(route_name='credits', renderer='templates/credits.pt')
def credits(request):
return {}
|
lorenzogil/yith-library-server
|
yithlibraryserver/views.py
|
Python
|
agpl-3.0
| 4,160 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2019-03-05 14:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0020_daymetdata'),
]
operations = [
migrations.CreateModel(
name='PublicProjects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('List', models.TextField(blank=True)),
],
),
migrations.RemoveField(
model_name='daymetdata',
name='id',
),
migrations.AddField(
model_name='userprofile',
name='privateProjectList',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='daymetdata',
name='user',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
manterd/myPhyloDB
|
database/migrations/0021_auto_20190305_1458.py
|
Python
|
gpl-3.0
| 1,211 | 0.001652 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional
import attr
import papermill as pm
from airflow.lineage.entities import File
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
@attr.s(auto_attribs=True)
class NoteBook(File):
"""
Jupyter notebook
"""
type_hint: Optional[str] = "jupyter_notebook"
parameters: Optional[Dict] = {}
meta_schema: str = __name__ + '.NoteBook'
class PapermillOperator(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters
:param input_nb: input notebook (can also be a NoteBook or a File inlet)
:type input_nb: str
:param output_nb: output notebook (can also be a NoteBook or File outlet)
:type output_nb: str
:param parameters: the notebook parameters to set
:type parameters: dict
"""
supports_lineage = True
@apply_defaults
def __init__(self,
input_nb: Optional[str] = None,
output_nb: Optional[str] = None,
parameters: Optional[Dict] = None,
*args, **kwargs) -> None:
super().__init__(*args, **kwargs)
if input_nb:
self.inlets.append(NoteBook(url=input_nb,
parameters=parameters))
if output_nb:
self.outlets.append(NoteBook(url=output_nb))
def execute(self, context):
if not self.inlets or not self.outlets:
raise ValueError("Input notebook or output notebook is not specified")
for i in range(len(self.inlets)):
pm.execute_notebook(self.inlets[i].url, self.outlets[i].url,
parameters=self.inlets[i].parameters,
progress_bar=False, report_mode=True)
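# Hedged usage sketch (task_id, notebook paths and the `dag` object are
# illustrative assumptions, not part of this file):
#
#   run_nb = PapermillOperator(
#       task_id='render_report',
#       input_nb='/tmp/report_template.ipynb',
#       output_nb='/tmp/report_rendered.ipynb',
#       parameters={'msgs': 'hello'},
#       dag=dag,
#   )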
|
wooga/airflow
|
airflow/providers/papermill/operators/papermill.py
|
Python
|
apache-2.0
| 2,587 | 0.000773 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import threading
from setproctitle import setproctitle as set_process_title
from pants.goal.run_tracker import RunTracker
from pants.logging.setup import setup_logging
from pants.pantsd.process_manager import ProcessManager
from pants.pantsd.util import clean_global_runtime_state
class _StreamLogger(object):
"""A sys.{stdout,stderr} replacement that pipes output to a logger."""
def __init__(self, logger, log_level):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
"""
self._logger = logger
self._log_level = log_level
def write(self, msg):
for line in msg.rstrip().splitlines():
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
class PantsDaemon(ProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
class StartupFailure(Exception): pass
class RuntimeFailure(Exception): pass
def __init__(self, build_root, work_dir, log_level, log_dir=None, services=None,
metadata_base_dir=None):
"""
:param string build_root: The pants build root.
:param string work_dir: The pants work directory.
:param int log_level: The log level to use for daemon logging.
:param string log_dir: The directory to use for file-based logging via the daemon. (Optional)
:param tuple services: A tuple of PantsService instances to launch/manage. (Optional)
"""
super(PantsDaemon, self).__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
self._logger = logging.getLogger(__name__)
self._build_root = build_root
self._work_dir = work_dir
self._log_level = log_level
self._log_dir = log_dir or os.path.join(work_dir, self.name)
self._services = services or ()
self._socket_map = {}
# N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
self._kill_switch = threading.Event()
@property
def is_killed(self):
return self._kill_switch.is_set()
def set_services(self, services):
self._services = services
def set_socket_map(self, socket_map):
self._socket_map = socket_map
def shutdown(self, service_thread_map):
"""Gracefully terminate all services and kill the main PantsDaemon loop."""
for service, service_thread in service_thread_map.items():
self._logger.info('terminating pantsd service: {}'.format(service))
service.terminate()
service_thread.join()
self._logger.info('terminating pantsd')
self._kill_switch.set()
@staticmethod
def _close_fds():
"""Close pre-fork stdio streams to avoid output in the pants process that launched pantsd."""
for fd in (sys.stdin, sys.stdout, sys.stderr):
file_no = fd.fileno()
fd.flush()
fd.close()
os.close(file_no)
def _setup_logging(self, log_level):
"""Reinitialize logging post-fork to clear all handlers, file descriptors, locks etc.
This must happen first thing post-fork, before any further logging is emitted.
"""
    # Re-initialize the child's logging locks post-fork to avoid potential deadlocks if pre-fork
# threads have any locks acquired at the time of fork.
logging._lock = threading.RLock() if logging.thread else None
for handler in logging.getLogger().handlers:
handler.createLock()
# Invoke a global teardown for all logging handlers created before now.
logging.shutdown()
# Reinitialize logging for the daemon context.
setup_logging(log_level, console_stream=None, log_dir=self._log_dir, log_name=self.LOG_NAME)
# Close out pre-fork file descriptors.
self._close_fds()
# Redirect stdio to the root logger.
sys.stdout = _StreamLogger(logging.getLogger(), logging.INFO)
sys.stderr = _StreamLogger(logging.getLogger(), logging.WARN)
self._logger.debug('logging initialized')
def _setup_services(self, services):
for service in services:
self._logger.info('setting up service {}'.format(service))
service.setup()
def _run_services(self, services):
"""Service runner main loop."""
if not services:
self._logger.critical('no services to run, bailing!')
return
service_thread_map = {service: threading.Thread(target=service.run) for service in services}
# Start services.
for service, service_thread in service_thread_map.items():
self._logger.info('starting service {}'.format(service))
try:
service_thread.start()
except (RuntimeError, service.ServiceError):
self.shutdown(service_thread_map)
raise self.StartupFailure('service {} failed to start, shutting down!'.format(service))
# Monitor services.
while not self.is_killed:
for service, service_thread in service_thread_map.items():
if not service_thread.is_alive():
self.shutdown(service_thread_map)
raise self.RuntimeFailure('service failure for {}, shutting down!'.format(service))
else:
# Avoid excessive CPU utilization.
service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def _run(self):
"""Synchronously run pantsd."""
# Switch log output to the daemon's log stream from here forward.
self._setup_logging(self._log_level)
self._logger.info('pantsd starting, log level is {}'.format(self._log_level))
# Purge as much state as possible from the pants run that launched us.
clean_global_runtime_state()
# Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
set_process_title('pantsd [{}]'.format(self._build_root))
# Write service socket information to .pids.
self._write_named_sockets(self._socket_map)
# Enter the main service runner loop.
self._setup_services(self._services)
self._run_services(self._services)
def pre_fork(self):
"""Pre-fork() callback for ProcessManager.daemonize()."""
# Teardown the RunTracker's SubprocPool pre-fork.
RunTracker.global_instance().shutdown_worker_pool()
# TODO(kwlzn): This currently aborts tracking of the remainder of the pants run that launched
# pantsd.
def post_fork_child(self):
"""Post-fork() child callback for ProcessManager.daemonize()."""
self._run()
|
ity/pants
|
src/python/pants/pantsd/pants_daemon.py
|
Python
|
apache-2.0
| 6,887 | 0.009438 |
from ethereum.slogging import get_logger
log = get_logger('eth.block_creation')
from ethereum.block import Block, BlockHeader
from ethereum.common import mk_block_from_prevstate, validate_header, \
verify_execution_results, validate_transaction_tree, \
set_execution_results, add_transactions, post_finalize
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.messages import apply_transaction
from ethereum.state import State
from ethereum.utils import sha3, encode_hex
import rlp
# Applies the block-level state transition function
def apply_block(state, block):
# Pre-processing and verification
snapshot = state.snapshot()
cs = get_consensus_strategy(state.config)
try:
# Start a new block context
cs.initialize(state, block)
# Basic validation
assert validate_header(state, block.header)
assert cs.check_seal(state, block.header)
assert cs.validate_uncles(state, block)
assert validate_transaction_tree(state, block)
# Process transactions
for tx in block.transactions:
apply_transaction(state, tx)
# Finalize (incl paying block rewards)
cs.finalize(state, block)
# Verify state root, tx list root, receipt root
assert verify_execution_results(state, block)
# Post-finalize (ie. add the block header to the state for now)
post_finalize(state, block)
except (ValueError, AssertionError) as e:
state.revert(snapshot)
raise e
return state
# Creates a candidate block on top of the given chain
def make_head_candidate(chain, txqueue=None,
parent=None,
timestamp=None,
coinbase='\x35'*20,
extra_data='moo ha ha says the laughing cow.',
min_gasprice=0):
log.info('Creating head candidate')
if parent is None:
temp_state = State.from_snapshot(chain.state.to_snapshot(root_only=True), chain.env)
else:
temp_state = chain.mk_poststate_of_blockhash(parent.hash)
cs = get_consensus_strategy(chain.env.config)
# Initialize a block with the given parent and variables
blk = mk_block_from_prevstate(chain, temp_state, timestamp, coinbase, extra_data)
# Find and set the uncles
blk.uncles = cs.get_uncles(chain, temp_state)
blk.header.uncles_hash = sha3(rlp.encode(blk.uncles))
# Call the initialize state transition function
cs.initialize(temp_state, blk)
# Add transactions
add_transactions(temp_state, blk, txqueue, min_gasprice)
# Call the finalize state transition function
cs.finalize(temp_state, blk)
# Set state root, receipt root, etc
set_execution_results(temp_state, blk)
log.info('Created head candidate successfully')
return blk, temp_state
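# Hedged usage sketch (assumes a populated `chain` and a transaction queue
# `txqueue`, neither of which is constructed in this module):
#
#   blk, post_state = make_head_candidate(chain, txqueue)
#   new_state = apply_block(chain.state, blk)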
|
nirenzang/Serpent-Pyethereum-Tutorial
|
pyethereum/ethereum/meta.py
|
Python
|
gpl-3.0
| 2,865 | 0.00349 |
from django import forms
from .models import PassType, Registration
class SignupForm(forms.ModelForm):
pass_type = forms.ModelChoiceField(
queryset=PassType.objects.filter(active=True),
widget=forms.widgets.RadioSelect(),
)
class Meta:
model = Registration
fields = (
"first_name",
"last_name",
"email",
"residing_country",
"dance_role",
"pass_type",
"workshop_partner_name",
"workshop_partner_email",
"lunch",
)
widgets = {
"dance_role": forms.widgets.RadioSelect(),
"lunch": forms.widgets.RadioSelect(),
}
class Media:
css = {"all": ("css/forms.css",)}
email_repeat = forms.EmailField()
agree_to_terms = forms.BooleanField(required=False)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["pass_type"].empty_label = None
self.fields["lunch"].empty_label = None
def clean_workshop_partner_email(self):
"""
Take care of uniqueness constraint ourselves
"""
email = self.cleaned_data.get("workshop_partner_email")
qs = Registration.objects.filter(workshop_partner_email=email).exists()
if email and qs:
raise forms.ValidationError("Workshop parter already taken.")
return email
def clean_agree_to_terms(self):
data = self.cleaned_data["agree_to_terms"]
if data is False:
raise forms.ValidationError("You must agree to the terms.")
return data
def clean(self):
cleaned_data = super().clean()
email = cleaned_data.get("email")
email_repeat = cleaned_data.get("email_repeat")
ws_partner_email = cleaned_data.get("workshop_partner_email")
if email != email_repeat:
raise forms.ValidationError("Ensure email verfication matches.")
if email and ws_partner_email and email == ws_partner_email:
raise forms.ValidationError("You can't partner with yourself.")
|
smokeyfeet/smokeyfeet-registration
|
src/smokeyfeet/registration/forms.py
|
Python
|
mit
| 2,149 | 0 |
# -*- coding: utf-8 -*-
{
'name': "Better validation for Attendance",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Jörn Mankiewicz",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '8.0.0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr_attendance','hr_timesheet_improvement'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/hr_attendance.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
|
jmankiewicz/odooAddons
|
hr_attendance_new_check/__openerp__.py
|
Python
|
agpl-3.0
| 958 | 0.003135 |
import sys
import networkx as nx
def main(graphml):
g = nx.read_graphml(graphml)
nx.write_graphml(g, graphml)
if __name__ == '__main__':
main(sys.argv[1])
|
sebwink/deregnet
|
graphs/kegg/keggtranslator/bin/make_graphml_igraph_readable.py
|
Python
|
bsd-3-clause
| 169 | 0.011834 |
import numpy as np
# Example taken from : http://cs231n.github.io/python-numpy-tutorial/#numpy
x = np.array([[1,2],[3,4]])
print np.sum(x) # Compute sum of all elements; prints "10"
print np.sum(x, axis=0) # Compute sum of each column; prints "[4 6]"
print np.sum(x, axis=1) # Compute sum of each row; prints "[3 7]"
|
kmova/bootstrap
|
docker/py2docker/numpy-sum.py
|
Python
|
apache-2.0
| 323 | 0.009288 |
# This file is part of pybootchartgui.
# pybootchartgui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pybootchartgui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
import cairo
import draw
def render(writer, res, options, filename):
handlers = {
"png": (lambda w, h: cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h), \
lambda sfc: sfc.write_to_png(filename)),
"pdf": (lambda w, h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
"svg": (lambda w, h: cairo.SVGSurface(filename, w, h), lambda sfc: 0)
}
if options.format is None:
fmt = filename.rsplit('.', 1)[1]
else:
fmt = options.format
if not (fmt in handlers):
writer.error ("Unknown format '%s'." % fmt)
return 10
make_surface, write_surface = handlers[fmt]
(w, h) = draw.extents (1.0, *res)
w = max (w, draw.MIN_IMG_W)
surface = make_surface (w, h)
ctx = cairo.Context (surface)
draw.render (ctx, options, 1.0, *res)
write_surface (surface)
writer.status ("bootchart written to '%s'" % filename)
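# Hedged illustration (writer, res and options come from the surrounding
# application and are assumptions here): calling
#   render(writer, res, options, 'bootchart.svg')
# with options.format unset picks the SVG handler from the file extension
# and writes the chart to that file.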
|
dpaleino/bootchart2
|
pybootchartgui/batch.py
|
Python
|
gpl-3.0
| 1,604 | 0.006858 |
#!/usr/bin/env python
import httplib
try:
import simplejson as json
except ImportError:
import json
import os
import sys
from urlparse import urljoin
try:
import requests
except ImportError:
raise ImportError('Missing dependency "requests". Do ``pip install requests``.')
try:
import yaml
except ImportError:
raise ImportError('Missing dependency "pyyaml". Do ``pip install pyyaml``.')
# ST2 configuration
ST2_CONFIG_FILE = './config.yaml'
ST2_API_BASE_URL = 'http://localhost:9101/v1'
ST2_AUTH_BASE_URL = 'http://localhost:9100'
ST2_USERNAME = None
ST2_PASSWORD = None
ST2_AUTH_TOKEN = None
ST2_AUTH_PATH = 'tokens'
ST2_WEBHOOKS_PATH = 'webhooks/st2/'
ST2_TRIGGERS_PATH = 'triggertypes/'
ST2_TRIGGERTYPE_PACK = 'sensu'
ST2_TRIGGERTYPE_NAME = 'event_handler'
ST2_TRIGGERTYPE_REF = '.'.join([ST2_TRIGGERTYPE_PACK, ST2_TRIGGERTYPE_NAME])
REGISTERED_WITH_ST2 = False
OK_CODES = [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]
def _create_trigger_type():
try:
url = _get_st2_triggers_url()
payload = {
'name': ST2_TRIGGERTYPE_NAME,
'pack': ST2_TRIGGERTYPE_PACK,
'description': 'Trigger type for sensu event handler.'
}
# sys.stdout.write('POST: %s: Body: %s\n' % (url, payload))
headers = {}
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
post_resp = requests.post(url, data=json.dumps(payload), headers=headers)
except:
sys.stderr.write('Unable to register trigger type with st2.')
raise
else:
status = post_resp.status_code
if status not in OK_CODES:
sys.stderr.write('Failed to register trigger type with st2. HTTP_CODE: %d\n' %
status)
            raise Exception('Failed to register trigger type with st2. HTTP_CODE: %d' % status)
else:
sys.stdout.write('Registered trigger type with st2.\n')
def _get_auth_url():
return urljoin(ST2_AUTH_BASE_URL, ST2_AUTH_PATH)
def _get_auth_token():
global ST2_AUTH_TOKEN
auth_url = _get_auth_url()
try:
resp = requests.post(auth_url, json.dumps({'ttl': 5 * 60}),
auth=(ST2_USERNAME, ST2_PASSWORD))
except:
raise Exception('Cannot get auth token from st2. Will try unauthed.')
else:
ST2_AUTH_TOKEN = resp.json()['token']
def _register_with_st2():
global REGISTERED_WITH_ST2
try:
url = urljoin(_get_st2_triggers_url(), ST2_TRIGGERTYPE_REF)
# sys.stdout.write('GET: %s\n' % url)
if not ST2_AUTH_TOKEN:
_get_auth_token()
if ST2_AUTH_TOKEN:
get_resp = requests.get(url, headers={'X-Auth-Token': ST2_AUTH_TOKEN})
else:
get_resp = requests.get(url)
if get_resp.status_code != httplib.OK:
_create_trigger_type()
else:
body = json.loads(get_resp.text)
if len(body) == 0:
_create_trigger_type()
except:
raise
else:
REGISTERED_WITH_ST2 = True
def _get_st2_triggers_url():
url = urljoin(ST2_API_BASE_URL, ST2_TRIGGERS_PATH)
return url
def _get_st2_webhooks_url():
url = urljoin(ST2_API_BASE_URL, ST2_WEBHOOKS_PATH)
return url
def _post_event_to_st2(url, body):
headers = {}
headers['X-ST2-Integration'] = 'sensu.'
headers['Content-Type'] = 'application/json; charset=utf-8'
if ST2_AUTH_TOKEN:
headers['X-Auth-Token'] = ST2_AUTH_TOKEN
try:
sys.stdout.write('POST: url: %s, body: %s\n' % (url, body))
r = requests.post(url, data=json.dumps(body), headers=headers)
except:
sys.stderr.write('Cannot connect to st2 endpoint.')
else:
status = r.status_code
if status not in OK_CODES:
sys.stderr.write('Failed posting sensu event to st2. HTTP_CODE: %d\n' % status)
else:
sys.stdout.write('Sent sensu event to st2. HTTP_CODE: %d\n' % status)
def main(args):
body = {}
body['trigger'] = ST2_TRIGGERTYPE_REF
body['payload'] = json.loads(sys.stdin.read().strip())
_post_event_to_st2(_get_st2_webhooks_url(), body)
if __name__ == '__main__':
try:
if not os.path.exists(ST2_CONFIG_FILE):
sys.stderr.write('Configuration file not found. Exiting.\n')
sys.exit(1)
with open(ST2_CONFIG_FILE) as f:
config = yaml.safe_load(f)
ST2_USERNAME = config['st2_username']
ST2_PASSWORD = config['st2_password']
ST2_API_BASE_URL = config['st2_api_base_url']
ST2_AUTH_BASE_URL = config['st2_auth_base_url']
if not REGISTERED_WITH_ST2:
_register_with_st2()
except:
sys.stderr.write('Failed registering with st2. Won\'t post event.\n')
else:
main(sys.argv)
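# Hedged invocation sketch (event fields are illustrative; a config.yaml with
# st2_username/st2_password/st2_api_base_url/st2_auth_base_url must exist):
#
#   echo '{"client": {"name": "web01"}, "check": {"name": "cpu", "status": 2}}' \
#       | python st2_handler.py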
|
lmEshoo/st2contrib
|
packs/sensu/etc/st2_handler.py
|
Python
|
apache-2.0
| 4,880 | 0.002459 |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
from ryu.ofproto.ofproto_common import *
LOG = logging.getLogger('test_ofproto_common')
class TestOfprotCommon(unittest.TestCase):
""" Test case for ofprotp_common
"""
def test_struct_ofp_header(self):
eq_(OFP_HEADER_PACK_STR, '!BBHI')
eq_(OFP_HEADER_SIZE, 8)
def test_define_constants(self):
eq_(OFP_TCP_PORT, 6633)
eq_(OFP_SSL_PORT, 6633)
|
ykaneko/ryu
|
ryu/tests/unit/ofproto/test_ofproto_common.py
|
Python
|
apache-2.0
| 1,111 | 0 |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from .models import *
from .forms import *
from comunicacion.lugar.models import *
from mapeo.models import *
from django.http import HttpResponse
from django.db.models import Sum, Count, Avg
import collections
import numpy as np
# Create your views here.
def _queryset_filtrado(request):
params = {}
if request.session['year']:
params['annio'] = request.session['year']
if request.session['municipio']:
params['productor__comunidad__municipio__in'] = request.session['municipio']
else:
if request.session['comunidad']:
params['productor__comunidad__in'] = request.session['comunidad']
if request.session['ciclo']:
params['ciclo_productivo'] = request.session['ciclo']
if request.session['rubro']:
params['cultivo'] = request.session['rubro']
if request.session['organizacion']:
params['productor__productor__organizacion'] = request.session['organizacion']
unvalid_keys = []
for key in params:
if not params[key]:
unvalid_keys.append(key)
for key in unvalid_keys:
del params[key]
return Monitoreo.objects.filter(**params)
def consulta(request,template="granos_basicos/consulta.html"):
if request.method == 'POST':
mensaje = None
form = Consulta(request.POST)
if form.is_valid():
request.session['year'] = form.cleaned_data['year']
request.session['municipio'] = form.cleaned_data['municipio']
request.session['comunidad'] = form.cleaned_data['comunidad']
request.session['ciclo'] = form.cleaned_data['ciclo']
request.session['rubro'] = form.cleaned_data['rubro']
request.session['organizacion'] = form.cleaned_data['organizacion']
mensaje = "Todas las variables estan correctamente :)"
request.session['activo'] = True
centinela = 1
else:
centinela = 0
else:
form = Consulta()
mensaje = "Existen alguno errores"
centinela = 0
try:
del request.session['year']
del request.session['municipio']
del request.session['comunidad']
del request.session['ciclo']
del request.session['rubro']
del request.session['organizacion']
except:
pass
return render(request, template, locals())
def genero_produccion(request,template="granos_basicos/productores/genero_produccion.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
CHOICE_SEXO = ((1,'Hombre'),(2,'Mujer'))
choice = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida'))
sexo_productor = {}
for obj in choice:
conteo = filtro.filter(productor__productor__jefe = obj[0]).distinct('productor').count()
sexo_productor[obj[1]] = conteo
if request.GET.get('jefe'):
jefe = request.GET['jefe']
if jefe == '1':
CHOICE_SEXO_JEFE = ((1,'Hombre'),)
elif jefe == '2':
CHOICE_SEXO_JEFE = ((2,'Mujer'),)
elif jefe == '3':
CHOICE_SEXO_JEFE = ((3,'Compartida'),)
else:
CHOICE_SEXO_JEFE = ((1,'Hombre'),(2,'Mujer'),(3,'Compartida'))
RELACION_CHOICES = ((1,'Jefe/Jefa de familia'),(2,'Cónyuge'),
(3,'Hijo/Hija'),(4,'Otro familiar'),
(5,'Administrador'),)
prod_gb = {}
prod = {}
dic_relacion = {}
for obj in CHOICE_SEXO_JEFE:
for x in CHOICE_SEXO:
            # relationship between heads of the family
jefe_familia = filtro.filter(productor__sexo = x[0],productor__productor__jefe = obj[0]).distinct('productor').count()
prod[x[1]] = jefe_familia
for relacion in RELACION_CHOICES:
conteo = filtro.filter(productor__productorgranosbasicos__relacion = relacion[0],productor__productor__jefe = obj[0]).distinct('productor').count()
dic_relacion[relacion[1]] = conteo
for x in CHOICE_SEXO:
conteo = filtro.filter(productor__sexo = x[0]).distinct('productor').count()
prod_gb[x[1]] = conteo
return render(request, template, locals())
def composicion_familiar(request,template="granos_basicos/productores/composicion_familiar.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
    # new outputs
lista_hijos = []
lista_hijas = []
lista_sumatoria = []
for obj in filtro:
hijos = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '3').count()
lista_hijos.append(hijos)
hijas = ComposicionFamiliar.objects.filter(persona = obj.productor,familia = '4').count()
lista_hijas.append(hijas)
sumatoria = hijos + hijas
lista_sumatoria.append(sumatoria)
result = []
    # mean, median, standard deviation, minimum and maximum
promedios = [np.mean(lista_hijos),np.mean(lista_hijas),np.mean(lista_sumatoria)]
mediana = [np.median(lista_hijos),np.median(lista_hijas),np.median(lista_sumatoria)]
desviacion = [np.std(lista_hijos),np.std(lista_hijas),np.std(lista_sumatoria)]
minimo = [min(lista_hijos),min(lista_hijas),min(lista_sumatoria)]
maximo = [max(lista_hijos),max(lista_hijas),max(lista_sumatoria)]
    # appending to the list
result.append(promedios)
result.append(mediana)
result.append(desviacion)
result.append(minimo)
result.append(maximo)
    # chart: parents' educational level in the families
ESCOLARIDAD_CHOICES = (
(1,'Ninguno'),(2,'Primaria Incompleta'),(3,'Primaria'),
(4,'Secundaria Incompleta'),(5,'Secundaria'),(6,'Técnico'),
(7,'Universitario'),(8,'Profesional'))
escolaridad = collections.OrderedDict()
for obj in ESCOLARIDAD_CHOICES:
madre = filtro.filter(productor__composicionfamiliar__familia = '2',
productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
padre = filtro.filter(productor__composicionfamiliar__familia = '1',
productor__composicionfamiliar__escolaridad = obj[0]).distinct('productor__composicionfamiliar').count()
        # sons --------------------
hijos_5_12 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijos_13_18 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijos_19 = filtro.filter(productor__composicionfamiliar__familia = '3',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
        # daughters --------------------
hijas_5_12 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (5,12)).distinct('productor__composicionfamiliar').count()
hijas_13_18 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (13,18)).distinct('productor__composicionfamiliar').count()
hijas_19 = filtro.filter(productor__composicionfamiliar__familia = '4',
productor__composicionfamiliar__escolaridad = obj[0],
productor__composicionfamiliar__edad__range = (19,100)).distinct('productor__composicionfamiliar').count()
escolaridad[obj[1]] = (madre,padre,
hijos_5_12,hijos_13_18,hijos_19,
hijas_5_12,hijas_13_18,hijas_19)
#--------------------------------------------------------------------------------
SI_NO_CHOICES = ((1,'Si'),(2,'No'))
FAMILIA_CHOICES = ((1,'Padre'),(2,'Madre'),(3,'Hijo'),(4,'Hija'),(5,'Hermano'),
(6,'Hermana'),(7,'Sobrino'),(8,'Sobrina'),(9,'Abuelo'),
(10,'Abuela'),(11,'Cuñado'),(12,'Cuñada'),(13,'Yerno'),
(14,'Nuera'),(15,'Otro'),)
list_participacion = []
for obj in FAMILIA_CHOICES:
total = filtro.filter(productor__composicionfamiliar__familia = obj[0]).distinct(
'productor__composicionfamiliar').count()
si_participa = filtro.filter(productor__composicionfamiliar__familia = obj[0],
productor__composicionfamiliar__participacion = '1').distinct(
'productor__composicionfamiliar').count()
promedio = total / float(productores)
promedio = round(promedio, 2)
list_participacion.append((obj[1],saca_porcentajes(si_participa,total,False),promedio))
return render(request, template, locals())
def georeferencia(request,template="granos_basicos/monitoreos/georeferencia.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
lista_mapa = filtro.values('nombre_parcela','latitud','longitud')
mapa = []
for obj in lista_mapa:
if obj['latitud'] != None and obj['longitud'] != None:
mapa.append((obj['nombre_parcela'],obj['latitud'],obj['longitud']))
return render(request, template, locals())
def caracteristicas_parcela(request,template="granos_basicos/monitoreos/caracteristicas_parcela.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
lista_parcela = []
lista_inclinado = []
lista_plano = []
    # plot age and depth of the arable layer
parcela = filtro.values('edad_parcela','profundidad_capa')
for obj in parcela:
if obj['edad_parcela'] != None and obj['profundidad_capa'] != None:
lista_parcela.append((obj['edad_parcela'],obj['profundidad_capa']))
    # age of the plots
menor_5 = filtro.filter(edad_parcela__range = (0,5)).count()
edad_6_20 = filtro.filter(edad_parcela__range = (5.1,20)).count()
mayor_20 = filtro.filter(edad_parcela__range = (20.1,100)).count()
for obj in filtro:
        # % sloped area > 60%
area = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('inclinado',flat = True)
for x in area:
if x >= 60:
inclinado = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('inclinado','monitoreo__profundidad_capa')
lista_inclinado.append(inclinado)
        # % flat area > 60%
area1 = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '1').values_list('plano',flat = True)
for y in area1:
if y >= 60:
plano = DistribucionPendiente.objects.filter(monitoreo = obj,seleccion = '2').values_list('plano','monitoreo__profundidad_capa')
lista_plano.append(plano)
    # water access
SI_NO_CHOICES = ((1,'Si'),(2,'No'))
acceso_agua = {}
conteo_si = 0
for obj in SI_NO_CHOICES:
conteo = filtro.filter(acceso_agua = obj[0]).count()
acceso_agua[obj[1]] = conteo
    # water source
fuente_agua = {}
conteo_si = filtro.filter(acceso_agua = 1).count()
for obj in ACCESO_AGUA_CHOICES:
conteo = filtro.filter(fuente_agua__icontains = obj[0]).count()
fuente_agua[obj[1]] = saca_porcentajes(conteo,conteo_si,False)
return render(request, template, locals())
def ciclo_productivo(request,template="granos_basicos/monitoreos/ciclo_productivo.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
    # sowing
fecha_siembra = filtro.values_list('datosmonitoreo__fecha_siembra',flat = True)
lista_siembra = []
for obj in fecha_siembra:
if obj != None:
x = obj.isocalendar()[1]
lista_siembra.append(x)
l_siembra = sorted(lista_siembra)
dic_siembra = collections.OrderedDict()
for v in l_siembra:
count = l_siembra.count(v)
dic_siembra[v] = count
    # harvest
fecha_cosecha = filtro.values_list('datosmonitoreo__fecha_cosecha',flat = True)
lista_cosecha = []
for obj in fecha_cosecha:
if obj != None:
x = obj.isocalendar()[1]
lista_cosecha.append(x)
l_cosecha = sorted(lista_cosecha)
dic_cosecha = collections.OrderedDict()
for v in l_cosecha:
count = l_cosecha.count(v)
dic_cosecha[v] = count
return render(request, template, locals())
def uso_suelo(request,template="granos_basicos/monitoreos/uso_suelo.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
USO_SUELO_CHOICES = ((1,'Área Total'),(2,'Cultivos Anuales (GB)'),(3,'Cultivos perennes'),
(4,'Tacotales'),(5,'Potreros'),(6,'Pasto de Corte'))
total = filtro.filter(productor__usosuelo__uso = '1').aggregate(total = Sum('productor__usosuelo__cantidad'))['total']
uso_suelo = collections.OrderedDict()
for obj in USO_SUELO_CHOICES:
        # table 1
familias = filtro.filter(productor__usosuelo__uso = obj[0]).count()
mz = filtro.filter(productor__usosuelo__uso = obj[0]).aggregate(total = Sum('productor__usosuelo__cantidad'))['total']
porcentaje = saca_porcentajes(mz,total,False)
try:
promedio = mz / float(productores)
except:
promedio = 0
        # data dictionary
uso_suelo[obj[1]] = (familias,mz,porcentaje,promedio)
    # table 2
tamano_finca = filtro.filter(productor__usosuelo__uso = '1').values_list('productor__usosuelo__cantidad',flat = True)
granos_basicos = filtro.filter(productor__usosuelo__uso = '2').values_list('productor__usosuelo__cantidad',flat = True)
area_siembra = filtro.values_list('datosmonitoreo__area_siembra',flat = True)
result = []
    # mean, median, standard deviation, minimum and maximum
promedios = [np.mean(tamano_finca),np.mean(granos_basicos)]
mediana = [np.median(tamano_finca),np.median(granos_basicos)]
desviacion = [np.std(tamano_finca),np.std(granos_basicos)]
minimo = [min(tamano_finca),min(granos_basicos)]
maximo = [max(tamano_finca),max(granos_basicos)]
    # appending to the list
result.append(promedios)
result.append(mediana)
result.append(desviacion)
result.append(minimo)
result.append(maximo)
    # distribution of the sowing area
menor_1 = filtro.filter(datosmonitoreo__area_siembra__range = (0,0.99)).count()
entre_1_2 = filtro.filter(datosmonitoreo__area_siembra__range = (1,2)).count()
entre_2_3 = filtro.filter(datosmonitoreo__area_siembra__range = (2.1,3)).count()
entre_3_4 = filtro.filter(datosmonitoreo__area_siembra__range = (3.1,4)).count()
entre_4_5 = filtro.filter(datosmonitoreo__area_siembra__range = (4.1,5)).count()
    # average sowing area
area_siembra = filtro.values_list('datosmonitoreo__area_siembra',flat = True)
lista = []
for obj in area_siembra:
if obj != None:
lista.append(obj)
promedio_area = np.mean(lista)
desviacion_area = np.std(lista)
mediana_area = np.median(lista)
minimo_area = min(lista)
maximo_area = max(lista)
return render(request, template, locals())
def recursos_economicos(request,template="granos_basicos/monitoreos/recursos_economicos.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
dic = {}
for obj in RESPUESTA_CHOICES:
conteo = filtro.filter(recursossiembra__respuesta = obj[0]).count()
dic[obj[1]] = conteo
return render(request, template, locals())
def rendimiento(request,template="granos_basicos/monitoreos/rendimiento.html"):
filtro = _queryset_filtrado(request)
productores = filtro.distinct('productor').count()
ANIO_CHOICES = ((2014,'2014'),(2015,'2015'),(2016,'2016'),(2017,'2017'),
(2018,'2018'),(2019,'2019'),(2020,'2020'),)
    # maize
rend_maiz = collections.OrderedDict()
productores_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '1').distinct('monitoreo__productor').count()
for obj in ANIO_CHOICES:
primera_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '1',
anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
postrera_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '2',rubro = '1',
anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
apante_maiz = HistorialRendimiento.objects.filter(ciclo_productivo = '3',rubro = '1',
anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
if primera_maiz != None or postrera_maiz != None or apante_maiz != None:
rend_maiz[obj[1]] = (primera_maiz,postrera_maiz,apante_maiz)
    # beans
rend_frijol = collections.OrderedDict()
productores_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '2').distinct('monitoreo__productor').count()
for obj in ANIO_CHOICES:
primera_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '1',rubro = '2',
anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
        postrera_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '2',rubro = '2',
                        anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
        apante_frijol = HistorialRendimiento.objects.filter(ciclo_productivo = '3',rubro = '2',
                        anio = obj[1]).aggregate(avg = Avg('rendimiento'))['avg']
if primera_frijol != None or postrera_frijol != None or apante_frijol != None:
rend_frijol[obj[1]] = (primera_frijol,postrera_frijol,apante_frijol)
return render(request, template, locals())
def get_comunies(request):
ids = request.GET.get('ids', '')
results = []
dicc = {}
if ids:
lista = ids.split(',')
for id in lista:
monitoreos = Monitoreo.objects.filter(productor__municipio__id = id).distinct().values_list('productor__comunidad__id', flat=True)
municipios = Municipio.objects.get(pk = id)
comunies = Comunidad.objects.filter(municipio__id = municipios.pk,id__in = monitoreos).order_by('nombre')
lista1 = []
for c in comunies:
comu = {}
comu['id'] = c.id
comu['nombre'] = c.nombre
lista1.append(comu)
dicc[municipios.nombre] = lista1
return HttpResponse(simplejson.dumps(dicc), content_type = 'application/json')
def saca_porcentajes(dato, total, formato=True):
if dato != None:
try:
            porcentaje = (dato / float(total)) * 100 if total not in (None, 0) else 0
except:
return 0
if formato:
return porcentaje
else:
return '%.2f' % porcentaje
else:
return 0
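# Hedged illustration of the helper above: saca_porcentajes(25, 200) returns
# 12.5, while saca_porcentajes(25, 200, False) returns the string '12.50'.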
|
ErickMurillo/ciat_plataforma
|
ficha_granos_basicos/views.py
|
Python
|
mit
| 17,583 | 0.05029 |
"""Test of models for embargo app"""
import json
import pytest
import six
from django.db.utils import IntegrityError
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from ..models import (
Country,
CountryAccessRule,
CourseAccessRuleHistory,
EmbargoedCourse,
EmbargoedState,
IPFilter,
RestrictedCourse
)
class EmbargoModelsTest(CacheIsolationTestCase):
"""Test each of the 3 models in embargo.models"""
ENABLED_CACHES = ['default']
def test_course_embargo(self):
course_id = CourseLocator('abc', '123', 'doremi')
# Test that course is not authorized by default
assert not EmbargoedCourse.is_embargoed(course_id)
# Authorize
cauth = EmbargoedCourse(course_id=course_id, embargoed=True)
cauth.save()
# Now, course should be embargoed
assert EmbargoedCourse.is_embargoed(course_id)
assert six.text_type(cauth) == u"Course '{course_id}' is Embargoed".format(course_id=course_id)
# Unauthorize by explicitly setting email_enabled to False
cauth.embargoed = False
cauth.save()
# Test that course is now unauthorized
assert not EmbargoedCourse.is_embargoed(course_id)
assert six.text_type(cauth) == u"Course '{course_id}' is Not Embargoed".format(course_id=course_id)
def test_state_embargo(self):
# Azerbaijan and France should not be blocked
good_states = ['AZ', 'FR']
        # Gah block USA and Antarctica
blocked_states = ['US', 'AQ']
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in blocked_states + good_states:
assert state not in currently_blocked
# Block
cauth = EmbargoedState(embargoed_countries='US, AQ')
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
assert state not in currently_blocked
for state in blocked_states:
assert state in currently_blocked
# Change embargo - block Isle of Man too
blocked_states.append('IM')
cauth.embargoed_countries = 'US, AQ, IM'
cauth.save()
currently_blocked = EmbargoedState.current().embargoed_countries_list
for state in good_states:
assert state not in currently_blocked
for state in blocked_states:
assert state in currently_blocked
def test_ip_blocking(self):
whitelist = u'127.0.0.1'
blacklist = u'18.244.51.3'
cwhitelist = IPFilter.current().whitelist_ips
assert whitelist not in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert blacklist not in cblacklist
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
assert whitelist in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert blacklist in cblacklist
def test_ip_network_blocking(self):
whitelist = u'1.0.0.0/24'
blacklist = u'1.1.0.0/16'
IPFilter(whitelist=whitelist, blacklist=blacklist).save()
cwhitelist = IPFilter.current().whitelist_ips
assert u'1.0.0.100' in cwhitelist
assert u'1.0.0.10' in cwhitelist
assert u'1.0.1.0' not in cwhitelist
cblacklist = IPFilter.current().blacklist_ips
assert u'1.1.0.0' in cblacklist
assert u'1.1.0.1' in cblacklist
assert u'1.1.1.0' in cblacklist
assert u'1.2.0.0' not in cblacklist
class RestrictedCourseTest(CacheIsolationTestCase):
"""Test RestrictedCourse model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
restricted_course = RestrictedCourse.objects.create(course_key=course_id)
assert six.text_type(restricted_course) == six.text_type(course_id)
def test_restricted_course_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
RestrictedCourse.objects.create(course_key=course_id)
# Warm the cache
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(course_id)
RestrictedCourse.is_disabled_access_check(course_id)
assert not RestrictedCourse.is_disabled_access_check(course_id)
        # add a new course so the cache is invalidated and the db is hit again
new_course_id = CourseLocator('def', '123', 'doremi')
RestrictedCourse.objects.create(course_key=new_course_id, disable_access_check=True)
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
RestrictedCourse.is_disabled_access_check(new_course_id)
assert RestrictedCourse.is_disabled_access_check(new_course_id)
        # deleting an object will also invalidate the cache and hit the db
        # on the next is_restricted_course lookup
abc = RestrictedCourse.objects.get(course_key=new_course_id)
abc.delete()
with self.assertNumQueries(1):
RestrictedCourse.is_restricted_course(new_course_id)
# it should come from cache
with self.assertNumQueries(0):
RestrictedCourse.is_restricted_course(new_course_id)
class CountryTest(TestCase):
"""Test Country model. """
def test_unicode_values(self):
country = Country.objects.create(country='NZ')
assert six.text_type(country) == 'New Zealand (NZ)'
class CountryAccessRuleTest(CacheIsolationTestCase):
"""Test CountryAccessRule model. """
ENABLED_CACHES = ['default']
def test_unicode_values(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Whitelist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
course_id = CourseLocator('def', '123', 'doremi')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
access_rule = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
assert six.text_type(access_rule) == u'Blacklist New Zealand (NZ) for {course_key}'.format(course_key=course_id)
def test_unique_together_constraint(self):
"""
Course with specific country can be added either as whitelist or blacklist
trying to add with both types will raise error
"""
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
with pytest.raises(IntegrityError):
CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.BLACKLIST_RULE,
country=country
)
def test_country_access_list_cache_with_save_delete(self):
course_id = CourseLocator('abc', '123', 'doremi')
country = Country.objects.create(country='NZ')
restricted_course1 = RestrictedCourse.objects.create(course_key=course_id)
course = CountryAccessRule.objects.create(
restricted_course=restricted_course1,
rule_type=CountryAccessRule.WHITELIST_RULE,
country=country
)
# Warm the cache
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
with self.assertNumQueries(0):
CountryAccessRule.check_country_access(course_id, 'NZ')
# Deleting an object will invalidate the cache
course.delete()
with self.assertNumQueries(1):
CountryAccessRule.check_country_access(course_id, 'NZ')
class CourseAccessRuleHistoryTest(TestCase):
"""Test course access rule history. """
def setUp(self):
super(CourseAccessRuleHistoryTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.course_key = CourseLocator('edx', 'DemoX', 'Demo_Course')
self.restricted_course = RestrictedCourse.objects.create(course_key=self.course_key)
self.countries = {
'US': Country.objects.create(country='US'),
'AU': Country.objects.create(country='AU')
}
def test_course_access_history_no_rules(self):
self._assert_history([])
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_with_rules(self):
# Add one rule
us_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
self._assert_history([('US', 'whitelist')])
# Add another rule
au_rule = CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['AU'],
rule_type=CountryAccessRule.BLACKLIST_RULE
)
self._assert_history([
('US', 'whitelist'),
('AU', 'blacklist')
])
# Delete the first rule
us_rule.delete()
self._assert_history([('AU', 'blacklist')])
# Delete the second rule
au_rule.delete()
self._assert_history([])
def test_course_access_history_delete_all(self):
# Create a rule
CountryAccessRule.objects.create(
restricted_course=self.restricted_course,
country=self.countries['US'],
rule_type=CountryAccessRule.WHITELIST_RULE
)
# Delete the course (and, implicitly, all the rules)
self.restricted_course.delete()
self._assert_history_deleted()
def test_course_access_history_change_message(self):
# Change the message key
self.restricted_course.enroll_msg_key = 'embargo'
self.restricted_course.access_msg_key = 'embargo'
self.restricted_course.save()
# Expect a history entry with the changed keys
self._assert_history([], enroll_msg='embargo', access_msg='embargo')
def _assert_history(self, country_rules, enroll_msg='default', access_msg='default'):
"""Check the latest history entry.
Arguments:
country_rules (list): List of rules, each of which are tuples
of the form `(country_code, rule_type)`.
Keyword Arguments:
enroll_msg (str): The expected enrollment message key.
access_msg (str): The expected access message key.
Raises:
AssertionError
"""
record = CourseAccessRuleHistory.objects.latest()
# Check that the record is for the correct course
assert record.course_key == self.course_key
# Load the history entry and verify the message keys
snapshot = json.loads(record.snapshot)
assert snapshot['enroll_msg'] == enroll_msg
assert snapshot['access_msg'] == access_msg
# For each rule, check that there is an entry
# in the history record.
for (country, rule_type) in country_rules:
assert {'country': country, 'rule_type': rule_type} in snapshot['country_rules']
# Check that there are no duplicate entries
assert len(snapshot['country_rules']) == len(country_rules)
def _assert_history_deleted(self):
"""Check the latest history entry for a 'DELETED' placeholder.
Raises:
AssertionError
"""
record = CourseAccessRuleHistory.objects.latest()
assert record.course_key == self.course_key
assert record.snapshot == 'DELETED'
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/embargo/tests/test_models.py
|
Python
|
agpl-3.0
| 12,890 | 0.001164 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0);
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_7/ar_/test_artificial_32_Anscombe_Lag1Trend_7__100.py
|
Python
|
bsd-3-clause
| 263 | 0.087452 |
#!/usr/bin/python3
#import nltk
#import pattern.en
from nltk import word_tokenize
from nltk import pos_tag
from nltk.corpus import wordnet
#import nltk.fuf.linearizer
from nltk.stem.wordnet import WordNetLemmatizer as wnl
from re import sub
import string
import random
from .genderPredictor import genderPredictor
#nltk.download()
# nltk downloads: maxent_ne_chunker, maxent_treebank_pos_tagger, punkt, wordnet
# install numpy
# WordNetLemmatizer().lemmatize(word,'v')
class Extrapolate:
def __init__(self):
self.sent_syns = []
print("Setting up Gender Predictor: ")
self.gp = genderPredictor.genderPredictor()
accuracy = self.gp.trainAndTest()
# print("Accuracy:", accuracy)
# print ('Most Informative Features')
feats = self.gp.getMostInformativeFeatures(10)
# for feat in feats:
# print (feat)
def change_gender(self, pnoun, gender):
pnlist = [(("her", "F"), ("him", "M")),
(("she", "F"), ("he", "M")),
(("hers", "F"), ("his", "M")),
(("herself", "F"), ("himself", "M"))]
for pair in pnlist:
for pi, p in enumerate(pair):
if p[0] == pnoun and p[1] != gender:
return pair[(pi-1)%2][0]
        return pnoun
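    # Hedged illustration of change_gender: change_gender('him', 'F') -> 'her',
    # while change_gender('him', 'M') -> 'him' (pronoun already matches gender).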
def find_synonyms(self, w, wpos):
syn_words = []
synsets = wordnet.synsets(w, pos=wpos)
for s in synsets:
for l in s.lemmas():
syn_words.append(l.name())
return syn_words
def replace_synonyms(self, o_sent, n_sent):
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
for n in n_tagged:
for sdx, syn_list in enumerate(self.sent_syns):
for syn in syn_list:
if (n[0] == syn):
n_sent = sub(r"\b%s\b" %n[0], o_tagged[sdx][0], n_sent)
return n_sent
def replace_proper_nouns(self, o_sent, n_sent):
proper_nouns = []
p_pnouns = []
o_tagged = pos_tag(word_tokenize(o_sent))
n_tagged = pos_tag(word_tokenize(n_sent))
# print("\nTransforming the output:")
# print("Input sentence:", o_sent)
# print("Found sentence:", n_sent)
# print("Input sentence tagged:", o_tagged)
# print("Found sentence tagged:", n_tagged)
for o in o_tagged:
if o[1] == 'NNP' and o not in proper_nouns:
proper_nouns.append(o)
for n in n_tagged:
if (n[1] == 'PRP' or n[1] == 'PRP$' or n[1] == 'NNP') and n not in p_pnouns:
p_pnouns.append(n)
# print("")
if (len(proper_nouns) == 1) and (len(p_pnouns) > 0):
n_sent = sub(r"\b%s\b" %p_pnouns[0][0] , proper_nouns[0][0], n_sent, 1)
gender = self.gp.classify(proper_nouns[0][0])
# print(proper_nouns[0][0], "is classified as", gender)
for pnoun in p_pnouns:
n_pnoun = self.change_gender(pnoun[0], gender)
n_sent = sub(r"\b%s\b" %pnoun[0] , n_pnoun, n_sent)
elif len(proper_nouns) < 1:
print("No proper nouns to replace")
else:
print("Not yet implemented, :P")
return n_sent
def transform(self, o_sent, n_sent):
n_sent = self.replace_proper_nouns(o_sent, n_sent)
n_sent = self.replace_synonyms(o_sent, n_sent)
return(n_sent)
def strip_pos_copy(self, tag):
new_tag = []
for item in tag:
new_tag.append(item[0])
return new_tag
def extrapolate(self, sent):
# tags the part of speech in each word
tagged = pos_tag(word_tokenize(sent))
tag_list = []
for item in tagged:
tag_list.append(list(item))
# puts nouns and verbs in their base form
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
tag_list[idx][0] = wnl().lemmatize(item[0],'v')
elif item[1] == 'NN' or item[1] == 'NNS':
tag_list[idx][0] = wnl().lemmatize(item[0],'n')
synonyms = [[] for i in range(len(tag_list))]
        # finds synonyms for each noun, verb, adj in tag_list -> puts in corresponding index in synonyms
for idx, item in enumerate(tag_list):
if item[1][0] == 'V':
synonyms[idx] = self.find_synonyms(item[0], wordnet.VERB)
#for v in synonyms[idx]:
# v = en.verb.past(v)
elif item[1] == 'NN' or item[1] == 'NNS':
synonyms[idx] = self.find_synonyms(item[0], wordnet.NOUN)
elif item[1][0] == 'J':
synonyms[idx] = self.find_synonyms(item[0], wordnet.ADJ)
# gets rid of duplicates
        for si, s in enumerate(synonyms):
            synonyms[si] = list(set(s))  # rebind into the list so the dedup sticks
# print(tag_list[si][0], ": ", s)
self.sent_syns = synonyms
search_sent = []
# creates a list of similar sentences to search for
for idx, item in enumerate(tag_list):
# looks for synonyms at the corresponding index
for s in synonyms[idx]:
temp = sub(r"\b%s\b" %item[0], s, sent)
search_sent.append(temp)
        # remove duplicate sentences (strings are hashable, so a set works)
search_sent = list(set(search_sent))
# print("\nSample list of synonymous sentences:")
# for i in range(min(len(search_sent), 20)):
# print(search_sent[i])
return search_sent
if __name__ == '__main__':
#list of pretend sentences to search through
sent_list = []
sent_list.append("She danced with the prince and they fall in love.")
sent_list.append("The emperor realized he was swindled but continues the parade anyway.")
sent_list.append("He and his wife were very poor.")
sent_list.append("She promised anything if he would get it for her. ")
sent_list.append("The bears came home and frightened her and she ran away.")
sent_list.append("They came upon a house made of sweets and they ate some. ")
sent_list.append("He climbed the beanstalk and found a giant there who had gold coins that he wanted. ")
sent_list.append("The rats follow him and he led them into the harbor and they die.")
sent_list.append("He begged to be spared and told him about his poor father.")
sent_list.append("The two were married and live happily everafter.")
sent_list.append("The good fairies made another spell so that she would only sleep for 100 years and a prince would awaken her. ")
sent_list.append("The stepmother ordered her to be killed but the huntsman spared her life.")
sent_list.append("The wolf fell into it and died.")
sent_list.append("A fairy granted her wish and gave her a seed to plant. ")
sent_list.append("He decided to run away and came across a cottage. ")
#instantiating extrapolate class, TAKES NOTHING
e = Extrapolate()
#expected input from storytellingbot
o_sent = "Elsa took a sharp sword to slay the monster"
print("\nInput:" + o_sent)
#o_sent = input("Enter a sentence: ")
search_sent = e.extrapolate(o_sent)
index = 6
#index = random.randint(0, len(sent_list)-1)
print("\nTest index: "+ str(index+1))
#index = int(input("Enter a number between 1 and "+str(len(sent_list))+": "))-1
#print(sent_list[index])
output = e.transform(o_sent, sent_list[index])
print(output)
#this would be the post
|
theopak/storytellingbot
|
Extrapolate/Extrapolate.py
|
Python
|
mit
| 7,576 | 0.005148 |
#############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
# entry of test
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
# create qt quick application
checkedTargets, projectName = createNewQtQuickApplication(tempDir(), "SampleApp")
# build it - on all build configurations
availableConfigs = iterateBuildConfigs(len(checkedTargets))
if not availableConfigs:
test.fatal("Haven't found a suitable Qt version - leaving without building.")
for kit, config in availableConfigs:
selectBuildConfig(len(checkedTargets), kit, config)
# try to compile
test.log("Testing build configuration: " + config)
clickButton(waitForObject(":*Qt Creator.Build Project_Core::Internal::FancyToolButton"))
waitForCompile()
# check output if build successful
ensureChecked(waitForObject(":Qt Creator_CompileOutput_Core::Internal::OutputPaneToggleButton"))
waitFor("object.exists(':*Qt Creator.Cancel Build_QToolButton')", 20000)
cancelBuildButton = findObject(':*Qt Creator.Cancel Build_QToolButton')
waitFor("not cancelBuildButton.enabled", 30000)
compileOutput = waitForObject(":Qt Creator.Compile Output_Core::OutputWindow")
if not test.verify(compileSucceeded(compileOutput.plainText),
"Verifying building of simple qt quick application."):
test.log(compileOutput.plainText)
# exit qt creator
invokeMenuItem("File", "Exit")
|
martyone/sailfish-qtcreator
|
tests/system/suite_SCOM/tst_SCOM01/test.py
|
Python
|
lgpl-2.1
| 3,047 | 0.009846 |
from __future__ import print_function
import fileinput
from datetime import datetime
for line in fileinput.input():
line = line.rstrip()
# This condition removes CDX header lines
    if line.startswith(' '):
continue
# Extract just the timestamp from line
timestamp = line.split(' ', 2)[1]
    # Datetime format in CDX is 20121125005312
date_object = datetime.strptime(timestamp, '%Y%m%d%H%M%S')
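    # e.g. '20121125005312' parses to datetime(2012, 11, 25, 0, 53, 12)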
# print(date_object.strftime('%Y-%m-%d'))
print(date_object.strftime('%Y-%m-%d %H:%M:%S'))
|
vphill/eot-cdx-analysis
|
code/cdx_extract_date.py
|
Python
|
cc0-1.0
| 524 | 0 |
from redbreast.core.utils import *
import pytest
from uliweb import manage, functions
import os
def test_import():
a = CommonUtils.get_class('redbreast.core.spec.TaskSpec')
assert str(a) == "<class 'redbreast.core.spec.task.TaskSpec'>"
def test_import_not_exist():
a = CommonUtils.get_class('redbreast.core.spec.NotExistSpec')
assert str(a) == 'None'
def test_import_error():
with pytest.raises(ImportError):
a = CommonUtils.get_class('not.exist.module.NotExistSpec')
class TestUtilInProject(object):
def setup(self):
locate_dir = os.path.dirname(__file__)
os.chdir(locate_dir)
os.chdir('test_project')
import shutil
shutil.rmtree('database.db', ignore_errors=True)
manage.call('uliweb syncdb')
manage.call('uliweb syncspec')
from uliweb.manage import make_simple_application
app = make_simple_application(apps_dir='./apps')
def teardown(self):
import shutil
shutil.rmtree('database.db', ignore_errors=True)
def test_import(self):
maps = {
'simple_task' : 'redbreast.core.spec.task.SimpleTask',
'join_task' : 'redbreast.core.spec.task.JoinTask',
'choice_task' : 'redbreast.core.spec.task.ChoiceTask',
'split_task' : 'redbreast.core.spec.task.SplitTask',
'multichocie_task' : 'redbreast.core.spec.task.MultiChoiceTask',
'auto_simple_task' : 'redbreast.core.spec.task.AutoSimpleTask',
'auto_join_task' : 'redbreast.core.spec.task.AutoJoinTask',
'auto_choice_task' : 'redbreast.core.spec.task.AutoChoiceTask',
'auto_split_task' : 'redbreast.core.spec.task.AutoSplitTask',
'auto_multichoice_task' :'redbreast.core.spec.task.AutoMultiChoiceTask',
}
for spec in maps:
a = CommonUtils.get_spec(spec)
assert str(a) == "<class '%s'>" % maps[spec]
spec1 = maps[spec].replace("redbreast.core.spec.task.", "")
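            # the bare class name (e.g. 'SimpleTask') should resolve to the
            # same class as the fully qualified path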
b = CommonUtils.get_spec(spec1)
assert str(b) == "<class '%s'>" % maps[spec]
|
uliwebext/uliweb-redbreast
|
test/test_core_utils.py
|
Python
|
bsd-2-clause
| 2,255 | 0.010643 |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
from mne import read_events, Epochs, pick_types, read_cov
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.utils import slow_test, run_tests_if_main
from mne.viz.evoked import _butterfly_onselect, plot_compare_evokeds
from mne.viz.utils import _fake_click
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
"""Get raw data."""
return read_raw_fif(raw_fname, preload=False, add_eeg_ref=False)
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
"""Get epochs."""
raw = _get_raw()
raw.add_proj([], remove_existing=True)
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = picks[np.round(np.linspace(0, len(picks) - 1, n_chan)).astype(int)]
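    # np.linspace spreads the n_chan indices evenly across the available picks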
picks[0] = 2 # make sure we have a magnetometer
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0), add_eeg_ref=False)
epochs.info['bads'] = [epochs.ch_names[-1]]
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject,
add_eeg_ref=False)
return epochs_delayed_ssp
@slow_test
def test_plot_evoked():
"""Test plotting of evoked."""
import matplotlib.pyplot as plt
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
fig = evoked.plot(proj=True, hline=[1], exclude=[], window_title='foo')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded & spatial_colors & zorder
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads'], spatial_colors=True, gfp=True,
zorder='std')
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
plt.close('all')
# test GFP only
evoked.plot(gfp='only')
assert_raises(ValueError, evoked.plot, gfp='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads', cmap='interactive')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
evoked.plot_topo() # should auto-find layout
_butterfly_onselect(0, 200, ['mag', 'grad'], evoked)
plt.close('all')
cov = read_cov(cov_fname)
cov['method'] = 'empirical'
evoked.plot_white(cov)
evoked.plot_white([cov, cov])
# plot_compare_evokeds: test condition contrast, CI, color assignment
plot_compare_evokeds(evoked.copy().pick_types(meg='mag'))
evoked.rename_channels({'MEG 2142': "MEG 1642"})
assert len(plot_compare_evokeds(evoked)) == 2
colors = dict(red='r', blue='b')
linestyles = dict(red='--', blue='-')
red, blue = evoked.copy(), evoked.copy()
red.data *= 1.1
blue.data *= 0.9
plot_compare_evokeds([red, blue], picks=3) # list of evokeds
plot_compare_evokeds([[red, evoked], [blue, evoked]],
picks=3) # list of lists
# test picking & plotting grads
contrast = dict()
contrast["red/stim"] = list((evoked.copy(), red))
contrast["blue/stim"] = list((evoked.copy(), blue))
# test a bunch of params at once
plot_compare_evokeds(contrast, colors=colors, linestyles=linestyles,
picks=[0, 2], vlines=[.01, -.04], invert_y=True,
truncate_yaxis=False, ylim=dict(mag=(-10, 10)),
styles={"red/stim": {"linewidth": 1}})
assert_raises(ValueError, plot_compare_evokeds,
contrast, picks='str') # bad picks: not int
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
colors=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(fake=1)) # 'fake' not in conds
assert_raises(ValueError, plot_compare_evokeds, [[1, 2], [3, 4]],
picks=3) # evoked must contain Evokeds
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
styles=dict(err=1)) # bad styles dict
assert_raises(ValueError, plot_compare_evokeds, evoked, picks=3,
gfp=True) # no single-channel GFP
assert_raises(TypeError, plot_compare_evokeds, evoked, picks=3,
ci='fake') # ci must be float or None
contrast["red/stim"] = red
contrast["blue/stim"] = blue
plot_compare_evokeds(contrast, picks=[0], colors=['r', 'b'],
ylim=dict(mag=(1, 10)))
# Hack to test plotting of maxfiltered data
evoked_sss = evoked.copy()
evoked_sss.info['proc_history'] = [dict(max_info=None)]
evoked_sss.plot_white(cov)
evoked_sss.plot_white(cov_fname)
plt.close('all')
evoked.plot_sensors() # Test plot_sensors
plt.close('all')
run_tests_if_main()
|
jniediek/mne-python
|
mne/viz/tests/test_evoked.py
|
Python
|
bsd-3-clause
| 7,380 | 0.000136 |
"""n-body simulator to derive TDV+TTV diagrams of planet-moon configurations.
Credit for part of the source is given to
https://github.com/akuchling/50-examples/blob/master/gravity.rst
Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License
"""
import numpy
import math
import matplotlib.pylab as plt
from modified_turtle import Turtle
from phys_const import *
class Body(Turtle):
"""Subclass of Turtle representing a gravitationally-acting body"""
name = 'Body'
vx = vy = 0.0 # velocities in m/s
px = py = 0.0 # positions in m
def attraction(self, other):
"""(Body): (fx, fy) Returns the force exerted upon this body by the other body"""
# Distance of the other body
sx, sy = self.px, self.py
ox, oy = other.px, other.py
dx = (ox-sx)
dy = (oy-sy)
d = math.sqrt(dx**2 + dy**2)
# Force f and direction to the body
f = G * self.mass * other.mass / (d**2)
theta = math.atan2(dy, dx)
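        # atan2 resolves the correct quadrant, which plain atan(dy/dx) would not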
# direction of the force
fx = math.cos(theta) * f
fy = math.sin(theta) * f
return fx, fy
def loop(bodies, orbit_duration):
"""([Body]) Loops and updates the positions of all the provided bodies"""
# Calculate the duration of our simulation: One full orbit of the outer moon
seconds_per_day = 24*60*60
timesteps_per_day = 1000
timestep = seconds_per_day / timesteps_per_day
total_steps = int(orbit_duration / 3600 / 24 * timesteps_per_day)
#print total_steps, orbit_duration / 24 / 60 / 60
for body in bodies:
body.penup()
body.hideturtle()
for step in range(total_steps):
for body in bodies:
if body.name == 'planet':
# Add current position and velocity to our list
tdv_list.append(body.vx)
ttv_list.append(body.px)
force = {}
for body in bodies:
# Add up all of the forces exerted on 'body'
total_fx = total_fy = 0.0
for other in bodies:
# Don't calculate the body's attraction to itself
if body is other:
continue
fx, fy = body.attraction(other)
total_fx += fx
total_fy += fy
# Record the total force exerted
force[body] = (total_fx, total_fy)
# Update velocities based upon on the force
for body in bodies:
fx, fy = force[body]
body.vx += fx / body.mass * timestep
body.vy += fy / body.mass * timestep
# Update positions
body.px += body.vx * timestep
body.py += body.vy * timestep
#body.goto(body.px*SCALE, body.py*SCALE)
#body.dot(3)
def run_sim(R_star, transit_duration, bodies):
"""Run 3-body sim and convert results to TTV + TDV values in [minutes]"""
# Run 3-body sim for one full orbit of the outermost moon
loop(bodies, orbit_duration)
# Move resulting data from lists to numpy arrays
ttv_array = numpy.array([])
ttv_array = ttv_list
tdv_array = numpy.array([])
tdv_array = tdv_list
# Zeropoint correction
middle_point = numpy.amin(ttv_array) + numpy.amax(ttv_array)
ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
ttv_array = numpy.divide(ttv_array, 1000) # km/s
# Compensate for barycenter offset of planet at start of simulation:
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
ttv_array = numpy.divide(ttv_array, stretch_factor)
# Convert to time units, TTV
ttv_array = numpy.divide(ttv_array, R_star)
ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24) # minutes
# Convert to time units, TDV
oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60 # m/sec
newspeed = oldspeed - numpy.amax(tdv_array)
difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
conversion_factor = difference / numpy.amax(tdv_array)
tdv_array = numpy.multiply(tdv_array, conversion_factor)
return ttv_array, tdv_array
"""Main routine"""
# Set variables and constants. Do not change these!
G = 6.67428e-11 # Gravitational constant G
SCALE = 5e-07 # [px/m] Only needed for plotting during nbody-sim
tdv_list = []
ttv_list = []
R_star = 6.96 * 10**5 # [km], solar radius
transit_duration = (2*pi/sqrt(G*(M_sun+M_jup)/a_jup**3)*R_sun/(pi*a_jup)*sqrt((1+R_jup/R_sun)**2))/60/60/24 # transit duration without a moon, Eq. (C1) Kipping (2009b, MNRAS), for q = 0
print(transit_duration)
planet = Body()
planet.name = 'planet'
planet.mass = M_jup
#semimajor_axis = 1. * AU #[m]
semimajor_axis = a_jup
stellar_mass = M_sun
radius_hill = semimajor_axis * (planet.mass / (3 * (stellar_mass))) ** (1./3)
# Define parameters
firstmoon = Body()
firstmoon.mass = M_gan
firstmoon.px = 0.4218 * 10**9
secondmoon = Body()
secondmoon.mass = M_gan
secondmoon.px = 0.48945554 * 10**9
thirdmoon = Body()
thirdmoon.mass = M_gan
thirdmoon.px = 0.59293316 * 10**9
fourthmoon = Body()
fourthmoon.mass = M_gan
fourthmoon.px = 0.77696224 * 10**9
fithmoon = Body()
fithmoon.mass = M_gan
fithmoon.px = 1.23335068 * 10**9
# Calculate start velocities
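# (vis-viva: v^2 = G*M*(2/r - 1/a); for a circular orbit r = a, so the
#  (2/px - 1/px) terms below reduce to 1/px and v = sqrt(G*M/px))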
firstmoon.vy = math.sqrt(G * planet.mass * (2 / firstmoon.px - 1 / firstmoon.px))
secondmoon.vy = math.sqrt(G * planet.mass * (2 / secondmoon.px - 1 / secondmoon.px))
thirdmoon.vy = math.sqrt(G * planet.mass * (2 / thirdmoon.px - 1 / thirdmoon.px))
fourthmoon.vy = math.sqrt(G * planet.mass * (2 / fourthmoon.px - 1 / fourthmoon.px))
fithmoon.vy = math.sqrt(G * planet.mass * (2 / fithmoon.px - 1 / fithmoon.px))
planet.vy = (-secondmoon.vy * secondmoon.mass - firstmoon.vy * firstmoon.mass) / planet.mass
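# (note: this momentum balance includes only the first two moons)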
# Calculate planet displacement. This holds for circular orbits
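# (for each planet-moon pair, m_planet * x_planet = m_moon * a_moon, so each
#  moon shifts the planet by (m_moon / m_planet) * a_moon; summed below)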
gravity_firstmoon = (firstmoon.mass / planet.mass) * firstmoon.px
gravity_secondmoon = (secondmoon.mass / planet.mass) * secondmoon.px
gravity_thirdmoon = (thirdmoon.mass / planet.mass) * thirdmoon.px
gravity_fourthmoon = (fourthmoon.mass / planet.mass) * fourthmoon.px
gravity_fithmoon = (fithmoon.mass / planet.mass) * fithmoon.px
planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon + gravity_thirdmoon + gravity_fourthmoon + gravity_fithmoon)
# Use the outermost moon to calculate the length of one full orbit duration
orbit_duration = math.sqrt((4 * math.pi**2 *fithmoon.px ** 3) / (G * (fithmoon.mass + planet.mass)))
orbit_duration = orbit_duration * 1.002
# Run simulation. Make sure to add/remove the moons you want to simulate!
ttv_array, tdv_array = run_sim(
R_star,
transit_duration,
[planet, firstmoon, secondmoon, thirdmoon, fourthmoon, fithmoon])
# Output information
print('TTV amplitude =', numpy.amax(ttv_array),
      '[min] =', numpy.amax(ttv_array) * 60, '[sec]')
print('TDV amplitude =', numpy.amax(tdv_array),
      '[min] =', numpy.amax(tdv_array) * 60, '[sec]')
ax = plt.axes()
plt.plot(ttv_array, tdv_array, color = 'k')
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
plt.rc('text', usetex=True)
plt.tick_params(axis='both', which='major', labelsize = 16)
plt.xlabel('transit timing variation [minutes]', fontsize = 16)
plt.ylabel('transit duration variation [minutes]', fontsize = 16)
ax.tick_params(direction='out')
plt.ylim([numpy.amin(tdv_array) * 1.2, numpy.amax(tdv_array) * 1.2])
plt.xlim([numpy.amin(ttv_array) * 1.2, numpy.amax(ttv_array) * 1.2])
plt.plot((0, 0), (numpy.amax(tdv_array) * 10., numpy.amin(tdv_array) * 10.), 'k', linewidth=0.5)
plt.plot((numpy.amin(ttv_array) * 10., numpy.amax(ttv_array) * 10.), (0, 0), 'k', linewidth=0.5)
# Fix axes for comparison with eccentric moon
plt.xlim(-0.11, +0.11)
plt.ylim(-0.8, +0.8)
plt.annotate(r"5:4:3:2:1", xy=(-0.105, +0.7), size=16)
plt.savefig("fig_system_22.eps", bbox_inches = 'tight')
|
hippke/TTV-TDV-exomoons
|
create_figures/system_22.py
|
Python
|
mit
| 7,952 | 0.005659 |
import platform
import socket
import sys
from mule_local.JobGeneration import *
from mule.JobPlatformResources import *
from . import JobPlatformAutodetect
import multiprocessing
# Underscore defines symbols to be private
_job_id = None
def _whoami(depth=1):
"""
String of function name to recycle code
https://www.oreilly.com/library/view/python-cookbook/0596001673/ch14s08.html
Returns
-------
string
Return function name
"""
return sys._getframe(depth).f_code.co_name
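# Frame depth 0 would be _whoami() itself; p_gen_script_info() below passes
# depth=2 so the generated job script records the name of *its* caller.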
def p_gen_script_info(jg : JobGeneration):
global _job_id
return """#
# Generating function: """+_whoami(2)+"""
# Platform: """+get_platform_id()+"""
# Job id: """+_job_id+"""
#
"""
def get_platform_autodetect():
"""
Returns
-------
bool
True if current platform matches, otherwise False
"""
return JobPlatformAutodetect.autodetect()
def get_platform_id():
"""
Return platform ID
Returns
-------
string
unique ID of platform
"""
return "ppeixoto_usp_gnu"
def get_platform_resources():
"""
Return information about hardware
"""
h = JobPlatformResources()
h.num_cores_per_node = multiprocessing.cpu_count()
h.num_nodes = 1
# TODO: So far, we only assume a single socket system as a fallback
h.num_cores_per_socket = h.num_cores_per_node
return h
def jobscript_setup(jg : JobGeneration):
"""
Setup data to generate job script
"""
global _job_id
_job_id = jg.runtime.getUniqueID(jg.compile)
return
def jobscript_get_header(jg : JobGeneration):
"""
    These headers typically contain information on e.g. job execution, number of compute nodes, etc.
Returns
-------
string
multiline text for scripts
"""
content = """#! /bin/bash
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_exec_prefix(jg : JobGeneration):
"""
Prefix before executable
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
content = ""
content += jg.runtime.get_jobscript_plan_exec_prefix(jg.compile, jg.runtime)
if jg.compile.threading != 'off':
content += """
export OMP_NUM_THREADS="""+str(p.num_threads_per_rank)+"""
export OMP_DISPLAY_ENV=VERBOSE
"""
if p.core_oversubscription:
raise Exception("Not supported with this script!")
else:
if p.core_affinity != None:
content += "\necho \"Affnity: "+str(p.core_affinity)+"\"\n"
if p.core_affinity == 'compact':
content += "source $MULE_ROOT/platforms/bin/setup_omp_places.sh nooversubscription close\n"
#content += "\nexport OMP_PROC_BIND=close\n"
            elif p.core_affinity == 'scatter':
                # 'scatter' is not implemented; OMP_PROC_BIND=spread would be
                # the natural mapping once it is
                raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
else:
raise Exception("Affinity '"+str(p.core_affinity)+"' not supported")
content += "\n"
return content
def jobscript_get_exec_command(jg : JobGeneration):
"""
Prefix to executable command
Returns
-------
string
multiline text for scripts
"""
p = jg.parallelization
content = """
"""+p_gen_script_info(jg)+"""
# mpiexec ... would be here without a line break
EXEC=\""""+jg.compile.getProgramPath()+"""\"
PARAMS=\""""+jg.runtime.getRuntimeOptions()+"""\"
echo \"${EXEC} ${PARAMS}\"
"""
if jg.compile.sweet_mpi == 'enable':
content += 'mpiexec -n '+str(p.num_ranks)+' '
content += "$EXEC $PARAMS || exit 1"
content += "\n"
content += "\n"
return content
def jobscript_get_exec_suffix(jg : JobGeneration):
"""
Suffix before executable
Returns
-------
string
multiline text for scripts
"""
content = ""
content += jg.runtime.get_jobscript_plan_exec_suffix(jg.compile, jg.runtime)
return content
def jobscript_get_footer(jg : JobGeneration):
"""
Footer at very end of job script
Returns
-------
string
multiline text for scripts
"""
content = """
"""+p_gen_script_info(jg)+"""
"""
return content
def jobscript_get_compile_command(jg : JobGeneration):
"""
Compile command(s)
This is separated here to put it either
* into the job script (handy for workstations)
or
* into a separate compile file (handy for clusters)
Returns
-------
string
multiline text with compile command to generate executable
"""
content = """
SCONS="scons """+jg.compile.getSConsParams()+' -j 4"'+"""
echo "$SCONS"
$SCONS || exit 1
"""
return content
|
schreiberx/sweet
|
mule/platforms/50_ppeixoto_usp_gnu/JobPlatform.py
|
Python
|
mit
| 4,663 | 0.016299 |