repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
karyon/django | django/contrib/gis/management/commands/ogrinspect.py | 369 | 5760 |
import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
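# Illustrative sketch (not part of the original command module): with these
# actions, `--null true` stores True, `--null name,state` stores
# ['name', 'state'], `--layer 1` stores the int 1, and `--layer roads`
# stores the string 'roads'.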
class Command(BaseCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument('--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.')
parser.add_argument('--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)')
parser.add_argument('--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.')
parser.add_argument('--multi-geom', action='store_true',
dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.')
parser.add_argument('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__`/`__str__` function.')
parser.add_argument('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` statement.')
parser.add_argument('--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.')
parser.add_argument('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in get_func_args(_ogrinspect) and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s' : '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
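# Usage sketch (the zipcode.shp example comes from the help text above;
# the flag values are illustrative):
#   ./manage.py ogrinspect zipcode.shp Zipcode --srid=4326 --mapping --multi-geom
# This prints a GeoDjango model for the first layer of zipcode.shp, followed by
# an auto-generated `LayerMapping` dictionary because --mapping is set.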
| bsd-3-clause |
upliftaero/MissionPlanner | Lib/encodings/cp737.py | 93 | 35635 |
""" Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp737',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
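# Round-trip sketch (assumes this module is registered as the standard
# 'cp737' codec, as it is in CPython):
#   >>> u'\u0391\u03b2\u03b3'.encode('cp737')   # alpha, beta, gamma
#   '\x80\x99\x9a'
#   >>> '\x80\x99\x9a'.decode('cp737')
#   u'\u0391\u03b2\u03b3'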
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
u'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
u'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
u'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
u'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
u'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
u'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
u'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
u'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
u'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
u'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-3.0 |
davidsminor/gaffer | python/GafferSceneUI/ExecutableRenderUI.py | 1 | 2102 |
##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import fnmatch
import GafferUI
import GafferScene
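# The first registration below suppresses nodules for every plug on
# ExecutableRender nodes (the fnmatch "*" pattern maps all plug names to None);
# the second re-enables a standard nodule for the "in" plug only.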
GafferUI.Nodule.registerNodule( GafferScene.ExecutableRender.staticTypeId(), fnmatch.translate( "*" ), lambda plug : None )
GafferUI.Nodule.registerNodule( GafferScene.ExecutableRender.staticTypeId(), "in", GafferUI.StandardNodule )
| bsd-3-clause |
FilipeMaia/arrayfire-python | arrayfire/array.py | 1 | 30063 |
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
arrayfire.Array class and helper functions.
"""
import inspect
from .library import *
from .util import *
from .util import _is_number
from .bcast import _bcast_var
from .base import *
from .index import *
from .index import _Index4
def _create_array(buf, numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_array(ct.pointer(out_arr), ct.c_void_p(buf),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def _create_empty_array(numdims, idims, dtype):
out_arr = ct.c_void_p(0)
c_dims = dim4(idims[0], idims[1], idims[2], idims[3])
safe_call(backend.get().af_create_handle(ct.pointer(out_arr),
numdims, ct.pointer(c_dims), dtype.value))
return out_arr
def constant_array(val, d0, d1=None, d2=None, d3=None, dtype=Dtype.f32):
"""
Internal function to create a C array. Should not be used externally.
"""
if not isinstance(dtype, ct.c_int):
if isinstance(dtype, int):
dtype = ct.c_int(dtype)
elif isinstance(dtype, Dtype):
dtype = ct.c_int(dtype.value)
else:
raise TypeError("Invalid dtype")
out = ct.c_void_p(0)
dims = dim4(d0, d1, d2, d3)
if isinstance(val, complex):
c_real = ct.c_double(val.real)
c_imag = ct.c_double(val.imag)
if (dtype.value != Dtype.c32.value and dtype.value != Dtype.c64.value):
dtype = Dtype.c32.value
safe_call(backend.get().af_constant_complex(ct.pointer(out), c_real, c_imag,
4, ct.pointer(dims), dtype))
elif dtype.value == Dtype.s64.value:
c_val = ct.c_longlong(val.real)
safe_call(backend.get().af_constant_long(ct.pointer(out), c_val, 4, ct.pointer(dims)))
elif dtype.value == Dtype.u64.value:
c_val = ct.c_ulonglong(val.real)
safe_call(backend.get().af_constant_ulong(ct.pointer(out), c_val, 4, ct.pointer(dims)))
else:
c_val = ct.c_double(val)
safe_call(backend.get().af_constant(ct.pointer(out), c_val, 4, ct.pointer(dims), dtype))
return out
def _binary_func(lhs, rhs, c_func):
out = Array()
other = rhs
if (_is_number(rhs)):
ldims = dim4_to_tuple(lhs.dims())
rty = implicit_dtype(rhs, lhs.type())
other = Array()
other.arr = constant_array(rhs, ldims[0], ldims[1], ldims[2], ldims[3], rty.value)
elif not isinstance(rhs, Array):
raise TypeError("Invalid parameter to binary function")
safe_call(c_func(ct.pointer(out.arr), lhs.arr, other.arr, _bcast_var.get()))
return out
def _binary_funcr(lhs, rhs, c_func):
out = Array()
other = lhs
if (_is_number(lhs)):
rdims = dim4_to_tuple(rhs.dims())
lty = implicit_dtype(lhs, rhs.type())
other = Array()
other.arr = constant_array(lhs, rdims[0], rdims[1], rdims[2], rdims[3], lty.value)
elif not isinstance(lhs, Array):
raise TypeError("Invalid parameter to binary function")
c_func(ct.pointer(out.arr), other.arr, rhs.arr, _bcast_var.get())
return out
def _ctype_to_lists(ctype_arr, dim, shape, offset=0):
if (dim == 0):
return list(ctype_arr[offset : offset + shape[0]])
else:
dim_len = shape[dim]
res = [[]] * dim_len
for n in range(dim_len):
res[n] = _ctype_to_lists(ctype_arr, dim - 1, shape, offset)
offset += shape[0]
return res
def _slice_to_length(key, dim):
tkey = [key.start, key.stop, key.step]
if tkey[0] is None:
tkey[0] = 0
elif tkey[0] < 0:
tkey[0] = dim - tkey[0]
if tkey[1] is None:
tkey[1] = dim
elif tkey[1] < 0:
tkey[1] = dim - tkey[1]
if tkey[2] is None:
tkey[2] = 1
return int(((tkey[1] - tkey[0] - 1) / tkey[2]) + 1)
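# Worked example (sketch): for key = slice(1, 7, 2) on a dimension of length 10,
# tkey becomes [1, 7, 2] and the result is int(((7 - 1 - 1) / 2) + 1) == 3,
# i.e. the three indices 1, 3 and 5.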
def _get_info(dims, buf_len):
elements = 1
numdims = len(dims)
idims = [1]*4
for i in range(numdims):
elements *= dims[i]
idims[i] = dims[i]
if (elements == 0):
if (buf_len != 0):
idims = [buf_len, 1, 1, 1]
numdims = 1
else:
raise RuntimeError("Invalid size")
return numdims, idims
def _get_indices(key):
S = Index(slice(None))
inds = _Index4(S, S, S, S)
if isinstance(key, tuple):
n_idx = len(key)
for n in range(n_idx):
inds[n] = Index(key[n])
else:
inds[0] = Index(key)
return inds
def _get_assign_dims(key, idims):
dims = [1]*4
for n in range(len(idims)):
dims[n] = idims[n]
if _is_number(key):
dims[0] = 1
return dims
elif isinstance(key, slice):
dims[0] = _slice_to_length(key, idims[0])
return dims
elif isinstance(key, ParallelRange):
dims[0] = _slice_to_length(key.S, idims[0])
return dims
elif isinstance(key, BaseArray):
# If the array is boolean take only the number of nonzeros
if(key.dtype() is Dtype.b8):
dims[0] = int(sum(key))
else:
dims[0] = key.elements()
return dims
elif isinstance(key, tuple):
n_inds = len(key)
for n in range(n_inds):
if (_is_number(key[n])):
dims[n] = 1
elif (isinstance(key[n], BaseArray)):
# If the array is boolean take only the number of nonzeros
if(key[n].dtype() is Dtype.b8):
dims[n] = int(sum(key[n]))
else:
dims[n] = key[n].elements()
elif (isinstance(key[n], slice)):
dims[n] = _slice_to_length(key[n], idims[n])
elif (isinstance(key[n], ParallelRange)):
dims[n] = _slice_to_length(key[n].S, idims[n])
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
return dims
else:
raise IndexError("Invalid type while assigning to arrayfire.array")
def transpose(a, conj=False):
"""
Perform the transpose on an input.
Parameters
-----------
a : af.Array
Multi dimensional arrayfire array.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to be applied for complex inputs.
Returns
--------
out : af.Array
Containing the transpose of `a` for all batches.
"""
out = Array()
safe_call(backend.get().af_transpose(ct.pointer(out.arr), a.arr, conj))
return out
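# Usage sketch (assumes the package re-exports this function as af.transpose,
# and builds the input Array as shown in the Array class docstring below):
#   >>> import arrayfire as af
#   >>> a = af.Array([1, 2, 3, 4], (2, 2))
#   >>> b = af.transpose(a)   # new 2x2 Array holding the transpose of `a`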
def transpose_inplace(a, conj=False):
"""
Perform inplace transpose on an input.
Parameters
-----------
a : af.Array
- Multi dimensional arrayfire array.
- Contains transposed values on exit.
conj : optional: bool. default: False.
Flag to specify if a complex conjugate needs to be applied for complex inputs.
Note
-------
Input `a` needs to be a square matrix or a batch of square matrices.
"""
safe_call(backend.get().af_transpose_inplace(a.arr, conj))
class Array(BaseArray):
"""
A multi dimensional array container.
Parameters
----------
src : optional: array.array, list or C buffer. default: None.
- When `src` is `array.array` or `list`, the data is copied to create the Array()
- When `src` is None, an empty buffer is created.
dims : optional: tuple of ints. default: (0,)
- When using the default values of `dims`, the dims are calculated as `len(src)`
dtype: optional: str or arrayfire.Dtype. default: None.
- if str, must be one of the following:
- 'f' for float
- 'd' for double
- 'b' for bool
- 'B' for unsigned char
- 'i' for signed 32 bit integer
- 'I' for unsigned 32 bit integer
- 'l' for signed 64 bit integer
- 'L' for unsigned 64 bit integer
- 'F' for 32 bit complex number
- 'D' for 64 bit complex number
- if arrayfire.Dtype, must be one of the following:
- Dtype.f32 for float
- Dtype.f64 for double
- Dtype.b8 for bool
- Dtype.u8 for unsigned char
- Dtype.s32 for signed 32 bit integer
- Dtype.u32 for unsigned 32 bit integer
- Dtype.s64 for signed 64 bit integer
- Dtype.u64 for unsigned 64 bit integer
- Dtype.c32 for 32 bit complex number
- Dtype.c64 for 64 bit complex number
- if None, Dtype.f32 is assumed
Attributes
-----------
arr: ctypes.c_void_p
ctypes variable containing af_array from arrayfire library.
Examples
--------
Creating an af.Array() from array.array()
>>> import arrayfire as af
>>> import array
>>> a = array.array('f', (1, 2, 3, 4))
>>> b = af.Array(a, (2,2))
>>> af.display(b)
[2 2 1 1]
1.0000 3.0000
2.0000 4.0000
Creating an af.Array() from a list
>>> import arrayfire as af
>>> import array
>>> a = [1, 2, 3, 4]
>>> b = af.Array(a)
>>> af.display(b)
[4 1 1 1]
1.0000
2.0000
3.0000
4.0000
Creating an af.Array() from numpy.array()
>>> import numpy as np
>>> import arrayfire as af
>>> a = np.random.random((2,2))
>>> a
array([[ 0.33042524, 0.36135449],
[ 0.86748649, 0.42199135]])
>>> b = af.Array(a.ctypes.data, a.shape, a.dtype.char)
>>> af.display(b)
[2 2 1 1]
0.3304 0.8675
0.3614 0.4220
Note
-----
- The class is currently limited to 4 dimensions.
- arrayfire.Array() uses column major format.
- numpy uses row major format by default which can cause issues during conversion
"""
def __init__(self, src=None, dims=(0,), dtype=None):
super(Array, self).__init__()
buf=None
buf_len=0
if dtype is not None:
if isinstance(dtype, str):
type_char = dtype
else:
type_char = to_typecode[dtype.value]
else:
type_char = None
_type_char='f'
backend.lock()
if src is not None:
if (isinstance(src, Array)):
safe_call(backend.get().af_retain_array(ct.pointer(self.arr), src.arr))
return
host = __import__("array")
if isinstance(src, host.array):
buf,buf_len = src.buffer_info()
_type_char = src.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, list):
tmp = host.array('f', src)
buf,buf_len = tmp.buffer_info()
_type_char = tmp.typecode
numdims, idims = _get_info(dims, buf_len)
elif isinstance(src, int) or isinstance(src, ct.c_void_p):
buf = src
numdims, idims = _get_info(dims, buf_len)
elements = 1
for dim in idims:
elements *= dim
if (elements == 0):
raise RuntimeError("Expected dims when src is data pointer")
if (type_char is None):
raise TypeError("Expected type_char when src is data pointer")
_type_char = type_char
else:
raise TypeError("src is an object of unsupported class")
if (type_char is not None and
type_char != _type_char):
raise TypeError("Can not create array of requested type from input data type")
self.arr = _create_array(buf, numdims, idims, to_dtype[_type_char])
else:
if type_char is None:
type_char = 'f'
numdims = len(dims)
idims = [1] * 4
for n in range(numdims):
idims[n] = dims[n]
self.arr = _create_empty_array(numdims, idims, to_dtype[type_char])
def copy(self):
"""
Performs a deep copy of the array.
Returns
-------
out: af.Array()
An identical copy of self.
"""
out = Array()
safe_call(backend.get().af_copy_array(ct.pointer(out.arr), self.arr))
return out
def __del__(self):
"""
Release the C array when going out of scope
"""
if self.arr.value:
backend.get().af_release_array(self.arr)
def device_ptr(self):
"""
Return the device pointer held by the array.
Returns
------
ptr : int
Contains location of the device pointer
Note
----
- This can be used to integrate with custom C code and / or PyCUDA or PyOpenCL.
- No memory copy is performed; this function returns the raw device pointer.
"""
ptr = ct.c_void_p(0)
backend.get().af_get_device_ptr(ct.pointer(ptr), self.arr)
return ptr.value
def elements(self):
"""
Return the number of elements in the array.
"""
num = ct.c_ulonglong(0)
safe_call(backend.get().af_get_elements(ct.pointer(num), self.arr))
return num.value
def dtype(self):
"""
Return the data type as an arrayfire.Dtype enum value.
"""
dty = ct.c_int(Dtype.f32.value)
safe_call(backend.get().af_get_type(ct.pointer(dty), self.arr))
return to_dtype[typecodes[dty.value]]
def type(self):
"""
Return the data type as an int.
"""
return self.dtype().value
def dims(self):
"""
Return the shape of the array as a tuple.
"""
d0 = ct.c_longlong(0)
d1 = ct.c_longlong(0)
d2 = ct.c_longlong(0)
d3 = ct.c_longlong(0)
safe_call(backend.get().af_get_dims(ct.pointer(d0), ct.pointer(d1),
ct.pointer(d2), ct.pointer(d3), self.arr))
dims = (d0.value,d1.value,d2.value,d3.value)
return dims[:self.numdims()]
def numdims(self):
"""
Return the number of dimensions of the array.
"""
nd = ct.c_uint(0)
safe_call(backend.get().af_get_numdims(ct.pointer(nd), self.arr))
return nd.value
def is_empty(self):
"""
Check if the array is empty i.e. it has no elements.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_empty(ct.pointer(res), self.arr))
return res.value
def is_scalar(self):
"""
Check if the array is scalar i.e. it has only one element.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_scalar(ct.pointer(res), self.arr))
return res.value
def is_row(self):
"""
Check if the array is a row i.e. it has a shape of (1, cols).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_row(ct.pointer(res), self.arr))
return res.value
def is_column(self):
"""
Check if the array is a column i.e. it has a shape of (rows, 1).
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_column(ct.pointer(res), self.arr))
return res.value
def is_vector(self):
"""
Check if the array is a vector i.e. it has a shape of one of the following:
- (rows, 1)
- (1, cols)
- (1, 1, vols)
- (1, 1, 1, batch)
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_vector(ct.pointer(res), self.arr))
return res.value
def is_complex(self):
"""
Check if the array is of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_complex(ct.pointer(res), self.arr))
return res.value
def is_real(self):
"""
Check if the array is not of complex type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_real(ct.pointer(res), self.arr))
return res.value
def is_double(self):
"""
Check if the array is of double precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_double(ct.pointer(res), self.arr))
return res.value
def is_single(self):
"""
Check if the array is of single precision floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_single(ct.pointer(res), self.arr))
return res.value
def is_real_floating(self):
"""
Check if the array is real and of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_realfloating(ct.pointer(res), self.arr))
return res.value
def is_floating(self):
"""
Check if the array is of floating point type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_floating(ct.pointer(res), self.arr))
return res.value
def is_integer(self):
"""
Check if the array is of integer type.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_integer(ct.pointer(res), self.arr))
return res.value
def is_bool(self):
"""
Check if the array is of type b8.
"""
res = ct.c_bool(False)
safe_call(backend.get().af_is_bool(ct.pointer(res), self.arr))
return res.value
def __add__(self, other):
"""
Return self + other.
"""
return _binary_func(self, other, backend.get().af_add)
def __iadd__(self, other):
"""
Perform self += other.
"""
self = _binary_func(self, other, backend.get().af_add)
return self
def __radd__(self, other):
"""
Return other + self.
"""
return _binary_funcr(other, self, backend.get().af_add)
def __sub__(self, other):
"""
Return self - other.
"""
return _binary_func(self, other, backend.get().af_sub)
def __isub__(self, other):
"""
Perform self -= other.
"""
self = _binary_func(self, other, backend.get().af_sub)
return self
def __rsub__(self, other):
"""
Return other - self.
"""
return _binary_funcr(other, self, backend.get().af_sub)
def __mul__(self, other):
"""
Return self * other.
"""
return _binary_func(self, other, backend.get().af_mul)
def __imul__(self, other):
"""
Perform self *= other.
"""
self = _binary_func(self, other, backend.get().af_mul)
return self
def __rmul__(self, other):
"""
Return other * self.
"""
return _binary_funcr(other, self, backend.get().af_mul)
def __truediv__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __itruediv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rtruediv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __div__(self, other):
"""
Return self / other.
"""
return _binary_func(self, other, backend.get().af_div)
def __idiv__(self, other):
"""
Perform self /= other.
"""
self = _binary_func(self, other, backend.get().af_div)
return self
def __rdiv__(self, other):
"""
Return other / self.
"""
return _binary_funcr(other, self, backend.get().af_div)
def __mod__(self, other):
"""
Return self % other.
"""
return _binary_func(self, other, backend.get().af_mod)
def __imod__(self, other):
"""
Perform self %= other.
"""
self = _binary_func(self, other, backend.get().af_mod)
return self
def __rmod__(self, other):
"""
Return other % self.
"""
return _binary_funcr(other, self, backend.get().af_mod)
def __pow__(self, other):
"""
Return self ** other.
"""
return _binary_func(self, other, backend.get().af_pow)
def __ipow__(self, other):
"""
Perform self **= other.
"""
self = _binary_func(self, other, backend.get().af_pow)
return self
def __rpow__(self, other):
"""
Return other ** self.
"""
return _binary_funcr(other, self, backend.get().af_pow)
def __lt__(self, other):
"""
Return self < other.
"""
return _binary_func(self, other, backend.get().af_lt)
def __gt__(self, other):
"""
Return self > other.
"""
return _binary_func(self, other, backend.get().af_gt)
def __le__(self, other):
"""
Return self <= other.
"""
return _binary_func(self, other, backend.get().af_le)
def __ge__(self, other):
"""
Return self >= other.
"""
return _binary_func(self, other, backend.get().af_ge)
def __eq__(self, other):
"""
Return self == other.
"""
return _binary_func(self, other, backend.get().af_eq)
def __ne__(self, other):
"""
Return self != other.
"""
return _binary_func(self, other, backend.get().af_neq)
def __and__(self, other):
"""
Return self & other.
"""
return _binary_func(self, other, backend.get().af_bitand)
def __iand__(self, other):
"""
Perform self &= other.
"""
self = _binary_func(self, other, backend.get().af_bitand)
return self
def __or__(self, other):
"""
Return self | other.
"""
return _binary_func(self, other, backend.get().af_bitor)
def __ior__(self, other):
"""
Perform self |= other.
"""
self = _binary_func(self, other, backend.get().af_bitor)
return self
def __xor__(self, other):
"""
Return self ^ other.
"""
return _binary_func(self, other, backend.get().af_bitxor)
def __ixor__(self, other):
"""
Perform self ^= other.
"""
self = _binary_func(self, other, backend.get().af_bitxor)
return self
def __lshift__(self, other):
"""
Return self << other.
"""
return _binary_func(self, other, backend.get().af_bitshiftl)
def __ilshift__(self, other):
"""
Perform self <<= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftl)
return self
def __rshift__(self, other):
"""
Return self >> other.
"""
return _binary_func(self, other, backend.get().af_bitshiftr)
def __irshift__(self, other):
"""
Perform self >>= other.
"""
self = _binary_func(self, other, backend.get().af_bitshiftr)
return self
def __neg__(self):
"""
Return -self
"""
return 0 - self
def __pos__(self):
"""
Return +self
"""
return self
def __invert__(self):
"""
Return ~self
"""
return self == 0
def __nonzero__(self):
return self != 0
# TODO:
# def __abs__(self):
# return self
def __getitem__(self, key):
"""
Return self[key]
Note
----
Ellipsis not supported as key
"""
try:
out = Array()
n_dims = self.numdims()
inds = _get_indices(key)
safe_call(backend.get().af_index_gen(ct.pointer(out.arr),
self.arr, ct.c_longlong(n_dims), inds.pointer))
return out
except RuntimeError as e:
raise IndexError(str(e))
def __setitem__(self, key, val):
"""
Perform self[key] = val
Note
----
Ellipsis not supported as key
"""
try:
n_dims = self.numdims()
if (_is_number(val)):
tdims = _get_assign_dims(key, self.dims())
other_arr = constant_array(val, tdims[0], tdims[1], tdims[2], tdims[3], self.type())
del_other = True
else:
other_arr = val.arr
del_other = False
out_arr = ct.c_void_p(0)
inds = _get_indices(key)
safe_call(backend.get().af_assign_gen(ct.pointer(out_arr),
self.arr, ct.c_longlong(n_dims), inds.pointer,
other_arr))
safe_call(backend.get().af_release_array(self.arr))
if del_other:
safe_call(backend.get().af_release_array(other_arr))
self.arr = out_arr
except RuntimeError as e:
raise IndexError(str(e))
def to_ctype(self, row_major=False, return_shape=False):
"""
Return the data as a ctype C array after copying to host memory
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: The ctypes array of the appropriate type and length.
else :
(res, dims): tuple of the ctypes array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_ctype on empty array")
tmp = transpose(self) if row_major else self
ctype_type = to_c_type[self.type()] * self.elements()
res = ctype_type()
safe_call(backend.get().af_get_data_ptr(ct.pointer(res), self.arr))
if (return_shape):
return res, self.dims()
else:
return res
def to_array(self, row_major=False, return_shape=False):
"""
Return the data as array.array
Parameters
---------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
return_shape: optional: bool. default: False.
Specifies if the shape of the array needs to be returned.
Returns
-------
If return_shape is False:
res: array.array of the appropriate type and length.
else :
(res, dims): array.array and the shape of the array
"""
if (self.arr.value == 0):
raise RuntimeError("Can not call to_array on empty array")
res = self.to_ctype(row_major, return_shape)
host = __import__("array")
h_type = to_typecode[self.type()]
if (return_shape):
return host.array(h_type, res[0]), res[1]
else:
return host.array(h_type, res)
def to_list(self, row_major=False):
"""
Return the data as a nested Python list.
Parameters
----------
row_major: optional: bool. default: False.
Specifies if a transpose needs to occur before copying to host memory.
Returns
-------
res: nested list whose nesting matches the shape of the array.
"""
ct_array, shape = self.to_ctype(row_major, True)
return _ctype_to_lists(ct_array, len(shape) - 1, shape)
def __repr__(self):
"""
Displays the meta data of the arrayfire array.
Note
----
Use arrayfire.display(a) to display the contents of the array.
"""
# Having __repr__ directly print things is a bad idea
# Placeholder for when af_array_to_string is available
# safe_call(backend.get().af_array_to_string...
return 'Type: arrayfire.Array()\nShape: %s\nType char: %s' % \
(self.dims(), to_typecode[self.type()])
def __array__(self):
"""
Constructs a numpy.array from arrayfire.Array
"""
import numpy as np
res = np.empty(self.dims(), dtype=np.dtype(to_typecode[self.type()]), order='F')
safe_call(backend.get().af_get_data_ptr(ct.c_void_p(res.ctypes.data), self.arr))
return res
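# Illustrative sketch (not a doctest): because __array__ is defined above, an
# existing af.Array instance `a` can be consumed by numpy directly; the actual
# values depend on how `a` was constructed.
#
#     import numpy as np
#     host = np.array(a)   # copies device data to host via __array__ (column major)
#     lst = a.to_list()    # nested Python lists via to_ctype()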
def display(a):
"""
Displays the contents of an array.
Parameters
----------
a : af.Array
Multi dimensional arrayfire array
"""
expr = inspect.stack()[1][-2]
try:
if (expr is not None):
st = expr[0].find('(') + 1
en = expr[0].rfind(')')
print('%s' % expr[0][st:en])
safe_call(backend.get().af_print_array(a.arr))
except Exception:
safe_call(backend.get().af_print_array(a.arr))
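# Usage sketch for display(); `randu` is assumed to be the random-array helper
# exposed elsewhere in this package and is not defined in this file.
#
#     import arrayfire as af
#     a = af.randu(3, 3)
#     af.display(a)        # prints the calling expression "a", then the contents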
from .algorithm import sum
|
bsd-3-clause
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/sympy/physics/secondquant.py
|
14
|
90708
|
"""
Second quantization operators and states for bosons.
This follows the formulation of Fetter and Walecka, "Quantum Theory
of Many-Particle Systems."
"""
from __future__ import print_function, division
from collections import defaultdict
from sympy import (Add, Basic, cacheit, Dummy, Expr, Function, I,
KroneckerDelta, Mul, Pow, S, sqrt, Symbol, sympify, Tuple,
zeros)
from sympy.core.compatibility import reduce, xrange
from sympy.printing.str import StrPrinter
from sympy.physics.quantum.qexpr import split_commutative_parts
from sympy.utilities.iterables import has_dups
from sympy.utilities import default_sort_key
__all__ = [
'Dagger',
'KroneckerDelta',
'BosonicOperator',
'AnnihilateBoson',
'CreateBoson',
'AnnihilateFermion',
'CreateFermion',
'FockState',
'FockStateBra',
'FockStateKet',
'FockStateBosonKet',
'FockStateBosonBra',
'BBra',
'BKet',
'FBra',
'FKet',
'F',
'Fd',
'B',
'Bd',
'apply_operators',
'InnerProduct',
'BosonicBasis',
'VarBosonicBasis',
'FixedBosonicBasis',
'Commutator',
'matrix_rep',
'contraction',
'wicks',
'NO',
'evaluate_deltas',
'AntiSymmetricTensor',
'substitute_dummies',
'PermutationOperator',
'simplify_index_permutations',
]
class SecondQuantizationError(Exception):
pass
class AppliesOnlyToSymbolicIndex(SecondQuantizationError):
pass
class ContractionAppliesOnlyToFermions(SecondQuantizationError):
pass
class ViolationOfPauliPrinciple(SecondQuantizationError):
pass
class SubstitutionOfAmbigousOperatorFailed(SecondQuantizationError):
pass
class WicksTheoremDoesNotApply(SecondQuantizationError):
pass
class Dagger(Expr):
"""
Hermitian conjugate of creation/annihilation operators.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
"""
def __new__(cls, arg):
arg = sympify(arg)
r = cls.eval(arg)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, arg)
return obj
@classmethod
def eval(cls, arg):
"""
Evaluates the Dagger instance.
Examples
========
>>> from sympy import I
>>> from sympy.physics.secondquant import Dagger, B, Bd
>>> Dagger(2*I)
-2*I
>>> Dagger(B(0))
CreateBoson(0)
>>> Dagger(Bd(0))
AnnihilateBoson(0)
The eval() method is called automatically.
"""
try:
d = arg._dagger_()
except AttributeError:
if isinstance(arg, Basic):
if arg.is_Add:
return Add(*tuple(map(Dagger, arg.args)))
if arg.is_Mul:
return Mul(*tuple(map(Dagger, reversed(arg.args))))
if arg.is_Number:
return arg
if arg.is_Pow:
return Pow(Dagger(arg.args[0]), arg.args[1])
if arg == I:
return -arg
else:
return None
else:
return d
def _dagger_(self):
return self.args[0]
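# Illustrative sketch (not a doctest): Dagger distributes over sums and
# reverses the order of factors in products, as implemented in Dagger.eval
# above.
#
#     >>> from sympy.physics.secondquant import Dagger, B
#     >>> Dagger(B(0)*B(1))
#     CreateBoson(1)*CreateBoson(0)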
class TensorSymbol(Expr):
is_commutative = True
class AntiSymmetricTensor(TensorSymbol):
"""Stores upper and lower indices in separate Tuple's.
Each group of indices is assumed to be antisymmetric.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (i, a), (b, j))
-AntiSymmetricTensor(v, (a, i), (b, j))
As you can see, the indices are automatically sorted to a canonical form.
"""
def __new__(cls, symbol, upper, lower):
try:
upper, signu = _sort_anticommuting_fermions(
upper, key=cls._sortkey)
lower, signl = _sort_anticommuting_fermions(
lower, key=cls._sortkey)
except ViolationOfPauliPrinciple:
return S.Zero
symbol = sympify(symbol)
upper = Tuple(*upper)
lower = Tuple(*lower)
if (signu + signl) % 2:
return -TensorSymbol.__new__(cls, symbol, upper, lower)
else:
return TensorSymbol.__new__(cls, symbol, upper, lower)
@classmethod
def _sortkey(cls, index):
"""Key for sorting of indices.
particle < hole < general
FIXME: This is a bottle-neck, can we do it faster?
"""
h = hash(index)
if isinstance(index, Dummy):
if index.assumptions0.get('above_fermi'):
return (20, h)
elif index.assumptions0.get('below_fermi'):
return (21, h)
else:
return (22, h)
if index.assumptions0.get('above_fermi'):
return (10, h)
elif index.assumptions0.get('below_fermi'):
return (11, h)
else:
return (12, h)
def _latex(self, printer):
return "%s^{%s}_{%s}" % (
self.symbol,
"".join([ i.name for i in self.args[1]]),
"".join([ i.name for i in self.args[2]])
)
@property
def symbol(self):
"""
Returns the symbol of the tensor.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).symbol
v
"""
return self.args[0]
@property
def upper(self):
"""
Returns the upper indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).upper
(a, i)
"""
return self.args[1]
@property
def lower(self):
"""
Returns the lower indices.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j))
AntiSymmetricTensor(v, (a, i), (b, j))
>>> AntiSymmetricTensor('v', (a, i), (b, j)).lower
(b, j)
"""
return self.args[2]
def __str__(self):
return "%s(%s,%s)" % self.args
def doit(self, **kw_args):
"""
Returns self.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import AntiSymmetricTensor
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> AntiSymmetricTensor('v', (a, i), (b, j)).doit()
AntiSymmetricTensor(v, (a, i), (b, j))
"""
return self
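# Illustrative sketch (not a doctest): a repeated index inside one
# antisymmetric index group violates the Pauli principle, so __new__ above
# collapses the tensor to zero.
#
#     >>> from sympy import symbols
#     >>> from sympy.physics.secondquant import AntiSymmetricTensor
#     >>> a = symbols('a', above_fermi=True)
#     >>> i, j = symbols('i j', below_fermi=True)
#     >>> AntiSymmetricTensor('v', (a, a), (i, j))
#     0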
class SqOperator(Expr):
"""
Base class for Second Quantization operators.
"""
op_symbol = 'sq'
is_commutative = False
def __new__(cls, k):
obj = Basic.__new__(cls, sympify(k))
return obj
@property
def state(self):
"""
Returns the state index related to this operator.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd, B, Bd
>>> p = Symbol('p')
>>> F(p).state
p
>>> Fd(p).state
p
>>> B(p).state
p
>>> Bd(p).state
p
"""
return self.args[0]
@property
def is_symbolic(self):
"""
Returns True if the state is a symbol (as opposed to a number).
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> p = Symbol('p')
>>> F(p).is_symbolic
True
>>> F(1).is_symbolic
False
"""
if self.state.is_Integer:
return False
else:
return True
def doit(self, **kw_args):
"""
FIXME: hack to prevent crash further up...
"""
return self
def __repr__(self):
return NotImplemented
def __str__(self):
return "%s(%r)" % (self.op_symbol, self.state)
def apply_operator(self, state):
"""
Applies an operator to itself.
"""
raise NotImplementedError('implement apply_operator in a subclass')
class BosonicOperator(SqOperator):
pass
class Annihilator(SqOperator):
pass
class Creator(SqOperator):
pass
class AnnihilateBoson(BosonicOperator, Annihilator):
"""
Bosonic annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import B
>>> from sympy.abc import x
>>> B(x)
AnnihilateBoson(x)
"""
op_symbol = 'b'
def _dagger_(self):
return CreateBoson(self.state)
def apply_operator(self, state):
"""
Apply self to state if self is not symbolic and state is a FockStateKet;
otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, BKet
>>> from sympy.abc import x, y, n
>>> B(x).apply_operator(y)
y*AnnihilateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element])
return amp*state.down(element)
else:
return Mul(self, state)
def __repr__(self):
return "AnnihilateBoson(%s)" % self.state
class CreateBoson(BosonicOperator, Creator):
"""
Bosonic creation operator.
"""
op_symbol = 'b+'
def _dagger_(self):
return AnnihilateBoson(self.state)
def apply_operator(self, state):
"""
Apply self to state if self is not symbolic and state is a FockStateKet;
otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if not self.is_symbolic and isinstance(state, FockStateKet):
element = self.state
amp = sqrt(state[element] + 1)
return amp*state.up(element)
else:
return Mul(self, state)
def __repr__(self):
return "CreateBoson(%s)" % self.state
B = AnnihilateBoson
Bd = CreateBoson
class FermionicOperator(SqOperator):
@property
def is_restricted(self):
"""
Is this FermionicOperator restricted with respect to fermi level?
Return values:
1 : restricted to orbits above fermi
0 : no restriction
-1 : restricted to orbits below fermi
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_restricted
1
>>> Fd(a).is_restricted
1
>>> F(i).is_restricted
-1
>>> Fd(i).is_restricted
-1
>>> F(p).is_restricted
0
>>> Fd(p).is_restricted
0
"""
ass = self.args[0].assumptions0
if ass.get("below_fermi"):
return -1
if ass.get("above_fermi"):
return 1
return 0
@property
def is_above_fermi(self):
"""
Does the index of this FermionicOperator allow values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_above_fermi
True
>>> F(i).is_above_fermi
False
>>> F(p).is_above_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("below_fermi")
@property
def is_below_fermi(self):
"""
Does the index of this FermionicOperator allow values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_below_fermi
False
>>> F(i).is_below_fermi
True
>>> F(p).is_below_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("above_fermi")
@property
def is_only_below_fermi(self):
"""
Is the index of this FermionicOperator restricted to values below fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_below_fermi
False
>>> F(i).is_only_below_fermi
True
>>> F(p).is_only_below_fermi
False
The same applies to creation operators Fd
"""
return self.is_below_fermi and not self.is_above_fermi
@property
def is_only_above_fermi(self):
"""
Is the index of this FermionicOperator restricted to values above fermi?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_above_fermi
True
>>> F(i).is_only_above_fermi
False
>>> F(p).is_only_above_fermi
False
The same applies to creation operators Fd
"""
return self.is_above_fermi and not self.is_below_fermi
def _sortkey(self):
h = hash(self)
label = str(self.args[0])
if self.is_only_q_creator:
return 1, label, h
if self.is_only_q_annihilator:
return 4, label, h
if isinstance(self, Annihilator):
return 3, label, h
if isinstance(self, Creator):
return 2, label, h
class AnnihilateFermion(FermionicOperator, Annihilator):
"""
Fermionic annihilation operator.
"""
op_symbol = 'f'
def _dagger_(self):
return CreateFermion(self.state)
def apply_operator(self, state):
"""
Apply self to state if self is not symbolic and state is a FockStateKet;
otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.down(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].down(element)] + nc_part[1:]))
else:
return Mul(self, state)
else:
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_q_creator
0
>>> F(i).is_q_creator
-1
>>> F(p).is_q_creator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> F(a).is_q_annihilator
1
>>> F(i).is_q_annihilator
0
>>> F(p).is_q_annihilator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_creator
False
>>> F(i).is_only_q_creator
True
>>> F(p).is_only_q_creator
False
"""
return self.is_only_below_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_q_annihilator
True
>>> F(i).is_only_q_annihilator
False
>>> F(p).is_only_q_annihilator
False
"""
return self.is_only_above_fermi
def __repr__(self):
return "AnnihilateFermion(%s)" % self.state
def _latex(self, printer):
return "a_{%s}" % self.state.name
class CreateFermion(FermionicOperator, Creator):
"""
Fermionic creation operator.
"""
op_symbol = 'f+'
def _dagger_(self):
return AnnihilateFermion(self.state)
def apply_operator(self, state):
"""
Apply self to state if self is not symbolic and state is a FockStateKet;
otherwise, multiply self by state.
Examples
========
>>> from sympy.physics.secondquant import B, Dagger, BKet
>>> from sympy.abc import x, y, n
>>> Dagger(B(x)).apply_operator(y)
y*CreateBoson(x)
>>> B(0).apply_operator(BKet((n,)))
sqrt(n)*FockStateBosonKet((n - 1,))
"""
if isinstance(state, FockStateFermionKet):
element = self.state
return state.up(element)
elif isinstance(state, Mul):
c_part, nc_part = state.args_cnc()
if isinstance(nc_part[0], FockStateFermionKet):
element = self.state
return Mul(*(c_part + [nc_part[0].up(element)] + nc_part[1:]))
return Mul(self, state)
@property
def is_q_creator(self):
"""
Can we create a quasi-particle? (create hole or create particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_q_creator
1
>>> Fd(i).is_q_creator
0
>>> Fd(p).is_q_creator
1
"""
if self.is_above_fermi:
return 1
return 0
@property
def is_q_annihilator(self):
"""
Can we destroy a quasi-particle? (annihilate hole or annihilate particle)
If so, would that be above or below the fermi surface?
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=1)
>>> i = Symbol('i', below_fermi=1)
>>> p = Symbol('p')
>>> Fd(a).is_q_annihilator
0
>>> Fd(i).is_q_annihilator
-1
>>> Fd(p).is_q_annihilator
-1
"""
if self.is_below_fermi:
return -1
return 0
@property
def is_only_q_creator(self):
"""
Always create a quasi-particle? (create hole or create particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_creator
True
>>> Fd(i).is_only_q_creator
False
>>> Fd(p).is_only_q_creator
False
"""
return self.is_only_above_fermi
@property
def is_only_q_annihilator(self):
"""
Always destroy a quasi-particle? (annihilate hole or annihilate particle)
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> Fd(a).is_only_q_annihilator
False
>>> Fd(i).is_only_q_annihilator
True
>>> Fd(p).is_only_q_annihilator
False
"""
return self.is_only_below_fermi
def __repr__(self):
return "CreateFermion(%s)" % self.state
def _latex(self, printer):
return "a^\\dagger_{%s}" % self.state.name
Fd = CreateFermion
F = AnnihilateFermion
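# Illustrative sketch (not a doctest): the fermionic aliases mirror the
# bosonic ones under Dagger via the _dagger_ hooks defined above.
#
#     >>> from sympy.physics.secondquant import Dagger, F, Fd
#     >>> from sympy.abc import p
#     >>> Dagger(F(p))
#     CreateFermion(p)
#     >>> Dagger(Fd(p))
#     AnnihilateFermion(p)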
class FockState(Expr):
"""
Many particle Fock state with a sequence of occupation numbers.
Anywhere you can have a FockState, you can also have S.Zero.
All code must check for this!
Base class to represent FockStates.
"""
is_commutative = False
def __new__(cls, occupations):
"""
occupations is a list with two possible meanings:
- For bosons it is a list of occupation numbers.
Element i is the number of particles in state i.
- For fermions it is a list of occupied orbits.
Element 0 is the state that was occupied first, element i
is the i'th occupied state.
"""
occupations = list(map(sympify, occupations))
obj = Basic.__new__(cls, Tuple(*occupations))
return obj
def __getitem__(self, i):
i = int(i)
return self.args[0][i]
def __repr__(self):
return ("FockState(%r)") % (self.args)
def __str__(self):
return "%s%r%s" % (self.lbracket, self._labels(), self.rbracket)
def _labels(self):
return self.args[0]
def __len__(self):
return len(self.args[0])
class BosonState(FockState):
"""
Base class for FockStateBoson(Ket/Bra).
"""
def up(self, i):
"""
Performs the action of a creation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.up(1)
FockStateBosonBra((1, 3))
"""
i = int(i)
new_occs = list(self.args[0])
new_occs[i] = new_occs[i] + S.One
return self.__class__(new_occs)
def down(self, i):
"""
Performs the action of an annihilation operator.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> b = BBra([1, 2])
>>> b
FockStateBosonBra((1, 2))
>>> b.down(1)
FockStateBosonBra((1, 1))
"""
i = int(i)
new_occs = list(self.args[0])
if new_occs[i] == S.Zero:
return S.Zero
else:
new_occs[i] = new_occs[i] - S.One
return self.__class__(new_occs)
class FermionState(FockState):
"""
Base class for FockStateFermion(Ket/Bra).
"""
fermi_level = 0
def __new__(cls, occupations, fermi_level=0):
occupations = list(map(sympify, occupations))
if len(occupations) > 1:
try:
(occupations, sign) = _sort_anticommuting_fermions(
occupations, key=hash)
except ViolationOfPauliPrinciple:
return S.Zero
else:
sign = 0
cls.fermi_level = fermi_level
if cls._count_holes(occupations) > fermi_level:
return S.Zero
if sign % 2:
return S.NegativeOne*FockState.__new__(cls, occupations)
else:
return FockState.__new__(cls, occupations)
def up(self, i):
"""
Performs the action of a creation operator.
If below fermi we try to remove a hole,
if above fermi we try to create a particle.
If a general index p is given, we return Kronecker(p,i)*self
where i is a new symbol with restriction above or below.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> FKet([]).up(a)
FockStateFermionKet((a,))
A creator acting on vacuum below fermi vanishes
>>> FKet([]).up(i)
0
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
elif self._only_below_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._remove_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._add_orbit(i)
def down(self, i):
"""
Performs the action of an annihilation operator.
If below fermi we try to create a hole,
if above fermi we try to remove a particle.
If a general index p is given, we return Kronecker(p,i)*self
where i is a new symbol with restriction above or below.
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import FKet
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
An annihilator acting on vacuum above fermi vanishes
>>> FKet([]).down(a)
0
Also below fermi, it vanishes, unless we specify a fermi level > 0
>>> FKet([]).down(i)
0
>>> FKet([],4).down(i)
FockStateFermionKet((i,))
"""
present = i in self.args[0]
if self._only_above_fermi(i):
if present:
return self._remove_orbit(i)
else:
return S.Zero
elif self._only_below_fermi(i):
if present:
return S.Zero
else:
return self._add_orbit(i)
else:
if present:
hole = Dummy("i", below_fermi=True)
return KroneckerDelta(i, hole)*self._add_orbit(i)
else:
particle = Dummy("a", above_fermi=True)
return KroneckerDelta(i, particle)*self._remove_orbit(i)
@classmethod
def _only_below_fermi(cls, i):
"""
Tests if given orbit is only below fermi surface.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i <= cls.fermi_level
if i.assumptions0.get('below_fermi'):
return True
return False
@classmethod
def _only_above_fermi(cls, i):
"""
Tests if given orbit is only above fermi surface.
If fermi level has not been set we return True.
If nothing can be concluded we return a conservative False.
"""
if i.is_number:
return i > cls.fermi_level
if i.assumptions0.get('above_fermi'):
return True
return not cls.fermi_level
def _remove_orbit(self, i):
"""
Removes particle/fills hole in orbit i. No input tests performed here.
"""
new_occs = list(self.args[0])
pos = new_occs.index(i)
del new_occs[pos]
if (pos) % 2:
return S.NegativeOne*self.__class__(new_occs, self.fermi_level)
else:
return self.__class__(new_occs, self.fermi_level)
def _add_orbit(self, i):
"""
Adds particle/creates hole in orbit i. No input tests performed here.
"""
return self.__class__((i,) + self.args[0], self.fermi_level)
@classmethod
def _count_holes(cls, list):
"""
returns number of identified hole states in list.
"""
return len([i for i in list if cls._only_below_fermi(i)])
def _negate_holes(self, list):
return tuple([-i if i <= self.fermi_level else i for i in list])
def __repr__(self):
if self.fermi_level:
return "FockStateKet(%r, fermi_level=%s)" % (self.args[0], self.fermi_level)
else:
return "FockStateKet(%r)" % (self.args[0],)
def _labels(self):
return self._negate_holes(self.args[0])
class FockStateKet(FockState):
"""
Representation of a ket.
"""
lbracket = '|'
rbracket = '>'
class FockStateBra(FockState):
"""
Representation of a bra.
"""
lbracket = '<'
rbracket = '|'
def __mul__(self, other):
if isinstance(other, FockStateKet):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
class FockStateBosonKet(BosonState, FockStateKet):
"""
Many particle Fock state with a sequence of occupation numbers.
Occupation numbers can be any integer >= 0.
Examples
========
>>> from sympy.physics.secondquant import BKet
>>> BKet([1, 2])
FockStateBosonKet((1, 2))
"""
def _dagger_(self):
return FockStateBosonBra(*self.args)
class FockStateBosonBra(BosonState, FockStateBra):
"""
Describes a collection of BosonBra particles.
Examples
========
>>> from sympy.physics.secondquant import BBra
>>> BBra([1, 2])
FockStateBosonBra((1, 2))
"""
def _dagger_(self):
return FockStateBosonKet(*self.args)
class FockStateFermionKet(FermionState, FockStateKet):
"""
Many-particle Fock state with a sequence of occupied orbits.
Each state can only have one particle, so we choose to store a list of
occupied orbits rather than a tuple with occupation numbers (zeros and ones).
States below the fermi level are holes, and are represented by negative labels
in the occupation list.
For symbolic state labels, the fermi_level caps the number of allowed hole-
states.
Examples
========
>>> from sympy.physics.secondquant import FKet
>>> FKet([1, 2]) #doctest: +SKIP
FockStateFermionKet((1, 2))
"""
def _dagger_(self):
return FockStateFermionBra(*self.args)
class FockStateFermionBra(FermionState, FockStateBra):
"""
See Also
========
FockStateFermionKet
Examples
========
>>> from sympy.physics.secondquant import FBra
>>> FBra([1, 2]) #doctest: +SKIP
FockStateFermionBra((1, 2))
"""
def _dagger_(self):
return FockStateFermionKet(*self.args)
BBra = FockStateBosonBra
BKet = FockStateBosonKet
FBra = FockStateFermionBra
FKet = FockStateFermionKet
def _apply_Mul(m):
"""
Take a Mul instance with operators and apply them to states.
This method applies all operators with integer state labels
to the actual states. For symbolic state labels, nothing is done.
When inner products of FockStates are encountered (like <a|b>),
they are converted to instances of InnerProduct.
This does not currently work on double inner products like,
<a|b><c|d>.
If the argument is not a Mul, it is simply returned as is.
"""
if not isinstance(m, Mul):
return m
c_part, nc_part = m.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return m
else:
last = nc_part[-1]
next_to_last = nc_part[-2]
if isinstance(last, FockStateKet):
if isinstance(next_to_last, SqOperator):
if next_to_last.is_symbolic:
return m
else:
result = next_to_last.apply_operator(last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
elif isinstance(next_to_last, Pow):
if isinstance(next_to_last.base, SqOperator) and \
next_to_last.exp.is_Integer:
if next_to_last.base.is_symbolic:
return m
else:
result = last
for i in range(next_to_last.exp):
result = next_to_last.base.apply_operator(result)
if result == 0:
break
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
elif isinstance(next_to_last, FockStateBra):
result = InnerProduct(next_to_last, last)
if result == 0:
return S.Zero
else:
return _apply_Mul(Mul(*(c_part + nc_part[:-2] + [result])))
else:
return m
else:
return m
def apply_operators(e):
"""
Take a sympy expression with operators and states and apply the operators.
Examples
========
>>> from sympy.physics.secondquant import apply_operators
>>> from sympy import sympify
>>> apply_operators(sympify(3)+4)
7
"""
e = e.expand()
muls = e.atoms(Mul)
subs_list = [(m, _apply_Mul(m)) for m in iter(muls)]
return e.subs(subs_list)
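# Illustrative sketch (not a doctest): apply_operators also resolves products
# of ladder operators and numeric Fock states through _apply_Mul above.
#
#     >>> from sympy.physics.secondquant import apply_operators, Bd, BKet
#     >>> apply_operators(Bd(0)*BKet([2]))
#     sqrt(3)*FockStateBosonKet((3,))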
class InnerProduct(Basic):
"""
An unevaluated inner product between a bra and ket.
Currently this class just reduces things to a product of
Kronecker Deltas. In the future, we could introduce abstract
states like ``|a>`` and ``|b>``, and leave the inner product unevaluated as
``<a|b>``.
"""
is_commutative = True
def __new__(cls, bra, ket):
if not isinstance(bra, FockStateBra):
raise TypeError("must be a bra")
if not isinstance(ket, FockStateKet):
raise TypeError("must be a key")
return cls.eval(bra, ket)
@classmethod
def eval(cls, bra, ket):
result = S.One
for i, j in zip(bra.args[0], ket.args[0]):
result *= KroneckerDelta(i, j)
if result == 0:
break
return result
@property
def bra(self):
"""Returns the bra part of the state"""
return self.args[0]
@property
def ket(self):
"""Returns the ket part of the state"""
return self.args[1]
def __repr__(self):
sbra = repr(self.bra)
sket = repr(self.ket)
return "%s|%s" % (sbra[:-1], sket[1:])
def __str__(self):
return self.__repr__()
def matrix_rep(op, basis):
"""
Find the representation of an operator in a basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis, B, matrix_rep
>>> b = VarBosonicBasis(5)
>>> o = B(0)
>>> matrix_rep(o, b)
Matrix([
[0, 1, 0, 0, 0],
[0, 0, sqrt(2), 0, 0],
[0, 0, 0, sqrt(3), 0],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 0]])
"""
a = zeros(len(basis))
for i in range(len(basis)):
for j in range(len(basis)):
a[i, j] = apply_operators(Dagger(basis[i])*op*basis[j])
return a
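# Illustrative sketch (not a doctest): the number operator Bd(0)*B(0) should
# come out diagonal in a VarBosonicBasis (defined below), with the occupation
# numbers on the diagonal.
#
#     >>> from sympy.physics.secondquant import VarBosonicBasis, B, Bd, matrix_rep
#     >>> matrix_rep(Bd(0)*B(0), VarBosonicBasis(3))
#     Matrix([
#     [0, 0, 0],
#     [0, 1, 0],
#     [0, 0, 2]])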
class BosonicBasis(object):
"""
Base class for a basis set of bosonic Fock states.
"""
pass
class VarBosonicBasis(object):
"""
A single state, variable particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,)),
FockState((3,)), FockState((4,))]
"""
def __init__(self, n_max):
self.n_max = n_max
self._build_states()
def _build_states(self):
self.basis = []
for i in range(self.n_max):
self.basis.append(FockStateBosonKet([i]))
self.n_basis = len(self.basis)
def index(self, state):
"""
Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(3)
>>> state = b.state(1)
>>> b
[FockState((0,)), FockState((1,)), FockState((2,))]
>>> state
FockStateBosonKet((1,))
>>> b.index(state)
1
"""
return self.basis.index(state)
def state(self, i):
"""
Returns the state at index i of the basis.
Examples
========
>>> from sympy.physics.secondquant import VarBosonicBasis
>>> b = VarBosonicBasis(5)
>>> b.state(3)
FockStateBosonKet((3,))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
class FixedBosonicBasis(BosonicBasis):
"""
Fixed particle number basis set.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 2)
>>> state = b.state(1)
>>> b
[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
>>> state
FockStateBosonKet((1, 1))
>>> b.index(state)
1
"""
def __init__(self, n_particles, n_levels):
self.n_particles = n_particles
self.n_levels = n_levels
self._build_particle_locations()
self._build_states()
def _build_particle_locations(self):
tup = ["i%i" % i for i in range(self.n_particles)]
first_loop = "for i0 in range(%i)" % self.n_levels
other_loops = ''
for cur, prev in zip(tup[1:], tup):
temp = "for %s in range(%s + 1) " % (cur, prev)
other_loops = other_loops + temp
tup_string = "(%s)" % ", ".join(tup)
list_comp = "[%s %s %s]" % (tup_string, first_loop, other_loops)
result = eval(list_comp)
if self.n_particles == 1:
result = [(item,) for item in result]
self.particle_locations = result
def _build_states(self):
self.basis = []
for tuple_of_indices in self.particle_locations:
occ_numbers = self.n_levels*[0]
for level in tuple_of_indices:
occ_numbers[level] += 1
self.basis.append(FockStateBosonKet(occ_numbers))
self.n_basis = len(self.basis)
def index(self, state):
"""Returns the index of state in basis.
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.index(b.state(3))
3
"""
return self.basis.index(state)
def state(self, i):
"""Returns the state that lies at index i of the basis
Examples
========
>>> from sympy.physics.secondquant import FixedBosonicBasis
>>> b = FixedBosonicBasis(2, 3)
>>> b.state(3)
FockStateBosonKet((1, 0, 1))
"""
return self.basis[i]
def __getitem__(self, i):
return self.state(i)
def __len__(self):
return len(self.basis)
def __repr__(self):
return repr(self.basis)
# def move(e, i, d):
# """
# Takes the expression "e" and moves the operator at the position i by "d".
# """
# if e.is_Mul:
# if d == 1:
# # e = a*b*c*d
# a = Mul(*e.args[:i])
# b = e.args[i]
# c = e.args[i+1]
# d = Mul(*e.args[i+2:])
# if isinstance(b, Dagger) and not isinstance(c, Dagger):
# i, j = b.args[0].args[0], c.args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# elif not isinstance(b, Dagger) and isinstance(c, Dagger):
# i, j = b.args[0], c.args[0].args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# else:
# return a*c*b*d
# elif d == -1:
# # e = a*b*c*d
# a = Mul(*e.args[:i-1])
# b = e.args[i-1]
# c = e.args[i]
# d = Mul(*e.args[i+1:])
# if isinstance(b, Dagger) and not isinstance(c, Dagger):
# i, j = b.args[0].args[0], c.args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# elif not isinstance(b, Dagger) and isinstance(c, Dagger):
# i, j = b.args[0], c.args[0].args[0]
# return a*c*b*d-a*KroneckerDelta(i, j)*d
# else:
# return a*c*b*d
# else:
# if d > 1:
# while d >= 1:
# e = move(e, i, 1)
# d -= 1
# i += 1
# return e
# elif d < -1:
# while d <= -1:
# e = move(e, i, -1)
# d += 1
# i -= 1
# return e
# elif isinstance(e, Add):
# a, b = e.as_two_terms()
# return move(a, i, d) + move(b, i, d)
# raise NotImplementedError()
class Commutator(Function):
"""
The Commutator: [A, B] = A*B - B*A
The arguments are ordered according to their sort keys (see sort_key()).
>>> from sympy import symbols
>>> from sympy.physics.secondquant import Commutator
>>> A, B = symbols('A,B', commutative=False)
>>> Commutator(B, A)
-Commutator(A, B)
Evaluate the commutator with .doit()
>>> comm = Commutator(A,B); comm
Commutator(A, B)
>>> comm.doit()
A*B - B*A
For two second quantization operators the commutator is evaluated
immediately:
>>> from sympy.physics.secondquant import Fd, F
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> p,q = symbols('p,q')
>>> Commutator(Fd(a),Fd(i))
2*NO(CreateFermion(a)*CreateFermion(i))
But for more complicated expressions, the evaluation is triggered by
a call to .doit()
>>> comm = Commutator(Fd(p)*Fd(q),F(i)); comm
Commutator(CreateFermion(p)*CreateFermion(q), AnnihilateFermion(i))
>>> comm.doit(wicks=True)
-KroneckerDelta(i, p)*CreateFermion(q) +
KroneckerDelta(i, q)*CreateFermion(p)
"""
is_commutative = False
@classmethod
def eval(cls, a, b):
"""
The Commutator [A,B] is in canonical form if A < B.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy.abc import x
>>> c1 = Commutator(F(x), Fd(x))
>>> c2 = Commutator(Fd(x), F(x))
>>> Commutator.eval(c1, c2)
0
"""
if not (a and b):
return S.Zero
if a == b:
return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
#
# [A+B,C] -> [A,C] + [B,C]
#
a = a.expand()
if isinstance(a, Add):
return Add(*[cls(term, b) for term in a.args])
b = b.expand()
if isinstance(b, Add):
return Add(*[cls(a, term) for term in b.args])
#
# [xA,yB] -> xy*[A,B]
#
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = list(ca) + list(cb)
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
#
# single second quantization operators
#
if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):
if isinstance(b, CreateBoson) and isinstance(a, AnnihilateBoson):
return KroneckerDelta(a.state, b.state)
if isinstance(a, CreateBoson) and isinstance(b, AnnihilateBoson):
return S.NegativeOne*KroneckerDelta(a.state, b.state)
else:
return S.Zero
if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):
return wicks(a*b) - wicks(b*a)
#
# Canonical ordering of arguments
#
if a.sort_key() > b.sort_key():
return S.NegativeOne*cls(b, a)
def doit(self, **hints):
"""
Enables the computation of complex expressions.
Examples
========
>>> from sympy.physics.secondquant import Commutator, F, Fd
>>> from sympy import symbols
>>> i, j = symbols('i,j', below_fermi=True)
>>> a, b = symbols('a,b', above_fermi=True)
>>> c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
>>> c.doit(wicks=True)
0
"""
a = self.args[0]
b = self.args[1]
if hints.get("wicks"):
a = a.doit(**hints)
b = b.doit(**hints)
try:
return wicks(a*b) - wicks(b*a)
except ContractionAppliesOnlyToFermions:
pass
except WicksTheoremDoesNotApply:
pass
return (a*b - b*a).doit(**hints)
def __repr__(self):
return "Commutator(%s,%s)" % (self.args[0], self.args[1])
def __str__(self):
return "[%s,%s]" % (self.args[0], self.args[1])
def _latex(self, printer):
return "\\left[%s,%s\\right]" % tuple([
printer._print(arg) for arg in self.args])
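# Illustrative sketch (not a doctest): for bosonic ladder operators the
# canonical commutation relation is returned immediately by Commutator.eval
# above.
#
#     >>> from sympy import symbols
#     >>> from sympy.physics.secondquant import Commutator, B, Bd
#     >>> p, q = symbols('p q')
#     >>> Commutator(B(p), Bd(q))
#     KroneckerDelta(p, q)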
class NO(Expr):
"""
This object is used to represent normal ordering brackets,
i.e. {abcd}, sometimes written :abcd:.
Applying the function NO(arg) to an argument means that all operators in
the argument will be assumed to anticommute, and have vanishing
contractions. This allows an immediate reordering to canonical form
upon object creation.
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> p,q = symbols('p,q')
>>> NO(Fd(p)*F(q))
NO(CreateFermion(p)*AnnihilateFermion(q))
>>> NO(F(q)*Fd(p))
-NO(CreateFermion(p)*AnnihilateFermion(q))
Note:
If you want to generate a normal ordered equivalent of an expression, you
should use the function wicks(). This class only indicates that all
operators inside the brackets anticommute, and have vanishing contractions.
Nothing more, nothing less.
"""
is_commutative = False
def __new__(cls, arg):
"""
Use anticommutation to get canonical form of operators.
Employ associativity of normal ordered product: {ab{cd}} = {abcd}
but note that {ab}{cd} != {abcd}.
We also employ distributivity: {ab + cd} = {ab} + {cd}.
Canonical form also implies expand() {ab(c+d)} = {abc} + {abd}.
"""
# {ab + cd} = {ab} + {cd}
arg = sympify(arg)
arg = arg.expand()
if arg.is_Add:
return Add(*[ cls(term) for term in arg.args])
if arg.is_Mul:
# take coefficient outside of normal ordering brackets
c_part, seq = arg.args_cnc()
if c_part:
coeff = Mul(*c_part)
if not seq:
return coeff
else:
coeff = S.One
# {ab{cd}} = {abcd}
newseq = []
foundit = False
for fac in seq:
if isinstance(fac, NO):
newseq.extend(fac.args)
foundit = True
else:
newseq.append(fac)
if foundit:
return coeff*cls(Mul(*newseq))
# We assume that the user doesn't mix B and F operators
if isinstance(seq[0], BosonicOperator):
raise NotImplementedError
try:
newseq, sign = _sort_anticommuting_fermions(seq)
except ViolationOfPauliPrinciple:
return S.Zero
if sign % 2:
return (S.NegativeOne*coeff)*cls(Mul(*newseq))
elif sign:
return coeff*cls(Mul(*newseq))
else:
pass # since sign==0, no permutation was necessary
# if we couldn't do anything with Mul object, we just
# mark it as normal ordered
if coeff != S.One:
return coeff*cls(Mul(*newseq))
return Expr.__new__(cls, Mul(*newseq))
if isinstance(arg, NO):
return arg
# if object was not Mul or Add, normal ordering does not apply
return arg
@property
def has_q_creators(self):
"""
Return 0 if the leftmost argument of the first argument is not a
q_creator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_creators
1
>>> NO(F(i)*F(a)).has_q_creators
-1
>>> NO(Fd(i)*F(a)).has_q_creators #doctest: +SKIP
0
"""
return self.args[0].args[0].is_q_creator
@property
def has_q_annihilators(self):
"""
Return 0 if the rightmost argument of the first argument is not a
q_annihilator, else 1 if it is above fermi or -1 if it is below fermi.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import NO, F, Fd
>>> a = symbols('a', above_fermi=True)
>>> i = symbols('i', below_fermi=True)
>>> NO(Fd(a)*Fd(i)).has_q_annihilators
-1
>>> NO(F(i)*F(a)).has_q_annihilators
1
>>> NO(Fd(a)*F(i)).has_q_annihilators
0
"""
return self.args[0].args[-1].is_q_annihilator
def doit(self, **kw_args):
"""
Either removes the brackets or enables complex computations
in its arguments.
Examples
========
>>> from sympy.physics.secondquant import NO, Fd, F
>>> from textwrap import fill
>>> from sympy import symbols, Dummy
>>> p,q = symbols('p,q', cls=Dummy)
>>> print(fill(str(NO(Fd(p)*F(q)).doit())))
KroneckerDelta(_a, _p)*KroneckerDelta(_a,
_q)*CreateFermion(_a)*AnnihilateFermion(_a) + KroneckerDelta(_a,
_p)*KroneckerDelta(_i, _q)*CreateFermion(_a)*AnnihilateFermion(_i) -
KroneckerDelta(_a, _q)*KroneckerDelta(_i,
_p)*AnnihilateFermion(_a)*CreateFermion(_i) - KroneckerDelta(_i,
_p)*KroneckerDelta(_i, _q)*AnnihilateFermion(_i)*CreateFermion(_i)
"""
if kw_args.get("remove_brackets", True):
return self._remove_brackets()
else:
return self.__new__(type(self), self.args[0].doit(**kw_args))
def _remove_brackets(self):
"""
Returns the sorted string without normal order brackets.
The returned string has the property that no nonzero
contractions exist.
"""
# check if any creator is also an annihilator
subslist = []
for i in self.iter_q_creators():
if self[i].is_q_annihilator:
assume = self[i].state.assumptions0
# only operators with a dummy index can be split in two terms
if isinstance(self[i].state, Dummy):
# create indices with fermi restriction
assume.pop("above_fermi", None)
assume["below_fermi"] = True
below = Dummy('i', **assume)
assume.pop("below_fermi", None)
assume["above_fermi"] = True
above = Dummy('a', **assume)
cls = type(self[i])
split = (
self[i].__new__(cls, below)
* KroneckerDelta(below, self[i].state)
+ self[i].__new__(cls, above)
* KroneckerDelta(above, self[i].state)
)
subslist.append((self[i], split))
else:
raise SubstitutionOfAmbigousOperatorFailed(self[i])
if subslist:
result = NO(self.subs(subslist))
if isinstance(result, Add):
return Add(*[term.doit() for term in result.args])
else:
return self.args[0]
def _expand_operators(self):
"""
Returns a sum of NO objects that contain no ambiguous q-operators.
If an index q has range both above and below fermi, the operator F(q)
is ambiguous in the sense that it can be both a q-creator and a q-annihilator.
If q is dummy, it is assumed to be a summation variable and this method
rewrites it into a sum of NO terms with unambiguous operators:
{Fd(p)*F(q)} = {Fd(a)*F(b)} + {Fd(a)*F(i)} + {Fd(j)*F(b)} -{F(i)*Fd(j)}
where a,b are above and i,j are below fermi level.
"""
return NO(self._remove_brackets)
def __getitem__(self, i):
if isinstance(i, slice):
indices = i.indices(len(self))
return [self.args[0].args[i] for i in range(*indices)]
else:
return self.args[0].args[i]
def __len__(self):
return len(self.args[0].args)
def iter_q_annihilators(self):
"""
Iterates over the annihilation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = xrange(len(ops) - 1, -1, -1)
for i in iter:
if ops[i].is_q_annihilator:
yield i
else:
break
def iter_q_creators(self):
"""
Iterates over the creation operators.
Examples
========
>>> from sympy import symbols
>>> i, j = symbols('i j', below_fermi=True)
>>> a, b = symbols('a b', above_fermi=True)
>>> from sympy.physics.secondquant import NO, F, Fd
>>> no = NO(Fd(a)*F(i)*F(b)*Fd(j))
>>> no.iter_q_creators()
<generator object... at 0x...>
>>> list(no.iter_q_creators())
[0, 1]
>>> list(no.iter_q_annihilators())
[3, 2]
"""
ops = self.args[0].args
iter = xrange(0, len(ops))
for i in iter:
if ops[i].is_q_creator:
yield i
else:
break
def get_subNO(self, i):
"""
Returns a NO() without FermionicOperator at index i.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, NO
>>> p,q,r = symbols('p,q,r')
>>> NO(F(p)*F(q)*F(r)).get_subNO(1) # doctest: +SKIP
NO(AnnihilateFermion(p)*AnnihilateFermion(r))
"""
arg0 = self.args[0] # it's a Mul by definition of how it's created
mul = arg0._new_rawargs(arg0.args[:i] + arg0.args[i + 1:])
return NO(mul)
def _latex(self, printer):
return "\\left\\{%s\\right\\}" % printer._print(self.args[0])
def __repr__(self):
return "NO(%s)" % self.args[0]
def __str__(self):
return ":%s:" % self.args[0]
# @cacheit
def contraction(a, b):
"""
Calculates contraction of Fermionic operators a and b.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.secondquant import F, Fd, contraction
>>> p, q = symbols('p,q')
>>> a, b = symbols('a,b', above_fermi=True)
>>> i, j = symbols('i,j', below_fermi=True)
A contraction is non-zero only if a quasi-creator is to the right of a
quasi-annihilator:
>>> contraction(F(a),Fd(b))
KroneckerDelta(a, b)
>>> contraction(Fd(i),F(j))
KroneckerDelta(i, j)
For general indices a non-zero result restricts the indices to below/above
the fermi surface:
>>> contraction(Fd(p),F(q))
KroneckerDelta(_i, q)*KroneckerDelta(p, q)
>>> contraction(F(p),Fd(q))
KroneckerDelta(_a, q)*KroneckerDelta(p, q)
The contraction of two creators or two annihilators always vanishes:
>>> contraction(F(p),F(q))
0
>>> contraction(Fd(p),Fd(q))
0
"""
if isinstance(b, FermionicOperator) and isinstance(a, FermionicOperator):
if isinstance(a, AnnihilateFermion) and isinstance(b, CreateFermion):
if b.state.assumptions0.get("below_fermi"):
return S.Zero
if a.state.assumptions0.get("below_fermi"):
return S.Zero
if b.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("above_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('a', above_fermi=True)))
if isinstance(b, AnnihilateFermion) and isinstance(a, CreateFermion):
if b.state.assumptions0.get("above_fermi"):
return S.Zero
if a.state.assumptions0.get("above_fermi"):
return S.Zero
if b.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
if a.state.assumptions0.get("below_fermi"):
return KroneckerDelta(a.state, b.state)
return (KroneckerDelta(a.state, b.state)*
KroneckerDelta(b.state, Dummy('i', below_fermi=True)))
# vanish if 2xAnnihilator or 2xCreator
return S.Zero
else:
#not fermion operators
t = ( isinstance(i, FermionicOperator) for i in (a, b) )
raise ContractionAppliesOnlyToFermions(*t)
def _sqkey(sq_operator):
"""Generates key for canonical sorting of SQ operators."""
return sq_operator._sortkey()
def _sort_anticommuting_fermions(string1, key=_sqkey):
"""Sort fermionic operators to canonical order, assuming all pairs anticommute.
Uses a bidirectional bubble sort. Items in string1 are not referenced
so in principle they may be any comparable objects. The sorting depends on the
operators '>' and '=='.
If the Pauli principle is violated, an exception is raised.
Returns
=======
tuple (sorted_str, sign)
sorted_str: list containing the sorted operators
sign: int telling how many times the sign should be changed
(if sign==0 the string was already sorted)
"""
verified = False
sign = 0
rng = list(range(len(string1) - 1))
rev = list(range(len(string1) - 3, -1, -1))
keys = list(map(key, string1))
key_val = dict(list(zip(keys, string1)))
while not verified:
verified = True
for i in rng:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
if verified:
break
for i in rev:
left = keys[i]
right = keys[i + 1]
if left == right:
raise ViolationOfPauliPrinciple([left, right])
if left > right:
verified = False
keys[i:i + 2] = [right, left]
sign = sign + 1
string1 = [ key_val[k] for k in keys ]
return (string1, sign)
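# Illustrative sketch (not a doctest): sorting two anticommuting creators into
# canonical order swaps them once and reports the accumulated sign.
#
#     >>> from sympy import symbols
#     >>> from sympy.physics.secondquant import Fd, _sort_anticommuting_fermions
#     >>> a = symbols('a', above_fermi=True)
#     >>> i = symbols('i', below_fermi=True)
#     >>> _sort_anticommuting_fermions([Fd(i), Fd(a)])
#     ([CreateFermion(a), CreateFermion(i)], 1)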
def evaluate_deltas(e):
"""
We evaluate KroneckerDelta symbols in the expression assuming Einstein summation.
If one index is repeated it is summed over and in effect substituted with
the other one. If both indices are repeated we substitute according to what
is the preferred index. This is determined by
KroneckerDelta.preferred_index and KroneckerDelta.killable_index.
In case there are no possible substitutions or if a substitution would
imply a loss of information, nothing is done.
In case an index appears in more than one KroneckerDelta, the resulting
substitution depends on the order of the factors. Since the ordering is platform
dependent, the literal expression resulting from this function may be hard to
predict.
Examples
========
We assume the following:
>>> from sympy import symbols, Function, Dummy, KroneckerDelta
>>> from sympy.physics.secondquant import evaluate_deltas
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> a,b = symbols('a b', above_fermi=True, cls=Dummy)
>>> p,q = symbols('p q', cls=Dummy)
>>> f = Function('f')
>>> t = Function('t')
The order of preference for these indices according to KroneckerDelta is
(a, b, i, j, p, q).
Trivial cases:
>>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j)
f(_j)
>>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i)
f(_i)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q)
f(_q)
>>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p)
f(_p)
More interesting cases:
>>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q))
f(_i, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q))
f(_a, _q)*t(_a, _i)
>>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q))
f(_p, _p)
Finally, here are some cases where nothing is done, because that would
imply a loss of information:
>>> evaluate_deltas(KroneckerDelta(i,p)*f(q))
f(_q)*KroneckerDelta(_i, _p)
>>> evaluate_deltas(KroneckerDelta(i,p)*f(i))
f(_i)*KroneckerDelta(_i, _p)
"""
# We treat Deltas only in mul objects
# for general function objects we don't evaluate KroneckerDeltas in arguments,
# but here we hard code exceptions to this rule
accepted_functions = (
Add,
)
if isinstance(e, accepted_functions):
return e.func(*[evaluate_deltas(arg) for arg in e.args])
elif isinstance(e, Mul):
# find all occurrences of delta functions and count each index present in
# expression.
deltas = []
indices = {}
for i in e.args:
for s in i.free_symbols:
if s in indices:
indices[s] += 1
else:
indices[s] = 0 # geek counting simplifies logic below
if isinstance(i, KroneckerDelta):
deltas.append(i)
for d in deltas:
# If we do something, and there are more deltas, we should recurse
# to treat the resulting expression properly
if indices[d.killable_index]:
e = e.subs(d.killable_index, d.preferred_index)
if len(deltas) > 1:
return evaluate_deltas(e)
elif indices[d.preferred_index] and d.indices_contain_equal_information:
e = e.subs(d.preferred_index, d.killable_index)
if len(deltas) > 1:
return evaluate_deltas(e)
else:
pass
return e
# nothing to do, maybe we hit a Symbol or a number
else:
return e
def substitute_dummies(expr, new_indices=False, pretty_indices={}):
"""
Collect terms by substitution of dummy variables.
This routine allows simplification of Add expressions containing terms
which differ only due to dummy variables.
The idea is to substitute all dummy variables consistently depending on
the structure of the term. For each term, we obtain a sequence of all
dummy variables, where the order is determined by the index range, what
factors the index belongs to and its position in each factor. See
_get_ordered_dummies() for more information about the sorting of dummies.
The index sequence is then substituted consistently in each term.
Examples
========
>>> from sympy import symbols, Function, Dummy
>>> from sympy.physics.secondquant import substitute_dummies
>>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy)
>>> i,j = symbols('i j', below_fermi=True, cls=Dummy)
>>> f = Function('f')
>>> expr = f(a,b) + f(c,d); expr
f(_a, _b) + f(_c, _d)
Since a, b, c and d are equivalent summation indices, the expression can be
simplified to a single term (for which the dummy indices are still summed over)
>>> substitute_dummies(expr)
2*f(_a, _b)
Controlling output:
By default the dummy symbols that are already present in the expression
will be reused in a different permutation. However, if new_indices=True,
new dummies will be generated and inserted. The keyword 'pretty_indices'
can be used to control this generation of new symbols.
By default the new dummies will be generated in the form i_1, i_2, a_1,
etc. If you supply a dictionary with key:value pairs in the form:
{ index_group: string_of_letters }
The letters will be used as labels for the new dummy symbols. The
index_groups must be one of 'above', 'below' or 'general'.
>>> expr = f(a,b,i,j)
>>> my_dummies = { 'above':'st', 'below':'uv' }
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_s, _t, _u, _v)
If we run out of letters, or if there is no keyword for some index_group,
the default dummy generator will be used as a fallback:
>>> p,q = symbols('p q', cls=Dummy) # general indices
>>> expr = f(p,q)
>>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies)
f(_p_0, _p_1)
"""
# setup the replacing dummies
if new_indices:
letters_above = pretty_indices.get('above', "")
letters_below = pretty_indices.get('below', "")
letters_general = pretty_indices.get('general', "")
len_above = len(letters_above)
len_below = len(letters_below)
len_general = len(letters_general)
def _i(number):
try:
return letters_below[number]
except IndexError:
return 'i_' + str(number - len_below)
def _a(number):
try:
return letters_above[number]
except IndexError:
return 'a_' + str(number - len_above)
def _p(number):
try:
return letters_general[number]
except IndexError:
return 'p_' + str(number - len_general)
aboves = []
belows = []
generals = []
dummies = expr.atoms(Dummy)
if not new_indices:
dummies = sorted(dummies, key=default_sort_key)
# generate lists with the dummies we will insert
a = i = p = 0
for d in dummies:
assum = d.assumptions0
if assum.get("above_fermi"):
if new_indices:
sym = _a(a)
a += 1
l1 = aboves
elif assum.get("below_fermi"):
if new_indices:
sym = _i(i)
i += 1
l1 = belows
else:
if new_indices:
sym = _p(p)
p += 1
l1 = generals
if new_indices:
l1.append(Dummy(sym, **assum))
else:
l1.append(d)
expr = expr.expand()
terms = Add.make_args(expr)
new_terms = []
for term in terms:
i = iter(belows)
a = iter(aboves)
p = iter(generals)
ordered = _get_ordered_dummies(term)
subsdict = {}
for d in ordered:
if d.assumptions0.get('below_fermi'):
subsdict[d] = next(i)
elif d.assumptions0.get('above_fermi'):
subsdict[d] = next(a)
else:
subsdict[d] = next(p)
subslist = []
final_subs = []
for k, v in subsdict.items():
if k == v:
continue
if v in subsdict:
# We check if the sequence of substitutions ends quickly. In
# that case, we can avoid temporary symbols if we ensure the
# correct substitution order.
if subsdict[v] in subsdict:
# (x, y) -> (y, x), we need a temporary variable
x = Dummy('x')
subslist.append((k, x))
final_subs.append((x, v))
else:
# (x, y) -> (y, a), x->y must be done last
# but before temporary variables are resolved
final_subs.insert(0, (k, v))
else:
subslist.append((k, v))
subslist.extend(final_subs)
new_terms.append(term.subs(subslist))
return Add(*new_terms)
class KeyPrinter(StrPrinter):
"""Printer for which only equal objects are equal in print"""
def _print_Dummy(self, expr):
return "(%s_%i)" % (expr.name, expr.dummy_index)
def __kprint(expr):
p = KeyPrinter()
return p.doprint(expr)
def _get_ordered_dummies(mul, verbose=False):
"""Returns all dummies in the mul sorted in canonical order
The purpose of the canonical ordering is that dummies can be substituted
consistently across terms with the result that equivalent terms can be
simplified.
It is not possible to determine if two terms are equivalent based solely on
the dummy order. However, a consistent substitution guided by the ordered
dummies should lead to trivially (non-)equivalent terms, thereby revealing
the equivalence. This also means that if two terms have identical sequences of
dummies, the (non-)equivalence should already be apparent.
Strategy
--------
    The canonical order is given by an arbitrary sorting rule. A sort key
is determined for each dummy as a tuple that depends on all factors where
the index is present. The dummies are thereby sorted according to the
contraction structure of the term, instead of sorting based solely on the
dummy symbol itself.
    After all dummies in the term have been assigned a key, we check for identical
keys, i.e. unorderable dummies. If any are found, we call a specialized
method, _determine_ambiguous(), that will determine a unique order based
on recursive calls to _get_ordered_dummies().
Key description
---------------
A high level description of the sort key:
1. Range of the dummy index
2. Relation to external (non-dummy) indices
3. Position of the index in the first factor
4. Position of the index in the second factor
The sort key is a tuple with the following components:
1. A single character indicating the range of the dummy (above, below
or general.)
2. A list of strings with fully masked string representations of all
factors where the dummy is present. By masked, we mean that dummies
are represented by a symbol to indicate either below fermi, above or
general. No other information is displayed about the dummies at
this point. The list is sorted stringwise.
3. An integer number indicating the position of the index, in the first
factor as sorted in 2.
4. An integer number indicating the position of the index, in the second
factor as sorted in 2.
If a factor is either of type AntiSymmetricTensor or SqOperator, the index
position in items 3 and 4 is indicated as 'upper' or 'lower' only.
(Creation operators are considered upper and annihilation operators lower.)
If the masked factors are identical, the two factors cannot be ordered
unambiguously in item 2. In this case, items 3, 4 are left out. If several
indices are contracted between the unorderable factors, it will be handled by
    _determine_ambiguous().
"""
# setup dicts to avoid repeated calculations in key()
args = Mul.make_args(mul)
fac_dum = dict([ (fac, fac.atoms(Dummy)) for fac in args] )
fac_repr = dict([ (fac, __kprint(fac)) for fac in args] )
all_dums = reduce(set.union, list(fac_dum.values()), set())
mask = {}
for d in all_dums:
if d.assumptions0.get('below_fermi'):
mask[d] = '0'
elif d.assumptions0.get('above_fermi'):
mask[d] = '1'
else:
mask[d] = '2'
dum_repr = dict([ (d, __kprint(d)) for d in all_dums ])
def _key(d):
dumstruct = [ fac for fac in fac_dum if d in fac_dum[fac] ]
other_dums = reduce(
set.union, [ fac_dum[fac] for fac in dumstruct ], set())
fac = dumstruct[-1]
if other_dums is fac_dum[fac]:
other_dums = fac_dum[fac].copy()
other_dums.remove(d)
masked_facs = [ fac_repr[fac] for fac in dumstruct ]
for d2 in other_dums:
masked_facs = [ fac.replace(dum_repr[d2], mask[d2])
for fac in masked_facs ]
all_masked = [ fac.replace(dum_repr[d], mask[d])
for fac in masked_facs ]
masked_facs = dict(list(zip(dumstruct, masked_facs)))
# dummies for which the ordering cannot be determined
if has_dups(all_masked):
all_masked.sort()
return mask[d], tuple(all_masked) # positions are ambiguous
# sort factors according to fully masked strings
keydict = dict(list(zip(dumstruct, all_masked)))
dumstruct.sort(key=lambda x: keydict[x])
all_masked.sort()
pos_val = []
for fac in dumstruct:
if isinstance(fac, AntiSymmetricTensor):
if d in fac.upper:
pos_val.append('u')
if d in fac.lower:
pos_val.append('l')
elif isinstance(fac, Creator):
pos_val.append('u')
elif isinstance(fac, Annihilator):
pos_val.append('l')
elif isinstance(fac, NO):
ops = [ op for op in fac if op.has(d) ]
for op in ops:
if isinstance(op, Creator):
pos_val.append('u')
else:
pos_val.append('l')
else:
# fallback to position in string representation
facpos = -1
while 1:
facpos = masked_facs[fac].find(dum_repr[d], facpos + 1)
if facpos == -1:
break
pos_val.append(facpos)
return (mask[d], tuple(all_masked), pos_val[0], pos_val[-1])
dumkey = dict(list(zip(all_dums, list(map(_key, all_dums)))))
result = sorted(all_dums, key=lambda x: dumkey[x])
if has_dups(iter(dumkey.values())):
# We have ambiguities
unordered = defaultdict(set)
for d, k in dumkey.items():
unordered[k].add(d)
for k in [ k for k in unordered if len(unordered[k]) < 2 ]:
del unordered[k]
unordered = [ unordered[k] for k in sorted(unordered) ]
result = _determine_ambiguous(mul, result, unordered)
return result
def _determine_ambiguous(term, ordered, ambiguous_groups):
# We encountered a term for which the dummy substitution is ambiguous.
# This happens for terms with 2 or more contractions between factors that
# cannot be uniquely ordered independent of summation indices. For
# example:
#
# Sum(p, q) v^{p, .}_{q, .}v^{q, .}_{p, .}
#
# Assuming that the indices represented by . are dummies with the
# same range, the factors cannot be ordered, and there is no
# way to determine a consistent ordering of p and q.
#
    # The strategy employed here is to relabel all unambiguous dummies with
# non-dummy symbols and call _get_ordered_dummies again. This procedure is
# applied to the entire term so there is a possibility that
# _determine_ambiguous() is called again from a deeper recursion level.
# break recursion if there are no ordered dummies
all_ambiguous = set()
for dummies in ambiguous_groups:
all_ambiguous |= dummies
all_ordered = set(ordered) - all_ambiguous
if not all_ordered:
# FIXME: If we arrive here, there are no ordered dummies. A method to
# handle this needs to be implemented. In order to return something
# useful nevertheless, we choose arbitrarily the first dummy and
# determine the rest from this one. This method is dependent on the
# actual dummy labels which violates an assumption for the canonization
# procedure. A better implementation is needed.
group = [ d for d in ordered if d in ambiguous_groups[0] ]
d = group[0]
all_ordered.add(d)
ambiguous_groups[0].remove(d)
stored_counter = _symbol_factory._counter
subslist = []
for d in [ d for d in ordered if d in all_ordered ]:
nondum = _symbol_factory._next()
subslist.append((d, nondum))
newterm = term.subs(subslist)
neworder = _get_ordered_dummies(newterm)
_symbol_factory._set_counter(stored_counter)
# update ordered list with new information
for group in ambiguous_groups:
ordered_group = [ d for d in neworder if d in group ]
ordered_group.reverse()
result = []
for d in ordered:
if d in group:
result.append(ordered_group.pop())
else:
result.append(d)
ordered = result
return ordered
class _SymbolFactory(object):
def __init__(self, label):
self._counterVar = 0
self._label = label
def _set_counter(self, value):
"""
Sets counter to value.
"""
self._counterVar = value
@property
def _counter(self):
"""
        The value the counter is currently at.
"""
return self._counterVar
def _next(self):
"""
        Generates the next symbol and increments the counter by 1.
"""
s = Symbol("%s%i" % (self._label, self._counterVar))
self._counterVar += 1
return s
_symbol_factory = _SymbolFactory('_]"]_') # most certainly a unique label
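# Added note: each call to _symbol_factory._next() yields Symbol('_]"]_0'),
# Symbol('_]"]_1'), ... and _set_counter() lets _determine_ambiguous() roll
# the counter back after its temporary substitutions.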
@cacheit
def _get_contractions(string1, keep_only_fully_contracted=False):
"""
Returns Add-object with contracted terms.
Uses recursion to find all contractions. -- Internal helper function --
    Will find nonzero contractions between the operators in string1.
"""
# Should we store current level of contraction?
if keep_only_fully_contracted and string1:
result = []
else:
result = [NO(Mul(*string1))]
for i in range(len(string1) - 1):
for j in range(i + 1, len(string1)):
c = contraction(string1[i], string1[j])
if c:
# print "found contraction",c
sign = (j - i + 1) % 2
if sign:
coeff = S.NegativeOne*c
else:
coeff = c
#
# Call next level of recursion
# ============================
#
# We now need to find more contractions among operators
#
# oplist = string1[:i]+ string1[i+1:j] + string1[j+1:]
#
# To prevent overcounting, we don't allow contractions
# we have already encountered. i.e. contractions between
# string1[:i] <---> string1[i+1:j]
# and string1[:i] <---> string1[j+1:].
#
# This leaves the case:
oplist = string1[i + 1:j] + string1[j + 1:]
if oplist:
result.append(coeff*NO(
Mul(*string1[:i])*_get_contractions( oplist,
keep_only_fully_contracted=keep_only_fully_contracted)))
else:
result.append(coeff*NO( Mul(*string1[:i])))
if keep_only_fully_contracted:
break # next iteration over i leaves leftmost operator string1[0] uncontracted
return Add(*result)
# @cacheit
def wicks(e, **kw_args):
"""
    Returns the normal ordered equivalent of an expression using Wick's Theorem.
Examples
========
>>> from sympy import symbols, Function, Dummy
>>> from sympy.physics.secondquant import wicks, F, Fd, NO
>>> p,q,r = symbols('p,q,r')
>>> wicks(Fd(p)*F(q)) # doctest: +SKIP
d(p, q)*d(q, _i) + NO(CreateFermion(p)*AnnihilateFermion(q))
By default, the expression is expanded:
>>> wicks(F(p)*(F(q)+F(r))) # doctest: +SKIP
NO(AnnihilateFermion(p)*AnnihilateFermion(q)) + NO(
AnnihilateFermion(p)*AnnihilateFermion(r))
With the keyword 'keep_only_fully_contracted=True', only fully contracted
terms are returned.
By request, the result can be simplified in the following order:
-- KroneckerDelta functions are evaluated
-- Dummy variables are substituted consistently across terms
>>> p, q, r = symbols('p q r', cls=Dummy)
>>> wicks(Fd(p)*(F(q)+F(r)), keep_only_fully_contracted=True) # doctest: +SKIP
KroneckerDelta(_i, _q)*KroneckerDelta(
_p, _q) + KroneckerDelta(_i, _r)*KroneckerDelta(_p, _r)
"""
if not e:
return S.Zero
opts = {
'simplify_kronecker_deltas': False,
'expand': True,
'simplify_dummies': False,
'keep_only_fully_contracted': False
}
opts.update(kw_args)
# check if we are already normally ordered
if isinstance(e, NO):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
elif isinstance(e, FermionicOperator):
if opts['keep_only_fully_contracted']:
return S.Zero
else:
return e
# break up any NO-objects, and evaluate commutators
e = e.doit(wicks=True)
# make sure we have only one term to consider
e = e.expand()
if isinstance(e, Add):
if opts['simplify_dummies']:
return substitute_dummies(Add(*[ wicks(term, **kw_args) for term in e.args]))
else:
return Add(*[ wicks(term, **kw_args) for term in e.args])
# For Mul-objects we can actually do something
if isinstance(e, Mul):
        # we don't want to mess around with the commuting part of Mul
# so we factorize it out before starting recursion
c_part = []
string1 = []
for factor in e.args:
if factor.is_commutative:
c_part.append(factor)
else:
string1.append(factor)
n = len(string1)
# catch trivial cases
if n == 0:
result = e
elif n == 1:
if opts['keep_only_fully_contracted']:
return S.Zero
else:
result = e
else: # non-trivial
if isinstance(string1[0], BosonicOperator):
raise NotImplementedError
string1 = tuple(string1)
# recursion over higher order contractions
result = _get_contractions(string1,
keep_only_fully_contracted=opts['keep_only_fully_contracted'] )
result = Mul(*c_part)*result
if opts['expand']:
result = result.expand()
if opts['simplify_kronecker_deltas']:
result = evaluate_deltas(result)
return result
# there was nothing to do
return e
class PermutationOperator(Expr):
"""
Represents the index permutation operator P(ij).
P(ij)*f(i)*g(j) = f(i)*g(j) - f(j)*g(i)
"""
is_commutative = True
def __new__(cls, i, j):
i, j = sorted(map(sympify, (i, j)), key=default_sort_key)
obj = Basic.__new__(cls, i, j)
return obj
def get_permuted(self, expr):
"""
Returns -expr with permuted indices.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q = symbols('p,q')
>>> f = Function('f')
>>> PermutationOperator(p,q).get_permuted(f(p,q))
-f(q, p)
"""
i = self.args[0]
j = self.args[1]
if expr.has(i) and expr.has(j):
tmp = Dummy()
expr = expr.subs(i, tmp)
expr = expr.subs(j, i)
expr = expr.subs(tmp, j)
return S.NegativeOne*expr
else:
return expr
def _latex(self, printer):
return "P(%s%s)" % self.args
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('p,q,r,s')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
f(p)*g(q)*PermutationOperator(p, q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
if arg.args:
result.extend(_get_indices(arg, ind))
return result
def _choose_one_to_keep(a, b, ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
return min(a, b, key=lambda x: default_sort_key(_get_indices(x, ind)))
expr = expr.expand()
if isinstance(expr, Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set([])
on_hold = set([])
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
# Some terms must get a second chance because the permuted
# term may already have canonical dummy ordering. Then
# substitute_dummies() does nothing. However, the other
# term, if it exists, will be able to match with us.
permuted1 = permuted
permuted = substitute_dummies(permuted)
if permuted1 == permuted:
on_hold.add(term)
elif permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms | on_hold
return Add(*terms)
return expr
|
mit
|
evaschalde/odoo
|
addons/mail/mail_group.py
|
247
|
12877
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Public'), ('private', 'Private'), ('groups', 'Selected Group Only')], 'Privacy', required=True,
            help='This group is visible to non-members. \
Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(newref), 'mail_group_id': mail_group_id}, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, [mail_group_id], context=context)
return mail_group_id
def unlink(self, cr, uid, ids, context=None):
groups = self.browse(cr, uid, ids, context=context)
alias_ids = [group.alias_id.id for group in groups if group.alias_id]
menu_ids = [group.menu_id.id for group in groups if group.menu_id]
# Delete mail_group
try:
all_emp_group = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mail', 'group_all_employees')[1]
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in ids:
raise osv.except_osv(_('Warning!'), _('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(mail_group, self).unlink(cr, uid, ids, context=context)
# Cascade-delete mail aliases as well, as they should not exist without the mail group.
self.pool.get('mail.alias').unlink(cr, SUPERUSER_ID, alias_ids, context=context)
# Cascade-delete menu entries as well
self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, menu_ids, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
result = super(mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, ids, context=context)
# if description, name or alias is changed: update client action
if vals.get('description') or vals.get('name') or vals.get('alias_id') or vals.get('alias_name'):
cobj = self.pool.get('ir.actions.client')
            for group in self.browse(cr, uid, ids, context=context):
                # update each group's client action with its own header description
                action = group.menu_id.action
                new_params = action.params
                new_params['header_description'] = self._generate_header_description(cr, uid, group, context=context)
                cobj.write(cr, SUPERUSER_ID, [action.id], {'params': str(new_params)}, context=context)
# if name is changed: update menu
if vals.get('name'):
mobj = self.pool.get('ir.ui.menu')
mobj.write(cr, SUPERUSER_ID,
[group.menu_id.id for group in self.browse(cr, uid, ids, context=context)],
{'name': vals.get('name')}, context=context)
return result
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggestion of groups if display_groups_suggestions if the
user perference allows it."""
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_groups_suggestions:
return []
else:
return super(mail_group, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(mail_group, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if group.alias_domain and group.alias_name:
headers['List-Id'] = '%s.%s' % (group.alias_name, group.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (group.alias_name, group.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (group.name, group.alias_name, group.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
|
agpl-3.0
|
janslow/boto
|
tests/unit/cloudsearch2/test_search.py
|
114
|
12329
|
#!/usr/bin/env python
from boto.cloudsearch2.domain import Domain
from boto.cloudsearch2.layer1 import CloudSearchConnection
from tests.compat import mock, unittest
from httpretty import HTTPretty
import json
from boto.cloudsearch2.search import SearchConnection, SearchServiceException
from boto.compat import six, map
from tests.unit import AWSMockServiceTestCase
from tests.unit.cloudsearch2 import DEMO_DOMAIN_DATA
from tests.unit.cloudsearch2.test_connection import TestCloudSearchCreateDomain
HOSTNAME = "search-demo-userdomain.us-east-1.cloudsearch.amazonaws.com"
FULL_URL = 'http://%s/2013-01-01/search' % HOSTNAME
class CloudSearchSearchBaseTest(unittest.TestCase):
hits = [
{
'id': '12341',
'fields': {
'title': 'Document 1',
'rank': 1
}
},
{
'id': '12342',
'fields': {
'title': 'Document 2',
'rank': 2
}
},
{
'id': '12343',
'fields': {
'title': 'Document 3',
'rank': 3
}
},
{
'id': '12344',
'fields': {
'title': 'Document 4',
'rank': 4
}
},
{
'id': '12345',
'fields': {
'title': 'Document 5',
'rank': 5
}
},
{
'id': '12346',
'fields': {
'title': 'Document 6',
'rank': 6
}
},
{
'id': '12347',
'fields': {
'title': 'Document 7',
'rank': 7
}
},
]
content_type = "text/xml"
response_status = 200
def get_args(self, requestline):
(_, request, _) = requestline.split(b" ")
(_, request) = request.split(b"?", 1)
args = six.moves.urllib.parse.parse_qs(request)
return args
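    # Added illustration: a raw request line such as
    #   b"GET /2013-01-01/search?q=Test&size=10 HTTP/1.1"
    # is parsed by get_args() into {b'q': [b'Test'], b'size': [b'10']}.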
def setUp(self):
HTTPretty.enable()
body = self.response
if not isinstance(body, bytes):
body = json.dumps(body).encode('utf-8')
HTTPretty.register_uri(HTTPretty.GET, FULL_URL,
body=body,
content_type=self.content_type,
status=self.response_status)
def tearDown(self):
HTTPretty.disable()
class CloudSearchSearchTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'status': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
}
}
def test_cloudsearch_qsearch(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', options='TestOptions')
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'q.options'], [b"TestOptions"])
self.assertEqual(args[b'start'], [b"0"])
self.assertEqual(args[b'size'], [b"10"])
def test_cloudsearch_search_details(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', size=50, start=20)
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'q'], [b"Test"])
self.assertEqual(args[b'size'], [b"50"])
self.assertEqual(args[b'start'], [b"20"])
def test_cloudsearch_facet_constraint_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet={'author': "'John Smith','Mark Smith'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'],
[b"'John Smith','Mark Smith'"])
def test_cloudsearch_facet_constraint_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(
q='Test',
facet={'author': "'John Smith','Mark Smith'",
'category': "'News','Reviews'"})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'],
[b"'John Smith','Mark Smith'"])
self.assertEqual(args[b'facet.category'],
[b"'News','Reviews'"])
def test_cloudsearch_facet_sort_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet={'author': {'sort': 'alpha'}})
args = self.get_args(HTTPretty.last_request.raw_requestline)
print(args)
self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}'])
def test_cloudsearch_facet_sort_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', facet={'author': {'sort': 'alpha'},
'cat': {'sort': 'count'}})
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'facet.author'], [b'{"sort": "alpha"}'])
self.assertEqual(args[b'facet.cat'], [b'{"sort": "count"}'])
def test_cloudsearch_result_fields_single(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return'], [b'author'])
def test_cloudsearch_result_fields_multiple(self):
search = SearchConnection(endpoint=HOSTNAME)
search.search(q='Test', return_fields=['author', 'title'])
args = self.get_args(HTTPretty.last_request.raw_requestline)
self.assertEqual(args[b'return'], [b'author,title'])
def test_cloudsearch_results_meta(self):
"""Check returned metadata is parsed correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# These rely on the default response which is fed into HTTPretty
self.assertEqual(results.hits, 30)
self.assertEqual(results.docs[0]['fields']['rank'], 1)
def test_cloudsearch_results_info(self):
"""Check num_pages_needed is calculated correctly"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
# This relies on the default response which is fed into HTTPretty
self.assertEqual(results.num_pages_needed, 3.0)
def test_cloudsearch_results_matched(self):
"""
Check that information objects are passed back through the API
correctly.
"""
search = SearchConnection(endpoint=HOSTNAME)
query = search.build_query(q='Test')
results = search(query)
self.assertEqual(results.search_service, search)
self.assertEqual(results.query, query)
def test_cloudsearch_results_hits(self):
"""Check that documents are parsed properly from AWS"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
hits = list(map(lambda x: x['id'], results.docs))
# This relies on the default response which is fed into HTTPretty
self.assertEqual(
hits, ["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
def test_cloudsearch_results_iterator(self):
"""Check the results iterator"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
results_correct = iter(["12341", "12342", "12343", "12344",
"12345", "12346", "12347"])
for x in results:
self.assertEqual(x['id'], next(results_correct))
    def test_cloudsearch_results_internal_consistency(self):
        """Check that the number of documents matches the result length"""
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test')
self.assertEqual(len(results), len(results.docs))
def test_cloudsearch_search_nextpage(self):
"""Check next page query is correct"""
search = SearchConnection(endpoint=HOSTNAME)
query1 = search.build_query(q='Test')
query2 = search.build_query(q='Test')
results = search(query2)
self.assertEqual(results.next_page().query.start,
query1.start + query1.size)
self.assertEqual(query1.q, query2.q)
class CloudSearchSearchFacetTest(CloudSearchSearchBaseTest):
response = {
'rank': '-text_relevance',
'match-expr': "Test",
'hits': {
'found': 30,
'start': 0,
'hit': CloudSearchSearchBaseTest.hits
},
'status': {
'rid': 'b7c167f6c2da6d93531b9a7b314ad030b3a74803b4b7797edb905ba5a6a08',
'time-ms': 2,
'cpu-time-ms': 0
},
'facets': {
'tags': {},
'animals': {'buckets': [{'count': '2', 'value': 'fish'}, {'count': '1', 'value': 'lions'}]},
}
}
def test_cloudsearch_search_facets(self):
#self.response['facets'] = {'tags': {}}
search = SearchConnection(endpoint=HOSTNAME)
results = search.search(q='Test', facet={'tags': {}})
self.assertTrue('tags' not in results.facets)
self.assertEqual(results.facets['animals'], {u'lions': u'1', u'fish': u'2'})
class CloudSearchNonJsonTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>500 Internal Server Error</h1></body></html>'
response_status = 500
content_type = 'text/xml'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaises(SearchServiceException):
search.search(q='Test')
class CloudSearchUnauthorizedTest(CloudSearchSearchBaseTest):
response = b'<html><body><h1>403 Forbidden</h1>foo bar baz</body></html>'
response_status = 403
content_type = 'text/html'
def test_response(self):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'foo bar baz'):
search.search(q='Test')
class FakeResponse(object):
status_code = 405
content = b''
class CloudSearchConnectionTest(AWSMockServiceTestCase):
cloudsearch = True
connection_class = CloudSearchConnection
def setUp(self):
super(CloudSearchConnectionTest, self).setUp()
self.conn = SearchConnection(
endpoint='test-domain.cloudsearch.amazonaws.com'
)
def test_expose_additional_error_info(self):
mpo = mock.patch.object
fake = FakeResponse()
fake.content = b'Nopenopenope'
# First, in the case of a non-JSON, non-403 error.
with mpo(self.conn.session, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='not_gonna_happen')
self.assertTrue('non-json response' in str(cm.exception))
self.assertTrue('Nopenopenope' in str(cm.exception))
# Then with JSON & an 'error' key within.
fake.content = json.dumps({
'error': "Something went wrong. Oops."
}).encode('utf-8')
with mpo(self.conn.session, 'get', return_value=fake) as mock_request:
with self.assertRaises(SearchServiceException) as cm:
self.conn.search(q='no_luck_here')
self.assertTrue('Unknown error' in str(cm.exception))
self.assertTrue('went wrong. Oops' in str(cm.exception))
def test_proxy(self):
conn = self.service_connection
conn.proxy = "127.0.0.1"
conn.proxy_user = "john.doe"
conn.proxy_pass="p4ssw0rd"
conn.proxy_port="8180"
conn.use_proxy = True
domain = Domain(conn, DEMO_DOMAIN_DATA)
search = SearchConnection(domain=domain)
self.assertEqual(search.session.proxies, {'http': 'http://john.doe:p4ssw0rd@127.0.0.1:8180'})
|
mit
|
llmike/PhMuPDF
|
thirdparty/freetype/src/tools/docmaker/sources.py
|
106
|
10770
|
# Sources (c) 2002-2004, 2006-2009, 2012
# David Turner <david@freetype.org>
#
#
# this file contains definitions of classes needed to decompose
# C source files into a series of multi-line "blocks". There are
# two kinds of blocks:
#
# - normal blocks, which contain source code or ordinary comments
#
# - documentation blocks, which have restricted formatting, and
# whose text always starts with a documentation markup tag like
# "<Function>", "<Type>", etc..
#
# the routines used to process the content of documentation blocks
# are not contained here, but in "content.py"
#
# the classes and methods found here only deal with text parsing
# and basic documentation block extraction
#
import fileinput, re, sys, os, string
################################################################
##
## BLOCK FORMAT PATTERN
##
## A simple class containing compiled regular expressions used
## to detect potential documentation format block comments within
## C source code
##
## note that the 'column' pattern must contain a group that will
## be used to "unbox" the content of documentation comment blocks
##
class SourceBlockFormat:
def __init__( self, id, start, column, end ):
"""create a block pattern, used to recognize special documentation blocks"""
self.id = id
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# format 1 documentation comment blocks look like the following:
#
# /************************************/
# /* */
# /* */
# /* */
# /************************************/
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# format 2 documentation comment blocks look like the following:
#
# /************************************ (at least 2 asterisks)
# *
# *
# *
# *
# **/ (1 or more asterisks at the end)
#
# we define a few regular expressions here to detect them
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?!/) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
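#
# added illustration: with format 2, a line like "/*************" matches
# `start`, " *  some documentation text" matches `column` (capturing the
# text after the asterisk), and " **/" matches `end`
#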
#
# the list of supported documentation block formats, we could add new ones
# relatively easily
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# the following regular expressions correspond to markup tags
# within the documentation comment blocks. they're equivalent
# despite their different syntax
#
# notice how each markup tag _must_ begin a new line
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# the list of supported markup tags, we could add new ones relatively
# easily
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# used to detect a cross-reference, after markup tags have been stripped
#
re_crossref = re.compile( r'@((?:\w|-)*)(.*)' )
#
# used to detect italic and bold styles in paragraph text
#
re_italic = re.compile( r"_(\w(\w|')*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*(\w(\w|')*)\*(.*)" ) # *bold*
#
# used to detect the end of commented source lines
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' )
#
# used to perform cross-reference within source output
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# a list of reserved source keywords
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## A SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlocks".
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
## the important fields in a text block are the following ones:
##
## self.lines : a list of text lines for the corresponding block
##
## self.content : for documentation comment blocks only, this is the
## block content that has been "unboxed" from its
## decoration. This is None for all other blocks
## (i.e. sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format == None:
return
words = []
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = string.strip( l )
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only - not used in normal operations
def dump( self ):
if self.content:
print "{{{content start---"
for l in self.content:
print l
print "---content end}}}"
return
fmt = ""
if self.format:
fmt = repr( self.format.id ) + " "
for line in self.lines:
print line
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The SourceProcessor is in charge of reading a C source file
## and decomposing it into a series of different "SourceBlock"
## objects.
##
## each one of these blocks can be made of the following data:
##
## - A documentation comment block that starts with "/**" and
## whose exact format will be discussed later
##
## - normal source lines, including comments
##
##
class SourceProcessor:
def __init__( self ):
"""initialize a source processor"""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""reset a block processor, clean all its blocks"""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""parse a C source file, and add its blocks to the processor's list"""
self.reset()
self.filename = filename
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# that's a normal block end, add it to 'lines' and
# create a new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# that's a normal column line, add it to 'lines'
self.lines.append( line )
else:
# humm.. this is an unexpected block end,
# create a new block, but don't process the line
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""process a normal line and check whether it is the start of a new block"""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""add the current accumulated lines and create a new block"""
if self.lines != []:
block = SourceBlock( self, self.filename, self.lineno, self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""print all blocks in a processor"""
for b in self.blocks:
b.dump()
# eof
|
agpl-3.0
|
ethansshaw/stellavitrum
|
ScienceFairProcess.py
|
1
|
10349
|
#!/usr/bin/env python
"""
Written by Ethan Shaw
"""
from astropy.io import fits
import sys, png, math, os
colors = ['red', 'green', 'blue']
# Build x_axis_len rows, each containing y_axis_len columns
# access with PNG_data[row][column]
def buildMatrix(x_axis_len, y_axis_len, greyscale=True):
# set up empty list (matrix) to hold pixels
PNG_data = []
for row in range(0, x_axis_len):
PNG_data.append([])
        #start with an empty list, then append another list so it looks like [[]]
        #this gives x_axis_len empty row lists inside the list PNG_data
for column in range (0, y_axis_len):
if ( greyscale ):
PNG_data[row].append(0)
#this is the grayscale value
else:
#Red,Green,Blue values
PNG_data[row].append(0)
PNG_data[row].append(0)
PNG_data[row].append(0)
return PNG_data
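# Added note: with greyscale=False each row holds 3*y_axis_len entries laid
# out as [R, G, B, R, G, B, ...], which is why setPixel() below indexes the
# row with column*3.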
#Function defines ONLY color
def setPixel(PNG_data, red, green, blue, row, column):
PNG_data[row][column*3] = red
PNG_data[row][column*3 + 1] = green
PNG_data[row][column*3 + 2] = blue
def getPixelRange(PNG_data, x_axis_len, y_axis_len):
# determine the PNG_data range for scaling purposes
pixel_max = 0
pixel_min = pow(2,16)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
pixel_max = max(pixel_max, PNG_data[row][column])
pixel_min = min(pixel_min, PNG_data[row][column])
print "Pixel max: {0:.20f}, Pixel min: {0:.20f}".format(pixel_max, pixel_min)
return (pixel_max, pixel_min)
def getRawDataFromFile(file, color):
#this reads the file and structures into useable format
hdulist = fits.open(file)
entry = hdulist[0]
bits_per_pixel = entry.header['BITPIX']
number_axis = entry.header['NAXIS']
x_axis_len = entry.header['NAXIS2']
y_axis_len = entry.header['NAXIS1']
print "Data dimensions: (%d x %d) - %d axes, %d bpp" % (x_axis_len, y_axis_len, number_axis, bits_per_pixel)
# data is a bunch of columns, each containing one row
data = entry.data
pixelData = buildMatrix(x_axis_len, y_axis_len, greyscale=False)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
try:
image_value = data[row][column]
red, green, blue = ( 0,0,0 )
if ( color == 'red' ):
red = image_value
elif ( color == 'green' ):
green = image_value
elif ( color == 'blue' ):
blue = image_value
setPixel(pixelData, red, green, blue, row, column)
except Exception as e:
print "Error accessing (%d, %d) : %s" % (row, column, e)
raise SystemExit
return pixelData
def combineTwoDataSets(dataSet1, dataSet2):
print "Combining two data sets"
# step 1, make a new data set the size of the two
x_axis_len = len(dataSet1)
y_axis_len = len(dataSet1[0])
combinedData = buildMatrix(x_axis_len, y_axis_len)
# step 2, step over each pixel in the sets and ADD to the combined pixel value
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
combinedData[row][column] = dataSet1[row][column] + dataSet2[row][column]
# step 3, return the combined data set
return combinedData
def writePNGFile(PNGData, output_directory, dataset_name):
filename = '%s/out_data_%s.png' % ( output_directory, dataset_name)
f = open(filename, 'wb') # binary mode is important
w = png.Writer(len(PNGData[0])/3, len(PNGData), greyscale=False,alpha=False, bitdepth=16)
w.write(f, PNGData)
print "Image written to file %s" % filename
def linearScale(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 - 1) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
return val
def logarithmicScalePixel(value, min_value, max_value):
try:
val = abs(math.log(value))
# for min and max we use 0, 100 for now
return linearScalePixel(val, 0, 100)
except Exception as e:
return 0
def linearScalePixel(value, min_value, max_value):
pixel_range = abs(max_value - min_value)
#2 to the 16th means a 16 bit image (using 16 bits of data to describe each pixel)
ratio = (pow(2, 16)*1.0 -1 ) / pixel_range
#This gives us a linearly scaled value between 0 (black) and 2^16 (white)
val = int(round(value * ratio))
if ( val < 0 or val > 65535 ):
print "value %d (orig: %f was outside range %.e, %.e" % ( val, value, min_value, max_value )
raise SystemExit
return val
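# Added illustration (assuming inputs already span 0..100):
# linearScalePixel(0, 0, 100) -> 0 and linearScalePixel(100, 0, 100) -> 65535,
# i.e. values are stretched linearly onto the 16-bit range 0..2**16 - 1.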
def scaleDataSet(scalingFunction, dataSet):
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
pixel_max, pixel_min = getPixelRange(dataSet, x_axis_len, y_axis_len)
print "Max: %f, Min: %f" % (pixel_max, pixel_min)
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataSet[row][column] = scalingFunction(dataSet[row][column], pixel_min, pixel_max)
return dataSet
def linearScaleDataSet(dataSet):
return scaleDataSet(linearScalePixel, dataSet)
def logScaleDataSet(dataSet):
return scaleDataSet(logarithmicScalePixel, dataSet)
def zeroOutliersInDataSet(dataSet, interQuartileScaleFactor=1.5):
(firstQuartile, median, thirdQuartile, interQuartile) = getQuartileValues(dataSet)
minAllowedValue = max(0, firstQuartile - (interQuartileScaleFactor * interQuartile))
maxAllowedValue = thirdQuartile + (interQuartileScaleFactor * interQuartile)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
dataValue = dataSet[row][column]
if (dataValue < minAllowedValue or dataValue > maxAllowedValue):
dataSet[row][column] = 0
return dataSet
def histogramData(dataSet, output_directory, dataset_folder="data"):
pixel_max, pixel_min = getPixelRange(dataSet, len(dataSet), len(dataSet[0]))
histogram = {}
number_of_groups = 10
group_size = (pixel_max - pixel_min) / (number_of_groups *1.0)
for i in range(0, number_of_groups):
histogram[int(i*group_size)] = 0
histogramKeys = histogram.keys()
histogramKeys.sort()
histogramKeys.reverse()
for x in range(0, len(dataSet)):
for y in range(0, len(dataSet[0])):
pixel = dataSet[x][y]
for key in histogramKeys:
if pixel < key:
histogram[key] = int(histogram[key] + 1)
continue
histogramKeys.reverse()
output_path = "%s/%s_histogram.csv" % (output_directory, dataset_folder)
outf = open(output_path, "w")
for key in histogramKeys:
kname = "Bucket %d" % key
outf.write("%s,%d\n" % (kname, histogram[key]))
outf.close()
print "Histogram written to file %s" % output_path
def getMean(dataSet):
sum = 0.0
count = 0
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > 0:
sum = sum + dataSet[row][column]
count = count + 1
return sum/count
def getMedian(dataSet):
dataList = []
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if (dataSet[row][column] > 0):
dataList.append(dataSet[row][column])
dataList.sort()
middleNumber = len(dataList)/2
return dataList[middleNumber]
def getQuartileValues(dataSet):
median = getMedian(dataSet)
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
valuesLessThanMedian = []
valuesGreaterThanMedian = []
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
if dataSet[row][column] > median:
valuesGreaterThanMedian.append(dataSet[row][column])
else:
valuesLessThanMedian.append(dataSet[row][column])
valuesGreaterThanMedian.sort()
valuesLessThanMedian.sort()
firstQuartile = valuesLessThanMedian[len(valuesLessThanMedian)/2]
thirdQuartile = valuesGreaterThanMedian[len(valuesGreaterThanMedian)/2]
interQuartile = thirdQuartile - firstQuartile
print "Quartiles: ", firstQuartile, median, thirdQuartile, interQuartile
return (firstQuartile, median, thirdQuartile, interQuartile)
def getMode(dataSet):
dataPoints = {}
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
for column in range (0, y_axis_len):
point = dataSet[row][column]
if (point > 0):
if dataPoints.has_key(point):
dataPoints[point] = dataPoints[point] + 1
else:
dataPoints[point] = 1
maxCount = 0
maxValue = None
for (value, count) in dataPoints.items():
if count > maxCount:
maxCount = count
maxValue = value
print "%f was the max value and occurred %d times" % (maxValue, maxCount)
return maxValue
def outputToCSVFile(filename, dataSet):
outf = open(filename, 'w')
x_axis_len = len(dataSet)
y_axis_len = len(dataSet[0])
for row in range(0, x_axis_len):
line = ""
for column in range (0, y_axis_len):
line = "%s%.7e," % (line, dataSet[row][column])
line = line + "\n"
outf.write(line)
outf.close()
print "Wrote to %s" % filename
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s <file1> <file2> ..." % sys.argv[0]
raise SystemExit
files = sys.argv[1:]
i = 0
PNGDataSets = []
#rData = getRawDataFromFile(files[0], "red")
#writePNGFile(rData, "red")
#raise SystemExit
full_path1 = os.path.abspath(files[0])
folder_path = os.path.split(full_path1)[0]
dataset_folder = os.path.basename(folder_path)
for file in files:
dataSet = getRawDataFromFile(file, colors[i])
i = i + 1
dataSetNormalized = zeroOutliersInDataSet(dataSet)
PNGDataSets.append(dataSetNormalized)
combinedSet = None
for dataSet in PNGDataSets:
if (combinedSet == None):
combinedSet = dataSet
else:
combinedSet = combineTwoDataSets(combinedSet, dataSet)
parent_directory = os.path.split(os.path.abspath(sys.argv[0]))[0]
output_directory = os.path.join(parent_directory, "Results")
if not os.path.exists(output_directory):
os.makedirs(output_directory)
print "Created directory %s" % output_directory
else:
print "Output directory %s exists" % output_directory
# now linear scale the outlier set
scaledSet = linearScaleDataSet(combinedSet)
histogramData(scaledSet, output_directory, dataset_folder)
#raise SystemExit
filename = "%s/dataset_%s.csv" % (output_directory, dataset_folder)
outputToCSVFile(filename, scaledSet)
writePNGFile(scaledSet, output_directory, dataset_folder) #old was writePNGFile(combinedSet, "combined")
print "Process complete"
|
mit
|
procangroup/edx-platform
|
common/lib/calc/calc/functions.py
|
279
|
1521
|
"""
Provide the mathematical functions that numpy doesn't.
Specifically, the secant/cosecant/cotangents and their inverses and
hyperbolic counterparts
"""
import numpy
# Normal Trig
def sec(arg):
"""
Secant
"""
return 1 / numpy.cos(arg)
def csc(arg):
"""
Cosecant
"""
return 1 / numpy.sin(arg)
def cot(arg):
"""
Cotangent
"""
return 1 / numpy.tan(arg)
# Inverse Trig
# http://en.wikipedia.org/wiki/Inverse_trigonometric_functions#Relationships_among_the_inverse_trigonometric_functions
def arcsec(val):
"""
Inverse secant
"""
return numpy.arccos(1. / val)
def arccsc(val):
"""
Inverse cosecant
"""
return numpy.arcsin(1. / val)
def arccot(val):
"""
Inverse cotangent
"""
if numpy.real(val) < 0:
return -numpy.pi / 2 - numpy.arctan(val)
else:
return numpy.pi / 2 - numpy.arctan(val)
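# Added note: this arccot uses the odd-function branch, e.g.
# arccot(1) == pi/4 and arccot(-1) == -pi/4.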
# Hyperbolic Trig
def sech(arg):
"""
Hyperbolic secant
"""
return 1 / numpy.cosh(arg)
def csch(arg):
"""
Hyperbolic cosecant
"""
return 1 / numpy.sinh(arg)
def coth(arg):
"""
Hyperbolic cotangent
"""
return 1 / numpy.tanh(arg)
# And their inverses
def arcsech(val):
"""
Inverse hyperbolic secant
"""
return numpy.arccosh(1. / val)
def arccsch(val):
"""
Inverse hyperbolic cosecant
"""
return numpy.arcsinh(1. / val)
def arccoth(val):
"""
Inverse hyperbolic cotangent
"""
return numpy.arctanh(1. / val)
|
agpl-3.0
|
sbelskie/symplicity
|
Symplicity/local_settings.py
|
1
|
2695
|
"""
Django settings for Symplicity project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import dj_database_url
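# Added note (assumption, not part of the original settings): dj_database_url
# is typically used to read a DATABASE_URL environment variable, e.g.
#   DATABASES = {'default': dj_database_url.config(default='postgres://...')}
# Here the import is unused and the database is configured explicitly below.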
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'symptom_tracker',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Symplicity.urls'
WSGI_APPLICATION = 'Symplicity.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'symplicity',
'USER':'postgres',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': '5432'
}
}
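# Hedged sketch (assumption, not part of the original settings): dj_database_url
# is imported above but never used here. On a Heroku-style deployment it would
# typically override the default connection from a DATABASE_URL environment
# variable, e.g.:
#
#     DATABASES['default'] = dj_database_url.config(
#         default='postgres://postgres:password@localhost:5432/symplicity'
#     )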
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
|
apache-2.0
|
kamenim/samba
|
third_party/waf/wafadmin/Tools/ocaml.py
|
32
|
9092
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"ocaml support"
import os, re
import TaskGen, Utils, Task, Build
from Logs import error
from TaskGen import taskgen, feature, before, after, extension
EXT_MLL = ['.mll']
EXT_MLY = ['.mly']
EXT_MLI = ['.mli']
EXT_MLC = ['.c']
EXT_ML = ['.ml']
open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
def filter_comments(txt):
meh = [0]
def repl(m):
if m.group(1): meh[0] += 1
elif m.group(2): meh[0] -= 1
elif not meh[0]: return m.group(0)
return ''
return foo.sub(repl, txt)
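# Hedged illustration (not part of the original tool): filter_comments strips
# OCaml (* ... *) comments while keeping the surrounding code, e.g.
#     filter_comments('(* header *) open List')  ->  ' open List'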
def scan(self):
node = self.inputs[0]
code = filter_comments(node.read(self.env))
global open_re
names = []
import_iterator = open_re.finditer(code)
if import_iterator:
for import_match in import_iterator:
names.append(import_match.group(1))
found_lst = []
raw_lst = []
for name in names:
nd = None
for x in self.incpaths:
nd = x.find_resource(name.lower()+'.ml')
if not nd: nd = x.find_resource(name+'.ml')
if nd:
found_lst.append(nd)
break
else:
raw_lst.append(name)
return (found_lst, raw_lst)
native_lst=['native', 'all', 'c_object']
bytecode_lst=['bytecode', 'all']
class ocaml_taskgen(TaskGen.task_gen):
def __init__(self, *k, **kw):
TaskGen.task_gen.__init__(self, *k, **kw)
@feature('ocaml')
def init_ml(self):
Utils.def_attrs(self,
type = 'all',
incpaths_lst = [],
bld_incpaths_lst = [],
mlltasks = [],
mlytasks = [],
mlitasks = [],
native_tasks = [],
bytecode_tasks = [],
linktasks = [],
bytecode_env = None,
native_env = None,
compiled_tasks = [],
includes = '',
uselib = '',
are_deps_set = 0)
@feature('ocaml')
@after('init_ml')
def init_envs_ml(self):
self.islibrary = getattr(self, 'islibrary', False)
global native_lst, bytecode_lst
self.native_env = None
if self.type in native_lst:
self.native_env = self.env.copy()
if self.islibrary: self.native_env['OCALINKFLAGS'] = '-a'
self.bytecode_env = None
if self.type in bytecode_lst:
self.bytecode_env = self.env.copy()
if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a'
if self.type == 'c_object':
self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj')
@feature('ocaml')
@before('apply_vars_ml')
@after('init_envs_ml')
def apply_incpaths_ml(self):
inc_lst = self.includes.split()
lst = self.incpaths_lst
for dir in inc_lst:
node = self.path.find_dir(dir)
if not node:
error("node not found: " + str(dir))
continue
self.bld.rescan(node)
if not node in lst: lst.append(node)
self.bld_incpaths_lst.append(node)
# now the nodes are added to self.incpaths_lst
@feature('ocaml')
@before('apply_core')
def apply_vars_ml(self):
for i in self.incpaths_lst:
if self.bytecode_env:
app = self.bytecode_env.append_value
app('OCAMLPATH', '-I')
app('OCAMLPATH', i.srcpath(self.env))
app('OCAMLPATH', '-I')
app('OCAMLPATH', i.bldpath(self.env))
if self.native_env:
app = self.native_env.append_value
app('OCAMLPATH', '-I')
app('OCAMLPATH', i.bldpath(self.env))
app('OCAMLPATH', '-I')
app('OCAMLPATH', i.srcpath(self.env))
varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT']
for name in self.uselib.split():
for vname in varnames:
cnt = self.env[vname+'_'+name]
if cnt:
if self.bytecode_env: self.bytecode_env.append_value(vname, cnt)
if self.native_env: self.native_env.append_value(vname, cnt)
@feature('ocaml')
@after('apply_core')
def apply_link_ml(self):
if self.bytecode_env:
ext = self.islibrary and '.cma' or '.run'
linktask = self.create_task('ocalink')
linktask.bytecode = 1
linktask.set_outputs(self.path.find_or_declare(self.target + ext))
linktask.obj = self
linktask.env = self.bytecode_env
self.linktasks.append(linktask)
if self.native_env:
if self.type == 'c_object': ext = '.o'
elif self.islibrary: ext = '.cmxa'
else: ext = ''
linktask = self.create_task('ocalinkx')
linktask.set_outputs(self.path.find_or_declare(self.target + ext))
linktask.obj = self
linktask.env = self.native_env
self.linktasks.append(linktask)
# we produce a .o file to be used by gcc
self.compiled_tasks.append(linktask)
@extension(EXT_MLL)
def mll_hook(self, node):
mll_task = self.create_task('ocamllex', node, node.change_ext('.ml'), env=self.native_env)
self.mlltasks.append(mll_task)
self.allnodes.append(mll_task.outputs[0])
@extension(EXT_MLY)
def mly_hook(self, node):
mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')], env=self.native_env)
self.mlytasks.append(mly_task)
self.allnodes.append(mly_task.outputs[0])
task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi'), env=self.native_env)
@extension(EXT_MLI)
def mli_hook(self, node):
task = self.create_task('ocamlcmi', node, node.change_ext('.cmi'), env=self.native_env)
self.mlitasks.append(task)
@extension(EXT_MLC)
def mlc_hook(self, node):
task = self.create_task('ocamlcc', node, node.change_ext('.o'), env=self.native_env)
self.compiled_tasks.append(task)
@extension(EXT_ML)
def ml_hook(self, node):
if self.native_env:
task = self.create_task('ocamlx', node, node.change_ext('.cmx'), env=self.native_env)
task.obj = self
task.incpaths = self.bld_incpaths_lst
self.native_tasks.append(task)
if self.bytecode_env:
task = self.create_task('ocaml', node, node.change_ext('.cmo'), env=self.bytecode_env)
task.obj = self
task.bytecode = 1
task.incpaths = self.bld_incpaths_lst
self.bytecode_tasks.append(task)
def compile_may_start(self):
if not getattr(self, 'flag_deps', ''):
self.flag_deps = 1
# the evil part is that we can only compute the dependencies after the
# source files can be read (this means actually producing the source files)
if getattr(self, 'bytecode', ''): alltasks = self.obj.bytecode_tasks
else: alltasks = self.obj.native_tasks
self.signature() # ensure that files are scanned - unfortunately
tree = self.generator.bld
env = self.env
for node in self.inputs:
lst = tree.node_deps[self.unique_id()]
for depnode in lst:
for t in alltasks:
if t == self: continue
if depnode in t.inputs:
self.set_run_after(t)
# TODO necessary to get the signature right - for now
delattr(self, 'cache_sig')
self.signature()
return Task.Task.runnable_status(self)
b = Task.simple_task_type
cls = b('ocamlx', '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
cls.runnable_status = compile_may_start
cls.scan = scan
b = Task.simple_task_type
cls = b('ocaml', '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
cls.runnable_status = compile_may_start
cls.scan = scan
b('ocamlcmi', '${OCAMLC} ${OCAMLPATH} ${INCLUDES} -o ${TGT} -c ${SRC}', color='BLUE', before="ocaml ocamlcc ocamlx")
b('ocamlcc', 'cd ${TGT[0].bld_dir(env)} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${INCLUDES} -c ${SRC[0].abspath(env)}', color='GREEN')
b('ocamllex', '${OCAMLLEX} ${SRC} -o ${TGT}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
b('ocamlyacc', '${OCAMLYACC} -b ${TGT[0].bld_base(env)} ${SRC}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
def link_may_start(self):
if not getattr(self, 'order', ''):
# now reorder the inputs given the task dependencies
if getattr(self, 'bytecode', 0): alltasks = self.obj.bytecode_tasks
else: alltasks = self.obj.native_tasks
# this part is difficult, we do not have a total order on the tasks
# if the dependencies are wrong, this may not stop
seen = []
pendant = []+alltasks
while pendant:
task = pendant.pop(0)
if task in seen: continue
for x in task.run_after:
if not x in seen:
pendant.append(task)
break
else:
seen.append(task)
self.inputs = [x.outputs[0] for x in seen]
self.order = 1
return Task.Task.runnable_status(self)
act = b('ocalink', '${OCAMLC} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS} ${SRC}', color='YELLOW', after="ocaml ocamlcc")
act.runnable_status = link_may_start
act = b('ocalinkx', '${OCAMLOPT} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS_OPT} ${SRC}', color='YELLOW', after="ocamlx ocamlcc")
act.runnable_status = link_may_start
def detect(conf):
opt = conf.find_program('ocamlopt', var='OCAMLOPT')
occ = conf.find_program('ocamlc', var='OCAMLC')
if (not opt) or (not occ):
conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH')
v = conf.env
v['OCAMLC'] = occ
v['OCAMLOPT'] = opt
v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX')
v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC')
v['OCAMLFLAGS'] = ''
v['OCAMLLIB'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
v['LIBPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
v['CPPPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
v['LIB_OCAML'] = 'camlrun'
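# Hedged usage sketch (assumption, not part of the original tool): under the
# waf 1.5 / wafadmin conventions this tool targets, a project wscript would
# typically do something like
#
#     def configure(conf):
#         conf.check_tool('ocaml')
#
#     def build(bld):
#         bld.new_task_gen(features='ocaml', source='main.ml',
#                          target='prog', type='native')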
|
gpl-3.0
|
jelugbo/hebs_master
|
cms/djangoapps/contentstore/views/item.py
|
11
|
41646
|
"""Views for items (modules)."""
from __future__ import absolute_import
import hashlib
import logging
from uuid import uuid4
from datetime import datetime
from pytz import UTC
import json
from collections import OrderedDict
from functools import partial
from static_replace import replace_static_urls
from xmodule_modifiers import wrap_xblock, request_token
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseBadRequest, HttpResponse, Http404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from xblock.fields import Scope
from xblock.fragment import Fragment
import xmodule
from xmodule.tabs import StaticTab, CourseTabList
from xmodule.modulestore import ModuleStoreEnum, EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from xmodule.modulestore.inheritance import own_metadata
from xmodule.modulestore.draft_and_published import DIRECT_ONLY_CATEGORIES
from xmodule.x_module import PREVIEW_VIEWS, STUDIO_VIEW, STUDENT_VIEW
from xmodule.course_module import DEFAULT_START_DATE
from django.contrib.auth.models import User
from util.date_utils import get_default_time_display
from util.json_request import expect_json, JsonResponse
from .access import has_course_access
from contentstore.utils import find_release_date_source, find_staff_lock_source, is_currently_visible_to_students, \
ancestor_has_staff_lock
from contentstore.views.helpers import is_unit, xblock_studio_url, xblock_primary_child_category, \
xblock_type_display_name, get_parent_xblock
from contentstore.views.preview import get_preview_fragment
from edxmako.shortcuts import render_to_string
from models.settings.course_grading import CourseGradingModel
from cms.lib.xblock.runtime import handler_url, local_resource_url
from opaque_keys.edx.keys import UsageKey, CourseKey
__all__ = ['orphan_handler', 'xblock_handler', 'xblock_view_handler', 'xblock_outline_handler']
log = logging.getLogger(__name__)
CREATE_IF_NOT_FOUND = ['course_info']
# Useful constants for defining predicates
NEVER = lambda x: False
ALWAYS = lambda x: True
# In order to allow descriptors to use a handler url, we need to
# monkey-patch the x_module library.
# TODO: Remove this code when Runtimes are no longer created by modulestores
xmodule.x_module.descriptor_global_handler_url = handler_url
xmodule.x_module.descriptor_global_local_resource_url = local_resource_url
def hash_resource(resource):
"""
Hash a :class:`xblock.fragment.FragmentResource`.
"""
md5 = hashlib.md5()
md5.update(repr(resource))
return md5.hexdigest()
def usage_key_with_run(usage_key_string):
"""
Converts usage_key_string to a UsageKey, adding a course run if necessary
"""
usage_key = UsageKey.from_string(usage_key_string)
usage_key = usage_key.replace(course_key=modulestore().fill_in_run(usage_key.course_key))
return usage_key
# pylint: disable=unused-argument
@require_http_methods(("DELETE", "GET", "PUT", "POST", "PATCH"))
@login_required
@expect_json
def xblock_handler(request, usage_key_string):
"""
The restful handler for xblock requests.
DELETE
json: delete this xblock instance from the course.
GET
json: returns representation of the xblock (locator id, data, and metadata).
if ?fields=graderType, it returns the graderType for the unit instead of the above.
html: returns HTML for rendering the xblock (which includes both the "preview" view and the "editor" view)
PUT or POST or PATCH
json: if xblock locator is specified, update the xblock instance. The json payload can contain
these fields, all optional:
:data: the new value for the data.
:children: the unicode representation of the UsageKeys of children for this xblock.
            :metadata: new values for the metadata fields. Any whose values are None will be deleted, not set
              to None! Absent ones will be left alone.
:nullout: which metadata fields to set to None
:graderType: change how this unit is graded
:publish: can be:
'make_public': publish the content
'republish': publish this item *only* if it was previously published
'discard_changes' - reverts to the last published version
Note: If 'discard_changes', the other fields will not be used; that is, it is not possible
to update and discard changes in a single operation.
The JSON representation on the updated xblock (minus children) is returned.
if usage_key_string is not specified, create a new xblock instance, either by duplicating
        an existing xblock, or creating an entirely new one. The json payload can contain
these fields:
:parent_locator: parent for new xblock, required for both duplicate and create new instance
:duplicate_source_locator: if present, use this as the source for creating a duplicate copy
:category: type of xblock, required if duplicate_source_locator is not present.
:display_name: name for new xblock, optional
:boilerplate: template name for populating fields, optional and only used
if duplicate_source_locator is not present
The locator (unicode representation of a UsageKey) for the created xblock (minus children) is returned.
"""
if usage_key_string:
usage_key = usage_key_with_run(usage_key_string)
if not has_course_access(request.user, usage_key.course_key):
raise PermissionDenied()
if request.method == 'GET':
accept_header = request.META.get('HTTP_ACCEPT', 'application/json')
if 'application/json' in accept_header:
fields = request.REQUEST.get('fields', '').split(',')
if 'graderType' in fields:
# right now can't combine output of this w/ output of _get_module_info, but worthy goal
return JsonResponse(CourseGradingModel.get_section_grader_type(usage_key))
# TODO: pass fields to _get_module_info and only return those
rsp = _get_module_info(_get_xblock(usage_key, request.user))
return JsonResponse(rsp)
else:
return HttpResponse(status=406)
elif request.method == 'DELETE':
_delete_item(usage_key, request.user)
return JsonResponse()
else: # Since we have a usage_key, we are updating an existing xblock.
return _save_xblock(
request.user,
_get_xblock(usage_key, request.user),
data=request.json.get('data'),
children_strings=request.json.get('children'),
metadata=request.json.get('metadata'),
nullout=request.json.get('nullout'),
grader_type=request.json.get('graderType'),
publish=request.json.get('publish'),
)
elif request.method in ('PUT', 'POST'):
if 'duplicate_source_locator' in request.json:
parent_usage_key = usage_key_with_run(request.json['parent_locator'])
duplicate_source_usage_key = usage_key_with_run(request.json['duplicate_source_locator'])
dest_usage_key = _duplicate_item(
parent_usage_key,
duplicate_source_usage_key,
request.user,
request.json.get('display_name'),
)
return JsonResponse({"locator": unicode(dest_usage_key), "courseKey": unicode(dest_usage_key.course_key)})
else:
return _create_item(request)
else:
return HttpResponseBadRequest(
"Only instance creation is supported without a usage key.",
content_type="text/plain"
)
# pylint: disable=unused-argument
@require_http_methods(("GET"))
@login_required
@expect_json
def xblock_view_handler(request, usage_key_string, view_name):
"""
The restful handler for requests for rendered xblock views.
Returns a json object containing two keys:
html: The rendered html of the view
resources: A list of tuples where the first element is the resource hash, and
the second is the resource description
"""
usage_key = usage_key_with_run(usage_key_string)
if not has_course_access(request.user, usage_key.course_key):
raise PermissionDenied()
accept_header = request.META.get('HTTP_ACCEPT', 'application/json')
if 'application/json' in accept_header:
store = modulestore()
xblock = store.get_item(usage_key)
container_views = ['container_preview', 'reorderable_container_child_preview']
# wrap the generated fragment in the xmodule_editor div so that the javascript
# can bind to it correctly
xblock.runtime.wrappers.append(partial(
wrap_xblock,
'StudioRuntime',
usage_id_serializer=unicode,
request_token=request_token(request),
))
if view_name == STUDIO_VIEW:
try:
fragment = xblock.render(STUDIO_VIEW)
# catch exceptions indiscriminately, since after this point they escape the
# dungeon and surface as uneditable, unsaveable, and undeletable
# component-goblins.
except Exception as exc: # pylint: disable=w0703
log.debug("unable to render studio_view for %r", xblock, exc_info=True)
fragment = Fragment(render_to_string('html_error.html', {'message': str(exc)}))
elif view_name in (PREVIEW_VIEWS + container_views):
is_pages_view = view_name == STUDENT_VIEW # Only the "Pages" view uses student view in Studio
# Determine the items to be shown as reorderable. Note that the view
# 'reorderable_container_child_preview' is only rendered for xblocks that
# are being shown in a reorderable container, so the xblock is automatically
# added to the list.
reorderable_items = set()
if view_name == 'reorderable_container_child_preview':
reorderable_items.add(xblock.location)
# Set up the context to be passed to each XBlock's render method.
context = {
'is_pages_view': is_pages_view, # This setting disables the recursive wrapping of xblocks
'is_unit_page': is_unit(xblock),
'root_xblock': xblock if (view_name == 'container_preview') else None,
'reorderable_items': reorderable_items
}
fragment = get_preview_fragment(request, xblock, context)
# Note that the container view recursively adds headers into the preview fragment,
# so only the "Pages" view requires that this extra wrapper be included.
if is_pages_view:
fragment.content = render_to_string('component.html', {
'xblock_context': context,
'xblock': xblock,
'locator': usage_key,
'preview': fragment.content,
'label': xblock.display_name or xblock.scope_ids.block_type,
})
else:
raise Http404
hashed_resources = OrderedDict()
for resource in fragment.resources:
hashed_resources[hash_resource(resource)] = resource
return JsonResponse({
'html': fragment.content,
'resources': hashed_resources.items()
})
else:
return HttpResponse(status=406)
# pylint: disable=unused-argument
@require_http_methods(("GET"))
@login_required
@expect_json
def xblock_outline_handler(request, usage_key_string):
"""
The restful handler for requests for XBlock information about the block and its children.
This is used by the course outline in particular to construct the tree representation of
a course.
"""
usage_key = usage_key_with_run(usage_key_string)
if not has_course_access(request.user, usage_key.course_key):
raise PermissionDenied()
response_format = request.REQUEST.get('format', 'html')
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
store = modulestore()
root_xblock = store.get_item(usage_key)
return JsonResponse(create_xblock_info(
root_xblock,
include_child_info=True,
course_outline=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical'
))
else:
        raise Http404
def _update_with_callback(xblock, user, old_metadata=None, old_content=None):
"""
Updates the xblock in the modulestore.
But before doing so, it calls the xblock's editor_saved callback function.
"""
if callable(getattr(xblock, "editor_saved", None)):
if old_metadata is None:
old_metadata = own_metadata(xblock)
if old_content is None:
old_content = xblock.get_explicitly_set_fields_by_scope(Scope.content)
xblock.editor_saved(user, old_metadata, old_content)
# Update after the callback so any changes made in the callback will get persisted.
return modulestore().update_item(xblock, user.id)
def _save_xblock(user, xblock, data=None, children_strings=None, metadata=None, nullout=None,
grader_type=None, publish=None):
"""
Saves xblock w/ its fields. Has special processing for grader_type, publish, and nullout and Nones in metadata.
nullout means to truly set the field to None whereas nones in metadata mean to unset them (so they revert
to default).
"""
store = modulestore()
# Perform all xblock changes within a (single-versioned) transaction
with store.bulk_operations(xblock.location.course_key):
# Don't allow updating an xblock and discarding changes in a single operation (unsupported by UI).
if publish == "discard_changes":
store.revert_to_published(xblock.location, user.id)
# Returning the same sort of result that we do for other save operations. In the future,
# we may want to return the full XBlockInfo.
return JsonResponse({'id': unicode(xblock.location)})
old_metadata = own_metadata(xblock)
old_content = xblock.get_explicitly_set_fields_by_scope(Scope.content)
if data:
# TODO Allow any scope.content fields not just "data" (exactly like the get below this)
xblock.data = data
else:
data = old_content['data'] if 'data' in old_content else None
if children_strings is not None:
children = []
for child_string in children_strings:
children.append(usage_key_with_run(child_string))
# if new children have been added, remove them from their old parents
new_children = set(children) - set(xblock.children)
for new_child in new_children:
old_parent_location = store.get_parent_location(new_child)
if old_parent_location:
old_parent = store.get_item(old_parent_location)
old_parent.children.remove(new_child)
old_parent = _update_with_callback(old_parent, user)
else:
# the Studio UI currently doesn't present orphaned children, so assume this is an error
return JsonResponse({"error": "Invalid data, possibly caused by concurrent authors."}, 400)
# make sure there are no old children that became orphans
# In a single-author (no-conflict) scenario, all children in the persisted list on the server should be
# present in the updated list. If there are any children that have been dropped as part of this update,
# then that would be an error.
#
            # We can be even more restrictive in a multi-author (conflict) scenario by returning an error whenever
# len(old_children) > 0. However, that conflict can still be "merged" if the dropped child had been
# re-parented. Hence, the check for the parent in the any statement below.
#
# Note that this multi-author conflict error should not occur in modulestores (such as Split) that support
# atomic write transactions. In Split, if there was another author who moved one of the "old_children"
# into another parent, then that child would have been deleted from this parent on the server. However,
            # this error could occur in modulestores (such as Draft) that do not support atomic write transactions
old_children = set(xblock.children) - set(children)
if any(
store.get_parent_location(old_child) == xblock.location
for old_child in old_children
):
# since children are moved as part of a single transaction, orphans should not be created
return JsonResponse({"error": "Invalid data, possibly caused by concurrent authors."}, 400)
# set the children on the xblock
xblock.children = children
# also commit any metadata which might have been passed along
if nullout is not None or metadata is not None:
# the postback is not the complete metadata, as there's system metadata which is
# not presented to the end-user for editing. So let's use the original (existing_item) and
# 'apply' the submitted metadata, so we don't end up deleting system metadata.
if nullout is not None:
for metadata_key in nullout:
setattr(xblock, metadata_key, None)
# update existing metadata with submitted metadata (which can be partial)
# IMPORTANT NOTE: if the client passed 'null' (None) for a piece of metadata that means 'remove it'. If
# the intent is to make it None, use the nullout field
if metadata is not None:
for metadata_key, value in metadata.items():
field = xblock.fields[metadata_key]
if value is None:
field.delete_from(xblock)
else:
try:
value = field.from_json(value)
except ValueError:
return JsonResponse({"error": "Invalid data"}, 400)
field.write_to(xblock, value)
# update the xblock and call any xblock callbacks
xblock = _update_with_callback(xblock, user, old_metadata, old_content)
# for static tabs, their containing course also records their display name
if xblock.location.category == 'static_tab':
course = store.get_course(xblock.location.course_key)
# find the course's reference to this tab and update the name.
static_tab = CourseTabList.get_tab_by_slug(course.tabs, xblock.location.name)
# only update if changed
if static_tab and static_tab['name'] != xblock.display_name:
static_tab['name'] = xblock.display_name
store.update_item(course, user.id)
result = {
'id': unicode(xblock.location),
'data': data,
'metadata': own_metadata(xblock)
}
if grader_type is not None:
result.update(CourseGradingModel.update_section_grader_type(xblock, grader_type, user))
# If publish is set to 'republish' and this item is not in direct only categories and has previously been published,
# then this item should be republished. This is used by staff locking to ensure that changing the draft
# value of the staff lock will also update the published version, but only at the unit level.
if publish == 'republish' and xblock.category not in DIRECT_ONLY_CATEGORIES:
if modulestore().has_published_version(xblock):
publish = 'make_public'
# Make public after updating the xblock, in case the caller asked for both an update and a publish.
# Used by Bok Choy tests and by republishing of staff locks.
if publish == 'make_public':
modulestore().publish(xblock.location, user.id)
# Note that children aren't being returned until we have a use case.
return JsonResponse(result, encoder=EdxJSONEncoder)
@login_required
@expect_json
def _create_item(request):
"""View for create items."""
usage_key = usage_key_with_run(request.json['parent_locator'])
category = request.json['category']
display_name = request.json.get('display_name')
if not has_course_access(request.user, usage_key.course_key):
raise PermissionDenied()
store = modulestore()
with store.bulk_operations(usage_key.course_key):
parent = store.get_item(usage_key)
dest_usage_key = usage_key.replace(category=category, name=uuid4().hex)
# get the metadata, display_name, and definition from the request
metadata = {}
data = None
template_id = request.json.get('boilerplate')
if template_id:
clz = parent.runtime.load_block_type(category)
if clz is not None:
template = clz.get_template(template_id)
if template is not None:
metadata = template.get('metadata', {})
data = template.get('data')
if display_name is not None:
metadata['display_name'] = display_name
# TODO need to fix components that are sending definition_data as strings, instead of as dicts
# For now, migrate them into dicts here.
if isinstance(data, basestring):
data = {'data': data}
created_block = store.create_child(
request.user.id,
usage_key,
dest_usage_key.block_type,
block_id=dest_usage_key.block_id,
definition_data=data,
metadata=metadata,
runtime=parent.runtime,
)
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if category == 'static_tab':
display_name = display_name or _("Empty") # Prevent name being None
course = store.get_course(dest_usage_key.course_key)
course.tabs.append(
StaticTab(
name=display_name,
url_slug=dest_usage_key.name,
)
)
store.update_item(course, request.user.id)
return JsonResponse({"locator": unicode(created_block.location), "courseKey": unicode(created_block.location.course_key)})
def _duplicate_item(parent_usage_key, duplicate_source_usage_key, user, display_name=None):
"""
Duplicate an existing xblock as a child of the supplied parent_usage_key.
"""
store = modulestore()
with store.bulk_operations(duplicate_source_usage_key.course_key):
source_item = store.get_item(duplicate_source_usage_key)
# Change the blockID to be unique.
dest_usage_key = source_item.location.replace(name=uuid4().hex)
category = dest_usage_key.block_type
# Update the display name to indicate this is a duplicate (unless display name provided).
duplicate_metadata = own_metadata(source_item)
if display_name is not None:
duplicate_metadata['display_name'] = display_name
else:
if source_item.display_name is None:
duplicate_metadata['display_name'] = _("Duplicate of {0}").format(source_item.category)
else:
duplicate_metadata['display_name'] = _("Duplicate of '{0}'").format(source_item.display_name)
dest_module = store.create_item(
user.id,
dest_usage_key.course_key,
dest_usage_key.block_type,
block_id=dest_usage_key.block_id,
definition_data=source_item.get_explicitly_set_fields_by_scope(Scope.content),
metadata=duplicate_metadata,
runtime=source_item.runtime,
)
# Children are not automatically copied over (and not all xblocks have a 'children' attribute).
# Because DAGs are not fully supported, we need to actually duplicate each child as well.
if source_item.has_children:
dest_module.children = []
for child in source_item.children:
dupe = _duplicate_item(dest_module.location, child, user=user)
dest_module.children.append(dupe)
store.update_item(dest_module, user.id)
if 'detached' not in source_item.runtime.load_block_type(category)._class_tags:
parent = store.get_item(parent_usage_key)
# If source was already a child of the parent, add duplicate immediately afterward.
# Otherwise, add child to end.
if source_item.location in parent.children:
source_index = parent.children.index(source_item.location)
parent.children.insert(source_index + 1, dest_module.location)
else:
parent.children.append(dest_module.location)
store.update_item(parent, user.id)
return dest_module.location
def _delete_item(usage_key, user):
"""
Deletes an existing xblock with the given usage_key.
If the xblock is a Static Tab, removes it from course.tabs as well.
"""
store = modulestore()
with store.bulk_operations(usage_key.course_key):
# VS[compat] cdodge: This is a hack because static_tabs also have references from the course module, so
# if we add one then we need to also add it to the policy information (i.e. metadata)
# we should remove this once we can break this reference from the course to static tabs
if usage_key.category == 'static_tab':
course = store.get_course(usage_key.course_key)
existing_tabs = course.tabs or []
course.tabs = [tab for tab in existing_tabs if tab.get('url_slug') != usage_key.name]
store.update_item(course, user.id)
store.delete_item(usage_key, user.id)
# pylint: disable=W0613
@login_required
@require_http_methods(("GET", "DELETE"))
def orphan_handler(request, course_key_string):
"""
View for handling orphan related requests. GET gets all of the current orphans.
DELETE removes all orphans (requires is_staff access)
An orphan is a block whose category is not in the DETACHED_CATEGORY list, is not the root, and is not reachable
from the root via children
"""
course_usage_key = CourseKey.from_string(course_key_string)
if request.method == 'GET':
if has_course_access(request.user, course_usage_key):
return JsonResponse([unicode(item) for item in modulestore().get_orphans(course_usage_key)])
else:
raise PermissionDenied()
if request.method == 'DELETE':
if request.user.is_staff:
store = modulestore()
items = store.get_orphans(course_usage_key)
for itemloc in items:
# need to delete all versions
store.delete_item(itemloc, request.user.id, revision=ModuleStoreEnum.RevisionOption.all)
return JsonResponse({'deleted': [unicode(item) for item in items]})
else:
raise PermissionDenied()
def _get_xblock(usage_key, user):
"""
    Returns the xblock for the specified usage key. Note: if the item is not found and its category
    is in the CREATE_IF_NOT_FOUND list, an xblock will be created and saved automatically.
"""
store = modulestore()
with store.bulk_operations(usage_key.course_key):
try:
return store.get_item(usage_key, depth=None)
except ItemNotFoundError:
if usage_key.category in CREATE_IF_NOT_FOUND:
# Create a new one for certain categories only. Used for course info handouts.
return store.create_item(user.id, usage_key.course_key, usage_key.block_type, block_id=usage_key.block_id)
else:
raise
except InvalidLocationError:
log.error("Can't find item by location.")
return JsonResponse({"error": "Can't find item by location: " + unicode(usage_key)}, 404)
def _get_module_info(xblock, rewrite_static_links=True):
"""
    Returns the metadata, data, and id representation of a leaf module.
    :param xblock: the leaf module (an XBlock instance)
    :param rewrite_static_links: whether to rewrite static asset URLs in the returned data
"""
with modulestore().bulk_operations(xblock.location.course_key):
data = getattr(xblock, 'data', '')
if rewrite_static_links:
data = replace_static_urls(
data,
None,
course_id=xblock.location.course_key
)
# Pre-cache has changes for the entire course because we'll need it for the ancestor info
modulestore().has_changes(modulestore().get_course(xblock.location.course_key, depth=None))
# Note that children aren't being returned until we have a use case.
return create_xblock_info(xblock, data=data, metadata=own_metadata(xblock), include_ancestor_info=True)
def create_xblock_info(xblock, data=None, metadata=None, include_ancestor_info=False, include_child_info=False,
course_outline=False, include_children_predicate=NEVER, parent_xblock=None, graders=None):
"""
Creates the information needed for client-side XBlockInfo.
If data or metadata are not specified, their information will not be added
(regardless of whether or not the xblock actually has data or metadata).
There are three optional boolean parameters:
include_ancestor_info - if true, ancestor info is added to the response
include_child_info - if true, direct child info is included in the response
course_outline - if true, the xblock is being rendered on behalf of the course outline.
There are certain expensive computations that do not need to be included in this case.
In addition, an optional include_children_predicate argument can be provided to define whether or
not a particular xblock should have its children included.
"""
def safe_get_username(user_id):
"""
Guard against bad user_ids, like the infamous "**replace_user**".
Note that this will ignore our special known IDs (ModuleStoreEnum.UserID).
We should consider adding special handling for those values.
:param user_id: the user id to get the username of
:return: username, or None if the user does not exist or user_id is None
"""
if user_id:
try:
return User.objects.get(id=user_id).username
except: # pylint: disable=bare-except
pass
return None
is_xblock_unit = is_unit(xblock, parent_xblock)
# this should not be calculated for Sections and Subsections on Unit page
has_changes = modulestore().has_changes(xblock) if (is_xblock_unit or course_outline) else None
if graders is None:
graders = CourseGradingModel.fetch(xblock.location.course_key).graders
# Compute the child info first so it can be included in aggregate information for the parent
should_visit_children = include_child_info and (course_outline and not is_xblock_unit or not course_outline)
if should_visit_children and xblock.has_children:
child_info = _create_xblock_child_info(
xblock,
course_outline,
graders,
include_children_predicate=include_children_predicate,
)
else:
child_info = None
# Treat DEFAULT_START_DATE as a magic number that means the release date has not been set
release_date = get_default_time_display(xblock.start) if xblock.start != DEFAULT_START_DATE else None
if xblock.category != 'course':
visibility_state = _compute_visibility_state(xblock, child_info, is_xblock_unit and has_changes)
else:
visibility_state = None
published = modulestore().has_published_version(xblock)
xblock_info = {
"id": unicode(xblock.location),
"display_name": xblock.display_name_with_default,
"category": xblock.category,
"edited_on": get_default_time_display(xblock.subtree_edited_on) if xblock.subtree_edited_on else None,
"published": published,
"published_on": get_default_time_display(xblock.published_on) if xblock.published_on else None,
"studio_url": xblock_studio_url(xblock, parent_xblock),
"released_to_students": datetime.now(UTC) > xblock.start,
"release_date": release_date,
"visibility_state": visibility_state,
"has_explicit_staff_lock": xblock.fields['visible_to_staff_only'].is_set_on(xblock),
"start": xblock.fields['start'].to_json(xblock.start),
"graded": xblock.graded,
"due_date": get_default_time_display(xblock.due),
"due": xblock.fields['due'].to_json(xblock.due),
"format": xblock.format,
"course_graders": json.dumps([grader.get('type') for grader in graders]),
"has_changes": has_changes,
}
if data is not None:
xblock_info["data"] = data
if metadata is not None:
xblock_info["metadata"] = metadata
if include_ancestor_info:
xblock_info['ancestor_info'] = _create_xblock_ancestor_info(xblock, course_outline)
if child_info:
xblock_info['child_info'] = child_info
if visibility_state == VisibilityState.staff_only:
xblock_info["ancestor_has_staff_lock"] = ancestor_has_staff_lock(xblock, parent_xblock)
else:
xblock_info["ancestor_has_staff_lock"] = False
# Currently, 'edited_by', 'published_by', and 'release_date_from' are only used by the
# container page when rendering a unit. Since they are expensive to compute, only include them for units
# that are not being rendered on the course outline.
if is_xblock_unit and not course_outline:
xblock_info["edited_by"] = safe_get_username(xblock.subtree_edited_by)
xblock_info["published_by"] = safe_get_username(xblock.published_by)
xblock_info["currently_visible_to_students"] = is_currently_visible_to_students(xblock)
if release_date:
xblock_info["release_date_from"] = _get_release_date_from(xblock)
if visibility_state == VisibilityState.staff_only:
xblock_info["staff_lock_from"] = _get_staff_lock_from(xblock)
else:
xblock_info["staff_lock_from"] = None
if course_outline:
if xblock_info["has_explicit_staff_lock"]:
xblock_info["staff_only_message"] = True
elif child_info and child_info["children"]:
xblock_info["staff_only_message"] = all([child["staff_only_message"] for child in child_info["children"]])
else:
xblock_info["staff_only_message"] = False
return xblock_info
class VisibilityState(object):
"""
Represents the possible visibility states for an xblock:
live - the block and all of its descendants are live to students (excluding staff only items)
Note: Live means both published and released.
ready - the block is ready to go live and all of its descendants are live or ready (excluding staff only items)
Note: content is ready when it is published and scheduled with a release date in the future.
unscheduled - the block and all of its descendants have no release date (excluding staff only items)
Note: it is valid for items to be published with no release date in which case they are still unscheduled.
needs_attention - the block or its descendants are not fully live, ready or unscheduled (excluding staff only items)
For example: one subsection has draft content, or there's both unreleased and released content in one section.
staff_only - all of the block's content is to be shown to staff only
Note: staff only items do not affect their parent's state.
"""
live = 'live'
ready = 'ready'
unscheduled = 'unscheduled'
needs_attention = 'needs_attention'
staff_only = 'staff_only'
def _compute_visibility_state(xblock, child_info, is_unit_with_changes):
"""
Returns the current publish state for the specified xblock and its children
"""
if xblock.visible_to_staff_only:
return VisibilityState.staff_only
elif is_unit_with_changes:
# Note that a unit that has never been published will fall into this category,
# as well as previously published units with draft content.
return VisibilityState.needs_attention
is_unscheduled = xblock.start == DEFAULT_START_DATE
is_live = datetime.now(UTC) > xblock.start
children = child_info and child_info.get('children', [])
if children and len(children) > 0:
all_staff_only = True
all_unscheduled = True
all_live = True
for child in child_info['children']:
child_state = child['visibility_state']
if child_state == VisibilityState.needs_attention:
return child_state
elif not child_state == VisibilityState.staff_only:
all_staff_only = False
if not child_state == VisibilityState.unscheduled:
all_unscheduled = False
if not child_state == VisibilityState.live:
all_live = False
if all_staff_only:
return VisibilityState.staff_only
elif all_unscheduled:
return VisibilityState.unscheduled if is_unscheduled else VisibilityState.needs_attention
elif all_live:
return VisibilityState.live if is_live else VisibilityState.needs_attention
else:
return VisibilityState.ready if not is_unscheduled else VisibilityState.needs_attention
if is_unscheduled:
return VisibilityState.unscheduled
elif is_live:
return VisibilityState.live
else:
return VisibilityState.ready
def _create_xblock_ancestor_info(xblock, course_outline):
"""
Returns information about the ancestors of an xblock. Note that the direct parent will also return
information about all of its children.
"""
ancestors = []
def collect_ancestor_info(ancestor, include_child_info=False):
"""
Collect xblock info regarding the specified xblock and its ancestors.
"""
if ancestor:
direct_children_only = lambda parent: parent == ancestor
ancestors.append(create_xblock_info(
ancestor,
include_child_info=include_child_info,
course_outline=course_outline,
include_children_predicate=direct_children_only
))
collect_ancestor_info(get_parent_xblock(ancestor))
collect_ancestor_info(get_parent_xblock(xblock), include_child_info=True)
return {
'ancestors': ancestors
}
def _create_xblock_child_info(xblock, course_outline, graders, include_children_predicate=NEVER):
"""
Returns information about the children of an xblock, as well as about the primary category
of xblock expected as children.
"""
child_info = {}
child_category = xblock_primary_child_category(xblock)
if child_category:
child_info = {
'category': child_category,
'display_name': xblock_type_display_name(child_category, default_display_name=child_category),
}
if xblock.has_children and include_children_predicate(xblock):
child_info['children'] = [
create_xblock_info(
child, include_child_info=True, course_outline=course_outline,
include_children_predicate=include_children_predicate,
parent_xblock=xblock,
graders=graders
) for child in xblock.get_children()
]
return child_info
def _get_release_date_from(xblock):
"""
Returns a string representation of the section or subsection that sets the xblock's release date
"""
return _xblock_type_and_display_name(find_release_date_source(xblock))
def _get_staff_lock_from(xblock):
"""
    Returns a string representation of the section or subsection that sets the xblock's staff lock
"""
source = find_staff_lock_source(xblock)
return _xblock_type_and_display_name(source) if source else None
def _xblock_type_and_display_name(xblock):
"""
Returns a string representation of the xblock's type and display name
"""
return _('{section_or_subsection} "{display_name}"').format(
section_or_subsection=xblock_type_display_name(xblock),
display_name=xblock.display_name_with_default)
|
agpl-3.0
|
rhelmer/socorro
|
socorro/external/postgresql/signature_urls.py
|
10
|
5635
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
from socorro.external import MissingArgumentError, BadArgumentError
from socorro.external.postgresql.base import add_param_to_dict, PostgreSQLBase
from socorro.lib import external_common
logger = logging.getLogger("webapi")
class SignatureURLs(PostgreSQLBase):
def get(self, **kwargs):
""" Return urls for signature """
filters = [
("signature", None, "str"),
("start_date", None, "datetime"),
("end_date", None, "datetime"),
("products", None, ["list", "str"]),
("versions", None, ["list", "str"]),
]
params = external_common.parse_arguments(filters, kwargs)
        # Because no parameters are optional, we need to loop through
        # all parameters to ensure each has been set and is not None
missingParams = []
for param in params:
if not params[param]:
if param == 'versions':
# force versions parameter to being 'ALL' if empty
params[param] = 'ALL'
continue
missingParams.append(param)
if len(missingParams) > 0:
raise MissingArgumentError(", ".join(missingParams))
all_products_versions_sql = """
/* socorro.external.postgresql.signature_urls.SignatureURLs.get */
SELECT url, count(*) as crash_count FROM reports_clean
JOIN reports_user_info USING ( UUID )
JOIN signatures USING ( signature_id )
WHERE reports_clean.date_processed
BETWEEN %(start_date)s AND %(end_date)s
AND reports_user_info.date_processed
BETWEEN %(start_date)s AND %(end_date)s
AND signature = %(signature)s
AND url <> ''
"""
sql = """
/* socorro.external.postgresql.signature_urls.SignatureURLs.get */
SELECT url, count(*) as crash_count FROM reports_clean
JOIN reports_user_info USING ( UUID )
JOIN signatures USING ( signature_id )
JOIN product_versions USING ( product_version_id )
WHERE reports_clean.date_processed
BETWEEN %(start_date)s AND %(end_date)s
AND reports_user_info.date_processed
BETWEEN %(start_date)s AND %(end_date)s
AND signature = %(signature)s
AND url <> ''
AND (
"""
sql_group_order = """ GROUP BY url
ORDER BY crash_count DESC LIMIT 100"""
sql_params = {
"start_date": params.start_date,
"end_date": params.end_date,
"signature": params.signature
}
# if this query is for all products the 'ALL' keyword will be
# the only item in the products list and this will then also
# be for all versions.
if 'ALL' in params['products']:
sql_query = " ".join((all_products_versions_sql, sql_group_order))
# if this query is for all versions the 'ALL' keyword will be
# the only item in the versions list.
elif 'ALL' in params['versions']:
sql_products = " product_name IN %(products)s )"
sql_params['products'] = tuple(params.products)
sql_date_range_limit = """AND %(end_date)s BETWEEN
product_versions.build_date
AND product_versions.sunset_date"""
sql_query = " ".join((sql, sql_products,
sql_date_range_limit, sql_group_order))
else:
products = []
(params["products_versions"],
products) = self.parse_versions(params["versions"], [])
if len(params["products_versions"]) == 0:
raise BadArgumentError(", ".join(params["versions"]))
versions_list = []
products_list = []
for x in range(0, len(params["products_versions"]), 2):
products_list.append(params["products_versions"][x])
versions_list.append(params["products_versions"][x + 1])
product_version_list = []
for prod in params["products"]:
                versions = [versions_list[i]
                            for i, x in enumerate(products_list)
                            if x == prod]
product_version_list.append(tuple(versions))
sql_product_version_ids = [
"""( product_name = %%(product%s)s
AND version_string IN %%(version%s)s ) """
% (x, x) for x in range(len(product_version_list))]
sql_params = add_param_to_dict(sql_params, "version",
product_version_list)
sql_params = add_param_to_dict(sql_params, "product",
params.products)
sql_query = " ".join((sql, " OR ".join(sql_product_version_ids),
" ) " + sql_group_order))
error_message = "Failed to retrieve urls for signature from PostgreSQL"
results = self.query(sql_query, sql_params,
error_message=error_message)
urls = []
for row in results:
url = dict(zip(("url", "crash_count"), row))
urls.append(url)
return {
"hits": urls,
"total": len(urls)
}
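# Hedged usage sketch (illustrative assumption, not part of the original
# module): all filters above are required, so a caller would pass something
# like
#     SignatureURLs(config=config).get(
#         signature='SomeSignature',
#         start_date='2014-01-01T00:00:00',
#         end_date='2014-01-08T00:00:00',
#         products=['Firefox'],
#         versions=['Firefox:27.0'],
#     )
# and get back {'hits': [{'url': ..., 'crash_count': ...}, ...], 'total': N}.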
|
mpl-2.0
|
hujiajie/chromium-crosswalk
|
tools/grit/grit/format/policy_templates/writers/xml_writer_base_unittest.py
|
24
|
1369
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for grit.format.policy_templates.writers.admx_writer."""
import re
import unittest
class XmlWriterBaseTest(unittest.TestCase):
'''Base class for XML writer unit-tests.
'''
def GetXMLOfChildren(self, parent):
'''Returns the XML of all child nodes of the given parent node.
Args:
parent: The XML of the children of this node will be returned.
    Return: XML of the children of the parent node.
'''
raw_pretty_xml = ''.join(
child.toprettyxml(indent=' ') for child in parent.childNodes)
    # Python 2.6.5, which is present in Lucid, has a bug in its pretty print
    # function which produces new lines around string literals. This has been
    # fixed in Precise, which has Python 2.7.3, but we have to keep compatibility
    # with both for now.
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s*</', re.DOTALL)
return text_re.sub('>\g<1></', raw_pretty_xml)
def AssertXMLEquals(self, output, expected_output):
    '''Asserts that the two passed XML arguments are equal.
Args:
output: Actual XML text.
expected_output: Expected XML text.
'''
self.assertEquals(output.strip(), expected_output.strip())
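# Hedged usage sketch (assumption, not part of the original test helper):
# a concrete writer test would subclass XmlWriterBaseTest, build a DOM
# element, and compare its serialized children, e.g.
#
#     class AdmxWriterTest(XmlWriterBaseTest):
#       def testPolicy(self):
#         output = self.GetXMLOfChildren(some_dom_element)
#         self.AssertXMLEquals(output, '<expected>markup</expected>')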
|
bsd-3-clause
|
chunweixu/Deep-Learning
|
face_generation/helper.py
|
160
|
8114
|
import math
import os
import hashlib
from urllib.request import urlretrieve
import zipfile
import gzip
import shutil
import numpy as np
from PIL import Image
from tqdm import tqdm
def _read32(bytestream):
"""
    Read a 32-bit integer from a bytestream
:param bytestream: A bytestream
:return: 32-bit integer
"""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
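# Hedged illustration (not part of the original helper): the four big-endian
# bytes 00 00 08 03 decode to 2051, the magic number checked in _ungzip below:
#     import io
#     _read32(io.BytesIO(b'\x00\x00\x08\x03'))  # -> 2051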
def _unzip(save_path, _, database_name, data_path):
"""
Unzip wrapper with the same interface as _ungzip
    :param save_path: The path of the zip file
    :param database_name: Name of database
    :param data_path: Path to extract to
    :param _: HACK - Used to have the same interface as _ungzip
"""
print('Extracting {}...'.format(database_name))
with zipfile.ZipFile(save_path) as zf:
zf.extractall(data_path)
def _ungzip(save_path, extract_path, database_name, _):
"""
Unzip a gzip file and extract it to extract_path
:param save_path: The path of the gzip files
:param extract_path: The location to extract the data to
:param database_name: Name of database
    :param _: HACK - Used to have the same interface as _unzip
"""
# Get data from save_path
with open(save_path, 'rb') as f:
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number {} in file: {}'.format(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, rows, cols)
# Save data to extract_path
for image_i, image in enumerate(
tqdm(data, unit='File', unit_scale=True, miniters=1, desc='Extracting {}'.format(database_name))):
Image.fromarray(image, 'L').save(os.path.join(extract_path, 'image_{}.jpg'.format(image_i)))
def get_image(image_path, width, height, mode):
"""
Read image from image_path
:param image_path: Path of image
:param width: Width of image
:param height: Height of image
:param mode: Mode of image
:return: Image data
"""
image = Image.open(image_path)
if image.size != (width, height): # HACK - Check if image is from the CELEBA dataset
# Remove most pixels that aren't part of a face
face_width = face_height = 108
j = (image.size[0] - face_width) // 2
i = (image.size[1] - face_height) // 2
image = image.crop([j, i, j + face_width, i + face_height])
image = image.resize([width, height], Image.BILINEAR)
return np.array(image.convert(mode))
def get_batch(image_files, width, height, mode):
    """
    Read a list of image files into a single numpy batch
    :param image_files: Paths of the images to read
    :param width: Width of each image
    :param height: Height of each image
    :param mode: PIL mode of the images (e.g. 'RGB' or 'L')
    :return: 4-D numpy array of image data
    """
data_batch = np.array(
[get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)
# Make sure the images are in 4 dimensions
if len(data_batch.shape) < 4:
data_batch = data_batch.reshape(data_batch.shape + (1,))
return data_batch
def images_square_grid(images, mode):
"""
Save images as a square grid
:param images: Images to be used for the grid
:param mode: The mode to use for images
:return: Image of images in a square grid
"""
# Get maximum size for square grid of images
save_size = math.floor(np.sqrt(images.shape[0]))
# Scale to 0-255
images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)
# Put images in a square arrangement
images_in_square = np.reshape(
images[:save_size*save_size],
(save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))
if mode == 'L':
images_in_square = np.squeeze(images_in_square, 4)
# Combine images to grid image
new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))
for col_i, col_images in enumerate(images_in_square):
for image_i, image in enumerate(col_images):
im = Image.fromarray(image, mode)
new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))
return new_im
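# Hedged usage sketch (illustrative, not part of the original helper): given a
# numpy batch shaped (batch_size, height, width, channels), the grid can be
# saved as a single preview image, e.g.
#     grid = images_square_grid(sample_batch, 'RGB')
#     grid.save('samples_grid.png')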
def download_extract(database_name, data_path):
"""
Download and extract database
:param database_name: Database name
"""
DATASET_CELEBA_NAME = 'celeba'
DATASET_MNIST_NAME = 'mnist'
if database_name == DATASET_CELEBA_NAME:
url = 'https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip'
hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'
extract_path = os.path.join(data_path, 'img_align_celeba')
save_path = os.path.join(data_path, 'celeba.zip')
extract_fn = _unzip
elif database_name == DATASET_MNIST_NAME:
url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'
hash_code = 'f68b3c2dcbeaaa9fbdd348bbdeb94873'
extract_path = os.path.join(data_path, 'mnist')
save_path = os.path.join(data_path, 'train-images-idx3-ubyte.gz')
extract_fn = _ungzip
if os.path.exists(extract_path):
print('Found {} Data'.format(database_name))
return
if not os.path.exists(data_path):
os.makedirs(data_path)
if not os.path.exists(save_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:
urlretrieve(
url,
save_path,
pbar.hook)
assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \
'{} file is corrupted. Remove the file and try again.'.format(save_path)
os.makedirs(extract_path)
try:
extract_fn(save_path, extract_path, database_name, data_path)
except Exception as err:
shutil.rmtree(extract_path) # Remove extraction folder if there is an error
raise err
# Remove compressed data
os.remove(save_path)
class Dataset(object):
"""
Dataset
"""
def __init__(self, dataset_name, data_files):
"""
        Initialize the class
:param dataset_name: Database name
:param data_files: List of files in the database
"""
DATASET_CELEBA_NAME = 'celeba'
DATASET_MNIST_NAME = 'mnist'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
if dataset_name == DATASET_CELEBA_NAME:
self.image_mode = 'RGB'
image_channels = 3
elif dataset_name == DATASET_MNIST_NAME:
self.image_mode = 'L'
image_channels = 1
self.data_files = data_files
self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels
def get_batches(self, batch_size):
"""
Generate batches
:param batch_size: Batch Size
:return: Batches of data
"""
IMAGE_MAX_VALUE = 255
current_index = 0
while current_index + batch_size <= self.shape[0]:
data_batch = get_batch(
self.data_files[current_index:current_index + batch_size],
*self.shape[1:3],
self.image_mode)
current_index += batch_size
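            # Scale pixel values from [0, 255] down to [-0.5, 0.5] before yielding
            # (a commonly used input range when training GANs on image data).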
yield data_batch / IMAGE_MAX_VALUE - 0.5
class DLProgress(tqdm):
"""
Handle Progress Bar while Downloading
"""
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
"""
A hook function that will be called once on establishment of the network connection and
once after each block read thereafter.
:param block_num: A count of blocks transferred so far
:param block_size: Block size in bytes
:param total_size: The total size of the file. This may be -1 on older FTP servers which do not return
a file size in response to a retrieval request.
"""
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
|
mit
|
woozzoom/namebench
|
nb_third_party/dns/dnssec.py
|
215
|
2144
|
# Copyright (C) 2003-2007, 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNSSEC-related functions and constants."""
RSAMD5 = 1
DH = 2
DSA = 3
ECC = 4
RSASHA1 = 5
DSANSEC3SHA1 = 6
RSASHA1NSEC3SHA1 = 7
RSASHA256 = 8
RSASHA512 = 10
INDIRECT = 252
PRIVATEDNS = 253
PRIVATEOID = 254
_algorithm_by_text = {
'RSAMD5' : RSAMD5,
'DH' : DH,
'DSA' : DSA,
'ECC' : ECC,
'RSASHA1' : RSASHA1,
'DSANSEC3SHA1' : DSANSEC3SHA1,
'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
'RSASHA256' : RSASHA256,
'RSASHA512' : RSASHA512,
'INDIRECT' : INDIRECT,
'PRIVATEDNS' : PRIVATEDNS,
'PRIVATEOID' : PRIVATEOID,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_algorithm_by_value = dict([(y, x) for x, y in _algorithm_by_text.iteritems()])
class UnknownAlgorithm(Exception):
"""Raised if an algorithm is unknown."""
pass
def algorithm_from_text(text):
"""Convert text into a DNSSEC algorithm value
@rtype: int"""
value = _algorithm_by_text.get(text.upper())
if value is None:
value = int(text)
return value
def algorithm_to_text(value):
"""Convert a DNSSEC algorithm value to text
@rtype: string"""
text = _algorithm_by_value.get(value)
if text is None:
text = str(value)
return text
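# Illustrative examples (not part of the original module):
#   algorithm_from_text('RSASHA256') == 8, algorithm_from_text('8') == 8
#   algorithm_to_text(8) == 'RSASHA256', algorithm_to_text(99) == '99'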
|
apache-2.0
|
oldm/OldMan
|
oldman/schema/hydra.py
|
1
|
1520
|
from uuid import uuid4
from rdflib import URIRef, RDF, RDFS
from oldman.vocabulary import OLDM_CORRESPONDING_CLASS
class HydraSchemaAdapter(object):
"""Updates some Hydra patterns in the schema graph:
       - hydra:Link: creates a hydra:Class, a subclass of the link range, that supports the same operations
"""
def update_schema_graph(self, graph):
graph = graph.skolemize()
graph = self._update_links(graph)
return graph
@staticmethod
def _update_links(graph):
links = list(graph.subjects(RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Link")))
for link_property in links:
new_class_iri = URIRef(u"http://localhost/.well-known/genid/link_class/%s" % uuid4())
graph.add((new_class_iri, RDF.type, URIRef(u"http://www.w3.org/ns/hydra/core#Class")))
graph.add((link_property, URIRef(OLDM_CORRESPONDING_CLASS), new_class_iri))
# Ranges --> upper classes
ranges = list(graph.objects(link_property, RDFS.range))
for range in ranges:
graph.add((new_class_iri, RDFS.subClassOf, range))
# supported Operations
supported_operation_property = URIRef(u"http://www.w3.org/ns/hydra/core#supportedOperation")
operations = list(graph.objects(link_property, supported_operation_property))
for operation in operations:
graph.add((new_class_iri, supported_operation_property, operation))
return graph
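# Usage sketch (illustrative only; _example_update is a hypothetical helper and its
# arguments are assumed to be rdflib URIRefs, not part of the original module):
# a hydra:Link with a range and a supported operation gains a corresponding
# hydra:Class carrying the range as a superclass and the same operation.
def _example_update(link_iri, range_iri, operation_iri):
    from rdflib import Graph
    hydra = u"http://www.w3.org/ns/hydra/core#"
    graph = Graph()
    graph.add((link_iri, RDF.type, URIRef(hydra + u"Link")))
    graph.add((link_iri, RDFS.range, range_iri))
    graph.add((link_iri, URIRef(hydra + u"supportedOperation"), operation_iri))
    return HydraSchemaAdapter().update_schema_graph(graph)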
|
bsd-3-clause
|
jmehnle/ansible
|
lib/ansible/modules/network/nxos/nxos_snmp_user.py
|
57
|
9987
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_user
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP users for monitoring.
description:
- Manages SNMP user configuration.
author:
- Jason Edelman (@jedelman8)
notes:
    - Authentication parameters are not idempotent.
options:
user:
description:
- Name of the user.
required: true
group:
description:
      - Group to which the user will belong.
required: true
auth:
description:
- Auth parameters for the user.
required: false
default: null
choices: ['md5', 'sha']
pwd:
description:
- Auth password when using md5 or sha.
required: false
default: null
privacy:
description:
- Privacy password for the user.
required: false
default: null
encrypt:
description:
- Enables AES-128 bit encryption when using privacy password.
required: false
default: null
choices: ['true','false']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_snmp_user:
user: ntc
group: network-operator
auth: md5
pwd: test_password
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"authentication": "md5", "group": "network-operator",
"pwd": "test_password", "user": "ntc"}
existing:
description:
- k/v pairs of existing configuration
returned: always
type: dict
sample: {"authentication": "no", "encrypt": "none",
"group": ["network-operator"], "user": "ntc"}
end_state:
  description: k/v pairs of snmp user configuration after module execution
returned: always
type: dict
sample: {"authentication": "md5", "encrypt": "none",
"group": ["network-operator"], "user": "ntc"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-server user ntc network-operator auth md5 test_password"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show', text=False):
if module.params['transport'] == 'cli':
if 'show run' not in command and text is False:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_groups(module):
command = 'show snmp group'
body = execute_show_command(command, module)
g_list = []
try:
group_table = body[0]['TABLE_role']['ROW_role']
for each in group_table:
g_list.append(each['role_name'])
except (KeyError, AttributeError, IndexError):
return g_list
return g_list
def get_snmp_user(user, module):
command = 'show snmp user {0}'.format(user)
body = execute_show_command(command, module, text=True)
if 'No such entry' not in body[0]:
body = execute_show_command(command, module)
resource = {}
group_list = []
try:
resource_table = body[0]['TABLE_snmp_users']['ROW_snmp_users']
resource['user'] = str(resource_table['user'])
resource['authentication'] = str(resource_table['auth']).strip()
encrypt = str(resource_table['priv']).strip()
if encrypt.startswith('aes'):
resource['encrypt'] = 'aes-128'
else:
resource['encrypt'] = 'none'
group_table = resource_table['TABLE_groups']['ROW_groups']
groups = []
try:
for group in group_table:
groups.append(str(group['group']).strip())
except TypeError:
groups.append(str(group_table['group']).strip())
resource['group'] = groups
except (KeyError, AttributeError, IndexError, TypeError):
return resource
return resource
def remove_snmp_user(user):
return ['no snmp-server user {0}'.format(user)]
def config_snmp_user(proposed, user, reset, new):
if reset and not new:
commands = remove_snmp_user(user)
else:
commands = []
group = proposed.get('group', None)
cmd = ''
if group:
cmd = 'snmp-server user {0} {group}'.format(user, **proposed)
auth = proposed.get('authentication', None)
pwd = proposed.get('pwd', None)
if auth and pwd:
cmd += ' auth {authentication} {pwd}'.format(**proposed)
encrypt = proposed.get('encrypt', None)
privacy = proposed.get('privacy', None)
if encrypt and privacy:
cmd += ' priv {encrypt} {privacy}'.format(**proposed)
elif privacy:
cmd += ' priv {privacy}'.format(**proposed)
if cmd:
commands.append(cmd)
return commands
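# Illustrative example (not part of the original module): for user 'ntc' with
# proposed = {'group': 'network-operator', 'authentication': 'md5', 'pwd': 'pw1'}
# and reset=False, config_snmp_user() returns
# ['snmp-server user ntc network-operator auth md5 pw1'].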
def main():
argument_spec = dict(
user=dict(required=True, type='str'),
group=dict(type='str', required=True),
pwd=dict(type='str'),
privacy=dict(type='str'),
authentication=dict(choices=['md5', 'sha']),
encrypt=dict(type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_together=[['authentication', 'pwd'],
['encrypt', 'privacy']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
user = module.params['user']
group = module.params['group']
pwd = module.params['pwd']
privacy = module.params['privacy']
encrypt = module.params['encrypt']
authentication = module.params['authentication']
state = module.params['state']
if privacy and encrypt:
        if not (pwd and authentication):
module.fail_json(msg='pwd and authentication must be provided '
'when using privacy and encrypt')
if group and group not in get_snmp_groups(module):
module.fail_json(msg='group not configured yet on switch.')
existing = get_snmp_user(user, module)
end_state = existing
store = existing.get('group', None)
if existing:
if group not in existing['group']:
existing['group'] = None
else:
existing['group'] = group
changed = False
commands = []
proposed = {}
if state == 'absent' and existing:
commands.append(remove_snmp_user(user))
elif state == 'present':
new = False
reset = False
args = dict(user=user, pwd=pwd, group=group, privacy=privacy,
encrypt=encrypt, authentication=authentication)
proposed = dict((k, v) for k, v in args.items() if v is not None)
if not existing:
if encrypt:
proposed['encrypt'] = 'aes-128'
commands.append(config_snmp_user(proposed, user, reset, new))
elif existing:
if encrypt and not existing['encrypt'].startswith('aes'):
reset = True
proposed['encrypt'] = 'aes-128'
elif encrypt:
proposed['encrypt'] = 'aes-128'
delta = dict(
set(proposed.items()).difference(existing.items()))
if delta.get('pwd'):
delta['authentication'] = authentication
if delta:
delta['group'] = group
command = config_snmp_user(delta, user, reset, new)
commands.append(command)
cmds = flatten_list(commands)
results = {}
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_user(user, module)
if 'configure' in cmds:
cmds.pop(0)
if store:
existing['group'] = store
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == "__main__":
main()
|
gpl-3.0
|
zfzwy/p2pool
|
SOAPpy/Server.py
|
289
|
27143
|
from __future__ import nested_scopes
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (Gregory.R.Warnes@Pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Server.py 1468 2008-05-24 01:55:33Z warnes $'
from version import __version__
#import xml.sax
import socket
import sys
import SocketServer
from types import *
import BaseHTTPServer
import thread
# SOAPpy modules
from Parser import parseSOAPRPC
from Config import Config
from Types import faultType, voidType, simplify
from NS import NS
from SOAPBuilder import buildSOAP
from Utilities import debugHeader, debugFooter
try: from M2Crypto import SSL
except: pass
################################################################################
# Call context dictionary
################################################################################
_contexts = dict()
def GetSOAPContext():
global _contexts
return _contexts[thread.get_ident()]
################################################################################
# Server
################################################################################
# Method Signature class for adding extra info to registered funcs, right now
# used just to indicate it should be called with keywords, instead of ordered
# params.
class MethodSig:
def __init__(self, func, keywords=0, context=0):
self.func = func
self.keywords = keywords
self.context = context
self.__name__ = func.__name__
def __call__(self, *args, **kw):
return apply(self.func,args,kw)
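# Illustrative note (not part of the original module): wrapping a callable as
# MethodSig(func, keywords=1), e.g. via registerKWFunction() below, makes do_POST
# pass the parsed SOAP arguments to func as keyword arguments; with context=1 the
# per-request SOAPContext is also passed in as the _SOAPContext keyword.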
class SOAPContext:
def __init__(self, header, body, attrs, xmldata, connection, httpheaders,
soapaction):
self.header = header
self.body = body
self.attrs = attrs
self.xmldata = xmldata
self.connection = connection
self.httpheaders= httpheaders
self.soapaction = soapaction
# A class to describe how header messages are handled
class HeaderHandler:
# Initially fail out if there are any problems.
def __init__(self, header, attrs):
for i in header.__dict__.keys():
if i[0] == "_":
continue
d = getattr(header, i)
try:
fault = int(attrs[id(d)][(NS.ENV, 'mustUnderstand')])
except:
fault = 0
if fault:
raise faultType, ("%s:MustUnderstand" % NS.ENV_T,
"Required Header Misunderstood",
"%s" % i)
################################################################################
# SOAP Server
################################################################################
class SOAPServerBase:
def get_request(self):
sock, addr = SocketServer.TCPServer.get_request(self)
if self.ssl_context:
sock = SSL.Connection(self.ssl_context, sock)
sock._setup_ssl(addr)
if sock.accept_ssl() != 1:
raise socket.error, "Couldn't accept SSL connection"
return sock, addr
def registerObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.objmap[namespace] = object
def registerFunction(self, function, namespace = '', funcName = None,
path = ''):
if not funcName : funcName = function.__name__
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
if self.funcmap.has_key(namespace):
self.funcmap[namespace][funcName] = function
else:
self.funcmap[namespace] = {funcName : function}
def registerKWObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
for i in dir(object.__class__):
if i[0] != "_" and callable(getattr(object, i)):
self.registerKWFunction(getattr(object,i), namespace)
# convenience - wraps your func for you.
def registerKWFunction(self, function, namespace = '', funcName = None,
path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
self.registerFunction(MethodSig(function,keywords=1), namespace,
funcName)
def unregisterObject(self, object, namespace = '', path = ''):
if namespace == '' and path == '': namespace = self.namespace
if namespace == '' and path != '':
namespace = path.replace("/", ":")
if namespace[0] == ":": namespace = namespace[1:]
del self.objmap[namespace]
class SOAPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def version_string(self):
return '<a href="http://pywebsvcs.sf.net">' + \
'SOAPpy ' + __version__ + '</a> (Python ' + \
sys.version.split()[0] + ')'
def date_time_string(self):
self.__last_date_time_string = \
BaseHTTPServer.BaseHTTPRequestHandler.\
date_time_string(self)
return self.__last_date_time_string
def do_POST(self):
global _contexts
status = 500
try:
if self.server.config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
print self.raw_requestline.strip()
print "\n".join(map (lambda x: x.strip(),
self.headers.headers))
debugFooter(s)
data = self.rfile.read(int(self.headers["Content-length"]))
if self.server.config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
(r, header, body, attrs) = \
parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
method = r._name
args = r._aslist()
kw = r._asdict()
if Config.simplify_objects:
args = simplify(args)
kw = simplify(kw)
# Handle mixed named and unnamed arguments by assuming
# that all arguments with names of the form "v[0-9]+"
# are unnamed and should be passed in numeric order,
# other arguments are named and should be passed using
# this name.
            # This is a non-standard extension to the SOAP protocol,
# but is supported by Apache AXIS.
# It is enabled by default. To disable, set
# Config.specialArgs to False.
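            # For example (illustrative): kw = {'v0': 'a', 'v1': 'b', 'op': 'add'}
            # is split into ordered arguments ['a', 'b'] and named arguments {'op': 'add'}.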
ordered_args = {}
named_args = {}
if Config.specialArgs:
for (k,v) in kw.items():
if k[0]=="v":
try:
i = int(k[1:])
ordered_args[i] = v
except ValueError:
named_args[str(k)] = v
else:
named_args[str(k)] = v
# We have to decide namespace precedence
# I'm happy with the following scenario
# if r._ns is specified use it, if not check for
# a path, if it's specified convert it and use it as the
# namespace. If both are specified, use r._ns.
ns = r._ns
if len(self.path) > 1 and not ns:
ns = self.path.replace("/", ":")
if ns[0] == ":": ns = ns[1:]
# authorization method
a = None
keylist = ordered_args.keys()
keylist.sort()
# create list in proper order w/o names
tmp = map( lambda x: ordered_args[x], keylist)
ordered_args = tmp
#print '<-> Argument Matching Yielded:'
#print '<-> Ordered Arguments:' + str(ordered_args)
#print '<-> Named Arguments :' + str(named_args)
resp = ""
# For fault messages
if ns:
nsmethod = "%s:%s" % (ns, method)
else:
nsmethod = method
try:
# First look for registered functions
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(method):
f = self.server.funcmap[ns][method]
# look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if self.server.funcmap.has_key(ns) and \
self.server.funcmap[ns].has_key(authmethod):
a = self.server.funcmap[ns][authmethod]
else:
# Now look at registered objects
# Check for nested attributes. This works even if
# there are none, because the split will return
# [method]
f = self.server.objmap[ns]
# Look for the authorization method
if self.server.config.authMethod != None:
authmethod = self.server.config.authMethod
if hasattr(f, authmethod):
a = getattr(f, authmethod)
# then continue looking for the method
l = method.split(".")
for i in l:
f = getattr(f, i)
except:
info = sys.exc_info()
try:
resp = buildSOAP(faultType("%s:Client" % NS.ENV_T,
"Method Not Found",
"%s : %s %s %s" % (nsmethod,
info[0],
info[1],
info[2])),
encoding = self.server.encoding,
config = self.server.config)
finally:
del info
status = 500
else:
try:
if header:
x = HeaderHandler(header, attrs)
fr = 1
# call context book keeping
# We're stuffing the method into the soapaction if there
# isn't one, someday, we'll set that on the client
# and it won't be necessary here
# for now we're doing both
if "SOAPAction".lower() not in self.headers.keys() or \
self.headers["SOAPAction"] == "\"\"":
self.headers["SOAPAction"] = method
thread_id = thread.get_ident()
_contexts[thread_id] = SOAPContext(header, body,
attrs, data,
self.connection,
self.headers,
self.headers["SOAPAction"])
# Do an authorization check
if a != None:
if not apply(a, (), {"_SOAPContext" :
_contexts[thread_id] }):
raise faultType("%s:Server" % NS.ENV_T,
"Authorization failed.",
"%s" % nsmethod)
# If it's wrapped, some special action may be needed
if isinstance(f, MethodSig):
c = None
if f.context: # retrieve context object
c = _contexts[thread_id]
if Config.specialArgs:
if c:
named_args["_SOAPContext"] = c
fr = apply(f, ordered_args, named_args)
elif f.keywords:
# This is lame, but have to de-unicode
# keywords
strkw = {}
for (k, v) in kw.items():
strkw[str(k)] = v
if c:
strkw["_SOAPContext"] = c
fr = apply(f, (), strkw)
elif c:
fr = apply(f, args, {'_SOAPContext':c})
else:
fr = apply(f, args, {})
else:
if Config.specialArgs:
fr = apply(f, ordered_args, named_args)
else:
fr = apply(f, args, {})
if type(fr) == type(self) and \
isinstance(fr, voidType):
resp = buildSOAP(kw = {'%sResponse' % method: fr},
encoding = self.server.encoding,
config = self.server.config)
else:
resp = buildSOAP(kw =
{'%sResponse' % method: {'Result': fr}},
encoding = self.server.encoding,
config = self.server.config)
# Clean up _contexts
if _contexts.has_key(thread_id):
del _contexts[thread_id]
except Exception, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Method %s exception' % nsmethod
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if isinstance(e, faultType):
f = e
else:
f = faultType("%s:Server" % NS.ENV_T,
"Method Failed",
"%s" % nsmethod)
if self.server.config.returnFaultInfo:
f._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(f, 'detail'):
f._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(f, encoding = self.server.encoding,
config = self.server.config)
status = 500
else:
status = 200
except faultType, e:
import traceback
info = sys.exc_info()
try:
if self.server.config.dumpFaultInfo:
s = 'Received fault exception'
debugHeader(s)
traceback.print_exception(info[0], info[1],
info[2])
debugFooter(s)
if self.server.config.returnFaultInfo:
e._setDetail("".join(traceback.format_exception(
info[0], info[1], info[2])))
elif not hasattr(e, 'detail'):
e._setDetail("%s %s" % (info[0], info[1]))
finally:
del info
resp = buildSOAP(e, encoding = self.server.encoding,
config = self.server.config)
status = 500
except Exception, e:
# internal error, report as HTTP server error
if self.server.config.dumpFaultInfo:
s = 'Internal exception %s' % e
import traceback
debugHeader(s)
info = sys.exc_info()
try:
traceback.print_exception(info[0], info[1], info[2])
finally:
del info
debugFooter(s)
self.send_response(500)
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, 500, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
debugFooter(s)
else:
# got a valid SOAP response
self.send_response(status)
t = 'text/xml';
if self.server.encoding != None:
t += '; charset=%s' % self.server.encoding
self.send_header("Content-type", t)
self.send_header("Content-length", str(len(resp)))
self.end_headers()
if self.server.config.dumpHeadersOut and \
self.request_version != 'HTTP/0.9':
s = 'Outgoing HTTP headers'
debugHeader(s)
if self.responses.has_key(status):
s = ' ' + self.responses[status][0]
else:
s = ''
print "%s %d%s" % (self.protocol_version, status, s)
print "Server:", self.version_string()
print "Date:", self.__last_date_time_string
print "Content-type:", t
print "Content-length:", len(resp)
debugFooter(s)
if self.server.config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print resp,
if resp[-1] != '\n':
print
debugFooter(s)
self.wfile.write(resp)
self.wfile.flush()
# We should be able to shut down both a regular and an SSL
# connection, but under Python 2.1, calling shutdown on an
        # SSL connection drops the output, hence this work-around.
# This should be investigated more someday.
if self.server.config.SSLserver and \
isinstance(self.connection, SSL.Connection):
self.connection.set_shutdown(SSL.SSL_SENT_SHUTDOWN |
SSL.SSL_RECEIVED_SHUTDOWN)
else:
self.connection.shutdown(1)
def do_GET(self):
#print 'command ', self.command
#print 'path ', self.path
#print 'request_version', self.request_version
#print 'headers'
#print ' type ', self.headers.type
#print ' maintype', self.headers.maintype
#print ' subtype ', self.headers.subtype
#print ' params ', self.headers.plist
path = self.path.lower()
if path.endswith('wsdl'):
method = 'wsdl'
function = namespace = None
if self.server.funcmap.has_key(namespace) \
and self.server.funcmap[namespace].has_key(method):
function = self.server.funcmap[namespace][method]
else:
if namespace in self.server.objmap.keys():
function = self.server.objmap[namespace]
l = method.split(".")
for i in l:
function = getattr(function, i)
if function:
self.send_response(200)
self.send_header("Content-type", 'text/plain')
self.end_headers()
response = apply(function, ())
self.wfile.write(str(response))
return
# return error
self.send_response(200)
self.send_header("Content-type", 'text/html')
self.end_headers()
self.wfile.write('''\
<head>
<title>Error!</title>
</head>
<body>
<h1>Oops!</h1>
<p>
This server supports HTTP GET requests only for the purpose of
obtaining Web Services Description Language (WSDL) for a specific
service.
Either you requested a URL that does not end in "wsdl" or this
server does not implement a wsdl method.
</p>
</body>''')
def log_message(self, format, *args):
if self.server.log:
BaseHTTPServer.BaseHTTPRequestHandler.\
log_message (self, format, *args)
class SOAPServer(SOAPServerBase, SocketServer.TCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.TCPServer.__init__(self, addr, RequestHandler)
class ThreadingSOAPServer(SOAPServerBase, SocketServer.ThreadingTCPServer):
def __init__(self, addr = ('localhost', 8000),
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandler)
# only define class if Unix domain sockets are available
if hasattr(socket, "AF_UNIX"):
class SOAPUnixSocketServer(SOAPServerBase, SocketServer.UnixStreamServer):
def __init__(self, addr = 8000,
RequestHandler = SOAPRequestHandler, log = 0, encoding = 'UTF-8',
config = Config, namespace = None, ssl_context = None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
if ssl_context != None and not config.SSLserver:
raise AttributeError, \
"SSL server not supported by this Python installation"
self.namespace = namespace
self.objmap = {}
self.funcmap = {}
self.ssl_context = ssl_context
self.encoding = encoding
self.config = config
self.log = log
self.allow_reuse_address= 1
SocketServer.UnixStreamServer.__init__(self, str(addr), RequestHandler)
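# Usage sketch (illustrative only; _example_serve, echo and the port are hypothetical,
# not part of the original module): expose a single function over SOAP on localhost.
def _example_serve():
    def echo(text):
        return text
    server = SOAPServer(("localhost", 8080))
    server.registerFunction(echo)
    server.serve_forever()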
|
gpl-3.0
|
israeltobias/DownMedia
|
youtube-dl/youtube_dl/extractor/brightcove.py
|
2
|
27942
|
# coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_xml_parse_error,
compat_HTTPError,
)
from ..utils import (
determine_ext,
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
js_to_json,
int_or_none,
parse_iso8601,
unescapeHTML,
unsmuggle_url,
update_url_query,
clean_html,
mimetype2ext,
)
class BrightcoveLegacyIE(InfoExtractor):
IE_NAME = 'brightcove:legacy'
_VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
_FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'
_TESTS = [
{
# From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
'md5': '5423e113865d26e40624dce2e4b45d95',
'note': 'Test Brightcove downloads and detection in GenericIE',
'info_dict': {
'id': '2371591881001',
'ext': 'mp4',
'title': 'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
'uploader': '8TV',
'description': 'md5:a950cc4285c43e44d763d036710cd9cd',
'timestamp': 1368213670,
'upload_date': '20130510',
'uploader_id': '1589608506001',
}
},
{
# From http://medianetwork.oracle.com/video/player/1785452137001
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1217746023001&flashID=myPlayer&%40videoPlayer=1785452137001',
'info_dict': {
'id': '1785452137001',
'ext': 'flv',
'title': 'JVMLS 2012: Arrays 2.0 - Opportunities and Challenges',
'description': 'John Rose speaks at the JVM Language Summit, August 1, 2012.',
'uploader': 'Oracle',
'timestamp': 1344975024,
'upload_date': '20120814',
'uploader_id': '1460825906',
},
},
{
# From http://mashable.com/2013/10/26/thermoelectric-bracelet-lets-you-control-your-body-temperature/
'url': 'http://c.brightcove.com/services/viewer/federated_f9?&playerID=1265504713001&publisherID=AQ%7E%7E%2CAAABBzUwv1E%7E%2CxP-xFHVUstiMFlNYfvF4G9yFnNaqCw_9&videoID=2750934548001',
'info_dict': {
'id': '2750934548001',
'ext': 'mp4',
'title': 'This Bracelet Acts as a Personal Thermostat',
'description': 'md5:547b78c64f4112766ccf4e151c20b6a0',
'uploader': 'Mashable',
'timestamp': 1382041798,
'upload_date': '20131017',
'uploader_id': '1130468786001',
},
},
{
# test that the default referer works
# from http://national.ballet.ca/interact/video/Lost_in_Motion_II/
'url': 'http://link.brightcove.com/services/player/bcpid756015033001?bckey=AQ~~,AAAApYJi_Ck~,GxhXCegT1Dp39ilhXuxMJxasUhVNZiil&bctid=2878862109001',
'info_dict': {
'id': '2878862109001',
'ext': 'mp4',
'title': 'Lost in Motion II',
'description': 'md5:363109c02998fee92ec02211bd8000df',
'uploader': 'National Ballet of Canada',
},
'skip': 'Video gone',
},
{
# test flv videos served by akamaihd.net
# From http://www.redbull.com/en/bike/stories/1331655643987/replay-uci-dh-world-cup-2014-from-fort-william
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?%40videoPlayer=ref%3Aevent-stream-356&linkBaseURL=http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fvideos%2F1331655630249%2Freplay-uci-fort-william-2014-dh&playerKey=AQ%7E%7E%2CAAAApYJ7UqE%7E%2Cxqr_zXk0I-zzNndy8NlHogrCb5QdyZRf&playerID=1398061561001#__youtubedl_smuggle=%7B%22Referer%22%3A+%22http%3A%2F%2Fwww.redbull.com%2Fen%2Fbike%2Fstories%2F1331655643987%2Freplay-uci-dh-world-cup-2014-from-fort-william%22%7D',
# The md5 checksum changes on each download
'info_dict': {
'id': '3750436379001',
'ext': 'flv',
'title': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'uploader': 'RBTV Old (do not use)',
'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
'timestamp': 1409122195,
'upload_date': '20140827',
'uploader_id': '710858724001',
},
},
{
# playlist with 'videoList'
# from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
'info_dict': {
'title': 'Sealife',
'id': '3550319591001',
},
'playlist_mincount': 7,
},
{
# playlist with 'playlistTab' (https://github.com/rg3/youtube-dl/issues/9965)
'url': 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=AQ%7E%7E,AAABXlLMdok%7E,NJ4EoMlZ4rZdx9eU1rkMVd8EaYPBBUlg',
'info_dict': {
'id': '1522758701001',
'title': 'Lesson 08',
},
'playlist_mincount': 10,
},
]
FLV_VCODECS = {
1: 'SORENSON',
2: 'ON2',
3: 'H264',
4: 'VP8',
}
@classmethod
def _build_brighcove_url(cls, object_str):
"""
Build a Brightcove url from a xml string containing
<object class="BrightcoveExperience">{params}</object>
"""
# Fix up some stupid HTML, see https://github.com/rg3/youtube-dl/issues/1553
object_str = re.sub(r'(<param(?:\s+[a-zA-Z0-9_]+="[^"]*")*)>',
lambda m: m.group(1) + '/>', object_str)
# Fix up some stupid XML, see https://github.com/rg3/youtube-dl/issues/1608
object_str = object_str.replace('<--', '<!--')
# remove namespace to simplify extraction
object_str = re.sub(r'(<object[^>]*)(xmlns=".*?")', r'\1', object_str)
object_str = fix_xml_ampersands(object_str)
try:
object_doc = compat_etree_fromstring(object_str.encode('utf-8'))
except compat_xml_parse_error:
return
fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
if fv_el is not None:
flashvars = dict(
(k, v[0])
for k, v in compat_parse_qs(fv_el.attrib['value']).items())
else:
flashvars = {}
data_url = object_doc.attrib.get('data', '')
data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
def find_param(name):
if name in flashvars:
return flashvars[name]
node = find_xpath_attr(object_doc, './param', 'name', name)
if node is not None:
return node.attrib['value']
return data_url_params.get(name)
params = {}
playerID = find_param('playerID') or find_param('playerId')
if playerID is None:
raise ExtractorError('Cannot find player ID')
params['playerID'] = playerID
playerKey = find_param('playerKey')
# Not all pages define this value
if playerKey is not None:
params['playerKey'] = playerKey
# These fields hold the id of the video
videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
if videoPlayer is not None:
params['@videoPlayer'] = videoPlayer
linkBase = find_param('linkBaseURL')
if linkBase is not None:
params['linkBaseURL'] = linkBase
return cls._make_brightcove_url(params)
@classmethod
def _build_brighcove_url_from_js(cls, object_js):
# The layout of JS is as follows:
# customBC.createVideo = function (width, height, playerID, playerKey, videoPlayer, VideoRandomID) {
# // build Brightcove <object /> XML
# }
m = re.search(
r'''(?x)customBC\.createVideo\(
.*? # skipping width and height
["\'](?P<playerID>\d+)["\']\s*,\s* # playerID
["\'](?P<playerKey>AQ[^"\']{48})[^"\']*["\']\s*,\s* # playerKey begins with AQ and is 50 characters
# in length, however it's appended to itself
# in places, so truncate
["\'](?P<videoID>\d+)["\'] # @videoPlayer
''', object_js)
if m:
return cls._make_brightcove_url(m.groupdict())
@classmethod
def _make_brightcove_url(cls, params):
return update_url_query(cls._FEDERATED_URL, params)
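    # Illustrative note (not part of the original extractor): params such as
    # {'playerID': '1654948606001', '@videoPlayer': '2371591881001'} are appended to
    # _FEDERATED_URL as a query string, producing a viewer URL of the same form as
    # the first test URL in _TESTS above.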
@classmethod
def _extract_brightcove_url(cls, webpage):
"""Try to extract the brightcove url from the webpage, returns None
if it can't be found
"""
urls = cls._extract_brightcove_urls(webpage)
return urls[0] if urls else None
@classmethod
def _extract_brightcove_urls(cls, webpage):
"""Return a list of all Brightcove URLs from the webpage """
url_m = re.search(
r'''(?x)
<meta\s+
(?:property|itemprop)=([\'"])(?:og:video|embedURL)\1[^>]+
content=([\'"])(?P<url>https?://(?:secure|c)\.brightcove.com/(?:(?!\2).)+)\2
''', webpage)
if url_m:
url = unescapeHTML(url_m.group('url'))
# Some sites don't add it, we can't download with this url, for example:
# http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
if 'playerKey' in url or 'videoId' in url or 'idVideo' in url:
return [url]
matches = re.findall(
r'''(?sx)<object
(?:
[^>]+?class=[\'"][^>]*?BrightcoveExperience.*?[\'"] |
[^>]*?>\s*<param\s+name="movie"\s+value="https?://[^/]*brightcove\.com/
).+?>\s*</object>''',
webpage)
if matches:
return list(filter(None, [cls._build_brighcove_url(m) for m in matches]))
return list(filter(None, [
cls._build_brighcove_url_from_js(custom_bc)
for custom_bc in re.findall(r'(customBC\.createVideo\(.+?\);)', webpage)]))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
# Change the 'videoId' and others field to '@videoPlayer'
url = re.sub(r'(?<=[?&])(videoI(d|D)|idVideo|bctid)', '%40videoPlayer', url)
# Change bckey (used by bcove.me urls) to playerKey
url = re.sub(r'(?<=[?&])bckey', 'playerKey', url)
mobj = re.match(self._VALID_URL, url)
query_str = mobj.group('query')
query = compat_urlparse.parse_qs(query_str)
videoPlayer = query.get('@videoPlayer')
if videoPlayer:
# We set the original url as the default 'Referer' header
referer = smuggled_data.get('Referer', url)
return self._get_video_info(
videoPlayer[0], query, referer=referer)
elif 'playerKey' in query:
player_key = query['playerKey']
return self._get_playlist_info(player_key[0])
else:
raise ExtractorError(
'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
expected=True)
def _get_video_info(self, video_id, query, referer=None):
headers = {}
linkBase = query.get('linkBaseURL')
if linkBase is not None:
referer = linkBase[0]
if referer is not None:
headers['Referer'] = referer
webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)
error_msg = self._html_search_regex(
r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
'error message', default=None)
if error_msg is not None:
raise ExtractorError(
'brightcove said: %s' % error_msg, expected=True)
self.report_extraction(video_id)
info = self._search_regex(r'var experienceJSON = ({.*});', webpage, 'json')
info = json.loads(info)['data']
video_info = info['programmedContent']['videoPlayer']['mediaDTO']
video_info['_youtubedl_adServerURL'] = info.get('adServerURL')
return self._extract_video_info(video_info)
def _get_playlist_info(self, player_key):
info_url = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s' % player_key
playlist_info = self._download_webpage(
info_url, player_key, 'Downloading playlist information')
json_data = json.loads(playlist_info)
if 'videoList' in json_data:
playlist_info = json_data['videoList']
playlist_dto = playlist_info['mediaCollectionDTO']
elif 'playlistTabs' in json_data:
playlist_info = json_data['playlistTabs']
playlist_dto = playlist_info['lineupListDTO']['playlistDTOs'][0]
else:
raise ExtractorError('Empty playlist')
videos = [self._extract_video_info(video_info) for video_info in playlist_dto['videoDTOs']]
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
playlist_title=playlist_dto['displayName'])
def _extract_video_info(self, video_info):
video_id = compat_str(video_info['id'])
publisher_id = video_info.get('publisherId')
info = {
'id': video_id,
'title': video_info['displayName'].strip(),
'description': video_info.get('shortDescription'),
'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
'uploader': video_info.get('publisherName'),
'uploader_id': compat_str(publisher_id) if publisher_id else None,
'duration': float_or_none(video_info.get('length'), 1000),
'timestamp': int_or_none(video_info.get('creationDate'), 1000),
}
renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
if renditions:
formats = []
for rend in renditions:
url = rend['defaultURL']
if not url:
continue
ext = None
if rend['remote']:
url_comp = compat_urllib_parse_urlparse(url)
if url_comp.path.endswith('.m3u8'):
formats.extend(
self._extract_m3u8_formats(
url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
continue
elif 'akamaihd.net' in url_comp.netloc:
                        # These renditions are served through
# akamaihd.net, but they don't use f4m manifests
url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
ext = 'flv'
if ext is None:
ext = determine_ext(url)
tbr = int_or_none(rend.get('encodingRate'), 1000)
a_format = {
'format_id': 'http%s' % ('-%s' % tbr if tbr else ''),
'url': url,
'ext': ext,
'filesize': int_or_none(rend.get('size')) or None,
'tbr': tbr,
}
if rend.get('audioOnly'):
a_format.update({
'vcodec': 'none',
})
else:
a_format.update({
'height': int_or_none(rend.get('frameHeight')),
'width': int_or_none(rend.get('frameWidth')),
'vcodec': rend.get('videoCodec'),
})
# m3u8 manifests with remote == false are media playlists
# Not calling _extract_m3u8_formats here to save network traffic
if ext == 'm3u8':
a_format.update({
'format_id': 'hls%s' % ('-%s' % tbr if tbr else ''),
'ext': 'mp4',
'protocol': 'm3u8_native',
})
formats.append(a_format)
self._sort_formats(formats)
info['formats'] = formats
elif video_info.get('FLVFullLengthURL') is not None:
info.update({
'url': video_info['FLVFullLengthURL'],
'vcodec': self.FLV_VCODECS.get(video_info.get('FLVFullCodec')),
'filesize': int_or_none(video_info.get('FLVFullSize')),
})
if self._downloader.params.get('include_ads', False):
adServerURL = video_info.get('_youtubedl_adServerURL')
if adServerURL:
ad_info = {
'_type': 'url',
'url': adServerURL,
}
if 'url' in info:
return {
'_type': 'playlist',
'title': info['title'],
'entries': [ad_info, info],
}
else:
return ad_info
if 'url' not in info and not info.get('formats'):
raise ExtractorError('Unable to extract video url for %s' % video_id)
return info
class BrightcoveNewIE(InfoExtractor):
IE_NAME = 'brightcove:new'
_VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
_TESTS = [{
'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
'md5': 'c8100925723840d4b0d243f7025703be',
'info_dict': {
'id': '4463358922001',
'ext': 'mp4',
'title': 'Meet the man behind Popcorn Time',
'description': 'md5:eac376a4fe366edc70279bfb681aea16',
'duration': 165.768,
'timestamp': 1441391203,
'upload_date': '20150904',
'uploader_id': '929656772001',
'formats': 'mincount:22',
},
}, {
# with rtmp streams
'url': 'http://players.brightcove.net/4036320279001/5d112ed9-283f-485f-a7f9-33f42e8bc042_default/index.html?videoId=4279049078001',
'info_dict': {
'id': '4279049078001',
'ext': 'mp4',
'title': 'Titansgrave: Chapter 0',
'description': 'Titansgrave: Chapter 0',
'duration': 1242.058,
'timestamp': 1433556729,
'upload_date': '20150606',
'uploader_id': '4036320279001',
'formats': 'mincount:41',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# ref: prefixed video id
'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
'only_matching': True,
}, {
# non numeric ref: prefixed video id
'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
'only_matching': True,
}, {
# unavailable video without message but with error_code
'url': 'http://players.brightcove.net/1305187701/c832abfb-641b-44eb-9da0-2fe76786505f_default/index.html?videoId=4377407326001',
'only_matching': True,
}]
@staticmethod
def _extract_url(webpage):
urls = BrightcoveNewIE._extract_urls(webpage)
return urls[0] if urls else None
@staticmethod
def _extract_urls(webpage):
# Reference:
# 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
# 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideousingjavascript
# 3. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/embed-in-page.html
# 4. https://support.brightcove.com/en/video-cloud/docs/dynamically-assigning-videos-player
entries = []
# Look for iframe embeds [1]
for _, url in re.findall(
r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
entries.append(url if url.startswith('http') else 'http:' + url)
# Look for embed_in_page embeds [2]
for video_id, account_id, player_id, embed in re.findall(
# According to examples from [3] it's unclear whether video id
# may be optional and what to do when it is
# According to [4] data-video-id may be prefixed with ref:
r'''(?sx)
<video[^>]+
data-video-id=["\'](\d+|ref:[^"\']+)["\'][^>]*>.*?
</video>.*?
<script[^>]+
src=["\'](?:https?:)?//players\.brightcove\.net/
(\d+)/([^/]+)_([^/]+)/index(?:\.min)?\.js
''', webpage):
entries.append(
'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
% (account_id, player_id, embed, video_id))
return entries
def _real_extract(self, url):
account_id, player_id, embed, video_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(
'http://players.brightcove.net/%s/%s_%s/index.min.js'
% (account_id, player_id, embed), video_id)
policy_key = None
catalog = self._search_regex(
r'catalog\(({.+?})\);', webpage, 'catalog', default=None)
if catalog:
catalog = self._parse_json(
js_to_json(catalog), video_id, fatal=False)
if catalog:
policy_key = catalog.get('policyKey')
if not policy_key:
policy_key = self._search_regex(
r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
webpage, 'policy key', group='pk')
api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
try:
json_data = self._download_json(api_url, video_id, headers={
'Accept': 'application/json;pk=%s' % policy_key
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
raise ExtractorError(
json_data.get('message') or json_data['error_code'], expected=True)
raise
title = json_data['name'].strip()
formats = []
for source in json_data.get('sources', []):
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
if ext == 'ism' or container == 'WVM':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:
continue
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
if not src:
continue
formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
else:
streaming_src = source.get('streaming_src')
stream_name, app_name = source.get('stream_name'), source.get('app_name')
if not src and not streaming_src and (not stream_name or not app_name):
continue
tbr = float_or_none(source.get('avg_bitrate'), 1000)
height = int_or_none(source.get('height'))
width = int_or_none(source.get('width'))
f = {
'tbr': tbr,
'filesize': int_or_none(source.get('size')),
'container': container,
'ext': ext or container.lower(),
}
if width == 0 and height == 0:
f.update({
'vcodec': 'none',
})
else:
f.update({
'width': width,
'height': height,
'vcodec': source.get('codec'),
})
def build_format_id(kind):
format_id = kind
if tbr:
format_id += '-%dk' % int(tbr)
if height:
format_id += '-%dp' % height
return format_id
if src or streaming_src:
f.update({
'url': src or streaming_src,
'format_id': build_format_id('http' if src else 'http-streaming'),
'source_preference': 0 if src else -1,
})
else:
f.update({
'url': app_name,
'play_path': stream_name,
'format_id': build_format_id('rtmp'),
})
formats.append(f)
errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
self._sort_formats(formats)
subtitles = {}
for text_track in json_data.get('text_tracks', []):
if text_track.get('src'):
subtitles.setdefault(text_track.get('srclang'), []).append({
'url': text_track['src'],
})
is_live = False
duration = float_or_none(json_data.get('duration'), 1000)
if duration and duration < 0:
is_live = True
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'description': clean_html(json_data.get('description')),
'thumbnail': json_data.get('thumbnail') or json_data.get('poster'),
'duration': duration,
'timestamp': parse_iso8601(json_data.get('published_at')),
'uploader_id': account_id,
'formats': formats,
'subtitles': subtitles,
'tags': json_data.get('tags', []),
'is_live': is_live,
}
|
gpl-3.0
|
rkokkelk/Gulliver
|
deluge/core/core.py
|
1
|
36618
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Andrew Resch <andrewresch@gmail.com>
# Copyright (C) 2011 Pedro Algarvio <pedro@algarvio.me>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import base64
import glob
import logging
import os
import shutil
import tempfile
import threading
from urlparse import urljoin
import twisted.web.client
import twisted.web.error
import deluge.common
import deluge.component as component
from deluge import path_chooser_common
from deluge._libtorrent import lt
from deluge.configmanager import ConfigManager, get_config_dir
from deluge.core.alertmanager import AlertManager
from deluge.core.authmanager import (AUTH_LEVEL_ADMIN, AUTH_LEVEL_NONE, AUTH_LEVELS_MAPPING,
AUTH_LEVELS_MAPPING_REVERSE, AuthManager)
from deluge.core.eventmanager import EventManager
from deluge.core.filtermanager import FilterManager
from deluge.core.pluginmanager import PluginManager
from deluge.core.preferencesmanager import PreferencesManager
from deluge.core.rpcserver import export
from deluge.core.torrentmanager import TorrentManager
from deluge.core.scanner import Scanner
from deluge.error import DelugeError, InvalidPathError, InvalidTorrentError
from deluge.event import NewVersionAvailableEvent, SessionPausedEvent, SessionResumedEvent, TorrentQueueChangedEvent
from deluge.httpdownloader import download_file
log = logging.getLogger(__name__)
class Core(component.Component):
def __init__(self, listen_interface=None):
log.debug("Core init...")
component.Component.__init__(self, "Core")
# Create the client fingerprint
client_id = "DE"
client_version = deluge.common.VersionSplit(deluge.common.get_version()).version
while len(client_version) < 4:
client_version.append(0)
# Start the libtorrent session
log.info("Starting libtorrent %s (%s, %s) session...", lt.version, client_id, client_version)
self.session = lt.session(lt.fingerprint(client_id, *client_version), flags=0)
# Load the session state if available
self.__load_session_state()
# --- Set session settings ---
settings = self.session.get_settings()
settings["user_agent"] = "Deluge/%(deluge_version)s libtorrent/%(lt_version)s" % {
'deluge_version': deluge.common.get_version(),
'lt_version': self.get_libtorrent_version().rpartition(".")[0]
}
# No SSL torrent support in code so disable the listen port.
settings["ssl_listen"] = 0
# On Windows OS set the disk I/O read/write to bypass OS cache
if deluge.common.windows_check():
settings["disk_io_write_mode"] = lt.io_buffer_mode_t.disable_os_cache
settings["disk_io_read_mode"] = lt.io_buffer_mode_t.disable_os_cache
self.session.set_settings(settings)
# --- libtorrent plugins ---
# Allows peers to download the metadata from the swarm directly
self.session.add_extension("metadata_transfer")
self.session.add_extension("ut_metadata")
# Ban peers that sends bad data
self.session.add_extension("smart_ban")
# Create the components
self.eventmanager = EventManager()
self.preferencesmanager = PreferencesManager()
self.alertmanager = AlertManager()
self.pluginmanager = PluginManager(self)
self.torrentmanager = TorrentManager()
self.filtermanager = FilterManager(self)
self.authmanager = AuthManager()
# New release check information
self.new_release = None
# Get the core config
self.torrent_config = self.session.get_settings()
self.config = ConfigManager("core.conf")
self.config.save()
# Config needs to be ready for scanner
self.scanner = Scanner()
# If there was an interface value from the command line, use it, but
# store the one in the config so we can restore it on shutdown
self.__old_interface = None
if listen_interface:
if deluge.common.is_ip(listen_interface):
self.__old_interface = self.config["listen_interface"]
self.config["listen_interface"] = listen_interface
else:
log.error("Invalid listen interface (must be IP Address): %s", listen_interface)
def start(self):
"""Starts the core"""
# New release check information
self.__new_release = None
def stop(self):
log.debug("Core stopping...")
# Save the libtorrent session state
self.__save_session_state()
# We stored a copy of the old interface value
if self.__old_interface:
self.config["listen_interface"] = self.__old_interface
# Make sure the config file has been saved
self.config.save()
def shutdown(self):
pass
def __save_session_state(self):
"""Saves the libtorrent session state"""
filename = "session.state"
filepath = get_config_dir(filename)
filepath_bak = filepath + ".bak"
filepath_tmp = filepath + ".tmp"
try:
if os.path.isfile(filepath):
log.debug("Creating backup of %s at: %s", filename, filepath_bak)
shutil.copy2(filepath, filepath_bak)
except IOError as ex:
log.error("Unable to backup %s to %s: %s", filepath, filepath_bak, ex)
else:
log.info("Saving the %s at: %s", filename, filepath)
try:
with open(filepath_tmp, "wb") as _file:
_file.write(lt.bencode(self.session.save_state()))
_file.flush()
os.fsync(_file.fileno())
shutil.move(filepath_tmp, filepath)
except (IOError, EOFError) as ex:
log.error("Unable to save %s: %s", filename, ex)
if os.path.isfile(filepath_bak):
log.info("Restoring backup of %s from: %s", filename, filepath_bak)
shutil.move(filepath_bak, filepath)
def __load_session_state(self):
"""Loads the libtorrent session state"""
filename = "session.state"
filepath = get_config_dir(filename)
filepath_bak = filepath + ".bak"
for _filepath in (filepath, filepath_bak):
log.info("Opening %s for load: %s", filename, _filepath)
try:
with open(_filepath, "rb") as _file:
state = lt.bdecode(_file.read())
except (IOError, EOFError, RuntimeError) as ex:
log.warning("Unable to load %s: %s", _filepath, ex)
else:
log.info("Successfully loaded %s: %s", filename, _filepath)
self.session.load_state(state)
return
def get_new_release(self):
log.debug("get_new_release")
from urllib2 import urlopen, URLError
try:
self.new_release = urlopen("http://download.deluge-torrent.org/version-1.0").read().strip()
except URLError as ex:
log.debug("Unable to get release info from website: %s", ex)
return
self.check_new_release()
def check_new_release(self):
if self.new_release:
log.debug("new_release: %s", self.new_release)
if deluge.common.VersionSplit(self.new_release) > deluge.common.VersionSplit(deluge.common.get_version()):
component.get("EventManager").emit(NewVersionAvailableEvent(self.new_release))
return self.new_release
return False
# Exported Methods
@export
def start_scan(self, scan_dir):
return self.scanner.scan(scan_dir, False)
@export
def add_torrent_file(self, filename, filedump, options):
"""Adds a torrent file to the session.
Args:
filename (str): the filename of the torrent
filedump (str): A base64 encoded string of the torrent file contents
options (dict): The options to apply to the torrent on add
Returns:
str: The torrent_id or None
"""
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
try:
torrent_id = self.torrentmanager.add(
filedump=filedump, options=options, filename=filename
)
except Exception as ex:
log.error("There was an error adding the torrent file %s", filename)
log.exception(ex)
torrent_id = None
return torrent_id
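# Usage sketch (assumed caller-side preparation; the RPC transport is outside
# this method): filedump mirrors the decodestring() call above, so callers
# base64-encode the raw .torrent bytes before passing them in.
#
#     with open("example.torrent", "rb") as _f:
#         filedump = base64.encodestring(_f.read())
#     torrent_id = core.add_torrent_file("example.torrent", filedump,
#                                        {"download_location": "/srv/downloads"})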
@export
def add_torrent_url(self, url, options, headers=None):
"""
Adds a torrent from a url. Deluge will attempt to fetch the torrent
from url prior to adding it to the session.
:param url: the url pointing to the torrent file
:type url: string
:param options: the options to apply to the torrent on add
:type options: dict
:param headers: any optional headers to send
:type headers: dict
:returns: a Deferred which returns the torrent_id as a str or None
"""
log.info("Attempting to add url %s", url)
def on_download_success(filename):
# We got the file, so add it to the session
f = open(filename, "rb")
data = f.read()
f.close()
try:
os.remove(filename)
except OSError as ex:
log.warning("Couldn't remove temp file: %s", ex)
return self.add_torrent_file(filename, base64.encodestring(data), options)
def on_download_fail(failure):
if failure.check(twisted.web.error.PageRedirect):
new_url = urljoin(url, failure.getErrorMessage().split(" to ")[1])
result = download_file(
new_url, tempfile.mkstemp()[1], headers=headers,
force_filename=True
)
result.addCallbacks(on_download_success, on_download_fail)
elif failure.check(twisted.web.client.PartialDownloadError):
result = download_file(
url, tempfile.mkstemp()[1], headers=headers,
force_filename=True, allow_compression=False
)
result.addCallbacks(on_download_success, on_download_fail)
else:
# Log the error and pass the failure onto the client
log.error("Error occurred downloading torrent from %s", url)
log.error("Reason: %s", failure.getErrorMessage())
result = failure
return result
d = download_file(
url, tempfile.mkstemp()[1], headers=headers, force_filename=True
)
d.addCallbacks(on_download_success, on_download_fail)
return d
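# Usage sketch (hypothetical URL): the return value is a Deferred, so the
# torrent_id arrives asynchronously through a callback.
#
#     d = core.add_torrent_url("http://example.com/example.torrent", {})
#     d.addCallback(lambda torrent_id: log.info("Added torrent %s", torrent_id))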
@export
def add_torrent_magnet(self, uri, options):
"""
Adds a torrent from a magnet link.
:param uri: the magnet link
:type uri: string
:param options: the options to apply to the torrent on add
:type options: dict
:returns: the torrent_id
:rtype: string
"""
log.debug("Attempting to add by magnet uri: %s", uri)
return self.torrentmanager.add(magnet=uri, options=options)
@export
def add_torrent_seed(self, filename, filedump, seedname, options):
"""Adds a torrent file to the session.
Args:
filename (str): the filename of the torrent
filedump (str): A base64 encoded string of the torrent file contents
seedname (str): the filename of the seed
options (dict): The options to apply to the torrent on add
Returns:
str: The torrent_id or None
"""
options["download_location"] = seedname
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
try:
torrent_id = self.torrentmanager.add(
filedump=filedump, options=options, filename=filename
)
except Exception as ex:
log.error("There was an error adding the torrent file %s", filename)
log.exception(ex)
torrent_id = None
return torrent_id
@export
def remove_torrent(self, torrent_id, remove_data):
"""
Removes a torrent from the session.
:param torrent_id: the torrent_id of the torrent to remove
:type torrent_id: string
:param remove_data: if True, remove the data associated with this torrent
:type remove_data: boolean
:returns: True if removed successfully
:rtype: bool
:raises InvalidTorrentError: if the torrent_id does not exist in the session
"""
log.debug("Removing torrent %s from the core.", torrent_id)
return self.torrentmanager.remove(torrent_id, remove_data)
@export
def get_session_status(self, keys):
"""
Gets the session status values for 'keys'. These keys are taken
from libtorrent's session status.
See: http://www.rasterbar.com/products/libtorrent/manual.html#status
:param keys: the keys for which we want values
:type keys: list
:returns: a dictionary of {key: value, ...}
:rtype: dict
"""
status = {}
session_status = self.session.status()
for key in keys:
status[key] = getattr(session_status, key)
return status
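# Usage sketch: the keys are attribute names of libtorrent's session_status
# object; the exact set depends on the libtorrent build, and the two used
# here are assumed to be available.
#
#     rates = core.get_session_status(["payload_download_rate",
#                                      "payload_upload_rate"])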
@export
def get_cache_status(self):
"""
Returns a dictionary of the session's cache status.
:returns: the cache status
:rtype: dict
"""
status = self.session.get_cache_status()
cache = {}
for attr in dir(status):
if attr.startswith("_"):
continue
cache[attr] = getattr(status, attr)
# Add in a couple ratios
try:
cache["write_hit_ratio"] = float((cache["blocks_written"] -
cache["writes"])) / float(cache["blocks_written"])
except ZeroDivisionError:
cache["write_hit_ratio"] = 0.0
try:
cache["read_hit_ratio"] = float(cache["blocks_read_hit"]) / float(cache["blocks_read"])
except ZeroDivisionError:
cache["read_hit_ratio"] = 0.0
return cache
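# Worked example of the ratios above (hypothetical numbers): with
# blocks_written=1000 and writes=250, write_hit_ratio = (1000 - 250) / 1000
# = 0.75, i.e. three quarters of the written blocks did not need their own
# write operation because they were coalesced in the cache.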
@export
def force_reannounce(self, torrent_ids):
log.debug("Forcing reannouncment to: %s", torrent_ids)
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].force_reannounce()
@export
def pause_torrent(self, torrent_ids):
log.debug("Pausing: %s", torrent_ids)
for torrent_id in torrent_ids:
if not self.torrentmanager[torrent_id].pause():
log.warning("Error pausing torrent %s", torrent_id)
@export
def connect_peer(self, torrent_id, ip, port):
log.debug("adding peer %s to %s", ip, torrent_id)
if not self.torrentmanager[torrent_id].connect_peer(ip, port):
log.warning("Error adding peer %s:%s to %s", ip, port, torrent_id)
@export
def move_storage(self, torrent_ids, dest):
log.debug("Moving storage %s to %s", torrent_ids, dest)
for torrent_id in torrent_ids:
if not self.torrentmanager[torrent_id].move_storage(dest):
log.warning("Error moving torrent %s to %s", torrent_id, dest)
@export
def pause_session(self):
"""Pause all torrents in the session"""
if not self.session.is_paused():
self.session.pause()
component.get("EventManager").emit(SessionPausedEvent())
@export
def resume_session(self):
"""Resume all torrents in the session"""
if self.session.is_paused():
self.session.resume()
component.get("EventManager").emit(SessionResumedEvent())
@export
def resume_torrent(self, torrent_ids):
log.debug("Resuming: %s", torrent_ids)
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].resume()
def create_torrent_status(self, torrent_id, torrent_keys, plugin_keys, diff=False, update=False, all_keys=False):
try:
status = self.torrentmanager[torrent_id].get_status(torrent_keys, diff, update=update, all_keys=all_keys)
except KeyError:
import traceback
traceback.print_exc()
# Torrent was probably removed in the meantime
return {}
# Ask the plugin manager to fill in the plugin keys
if len(plugin_keys) > 0:
status.update(self.pluginmanager.get_status(torrent_id, plugin_keys))
return status
@export
def get_torrent_status(self, torrent_id, keys, diff=False):
torrent_keys, plugin_keys = self.torrentmanager.separate_keys(keys, [torrent_id])
return self.create_torrent_status(torrent_id, torrent_keys, plugin_keys, diff=diff, update=True,
all_keys=not keys)
@export
def get_torrents_status(self, filter_dict, keys, diff=False):
"""
Returns all torrents, optionally filtered by filter_dict.
"""
torrent_ids = self.filtermanager.filter_torrent_ids(filter_dict)
d = self.torrentmanager.torrents_status_update(torrent_ids, keys, diff=diff)
def add_plugin_fields(args):
status_dict, plugin_keys = args
# Ask the plugin manager to fill in the plugin keys
if len(plugin_keys) > 0:
for key in status_dict.keys():
status_dict[key].update(self.pluginmanager.get_status(key, plugin_keys))
return status_dict
d.addCallback(add_plugin_fields)
return d
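# Usage sketch (the "state" filter key is assumed): fetch the name and
# progress of all seeding torrents through the returned Deferred.
#
#     d = core.get_torrents_status({"state": ["Seeding"]}, ["name", "progress"])
#     d.addCallback(lambda status: log.debug("%d seeding torrents", len(status)))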
@export
def get_filter_tree(self, show_zero_hits=True, hide_cat=None):
"""
Returns {field: [(value, count)]}
for use in the sidebar(s).
"""
return self.filtermanager.get_filter_tree(show_zero_hits, hide_cat)
@export
def get_session_state(self):
"""Returns a list of torrent_ids in the session."""
# Get the torrent list from the TorrentManager
return self.torrentmanager.get_torrent_list()
@export
def get_config(self):
"""Get all the preferences as a dictionary"""
return self.config.config
@export
def get_config_value(self, key):
"""Get the config value for key"""
return self.config.get(key)
@export
def get_config_values(self, keys):
"""Get the config values for the entered keys"""
return dict((key, self.config.get(key)) for key in keys)
@export
def set_config(self, config):
"""Set the config with values from dictionary"""
# Load all the values into the configuration
for key in config.keys():
if isinstance(config[key], basestring):
config[key] = config[key].encode("utf8")
self.config[key] = config[key]
@export
def get_torrent_config_value(self, key):
"""Get the config value for key"""
return self.torrent_config[key]
@export
def get_torrent_config(self):
"""Get all the preferences as a dictionary"""
return self.session.get_settings()
@export
def set_torrent_config(self, config):
"""Set the torrent settings with values from dictionary"""
# Load all the values into the configuration
for key in config.keys():
if isinstance(config[key], basestring):
config[key] = config[key].encode("utf8")
self.torrent_config[key] = config[key]
self.session.set_settings(self.torrent_config)
@export
def set_torrent_high_speed_seed(self):
"""Set the torrent setting to high performance seed"""
high_speed_settings = lt.high_performance_seed()
self.session.set_settings(high_speed_settings)
@export
def get_listen_port(self):
"""Returns the active listen port"""
return self.session.listen_port()
@export
def get_i2p_proxy(self):
"""Returns the active listen port"""
i2p_settings = self.session.i2p_proxy()
i2p_dict = {"hostname": i2p_settings.hostname, "port": i2p_settings.port}
return i2p_dict
@export
def get_proxy(self):
"""Returns the active listen port"""
proxy_settings = self.session.proxy()
proxy_dict = {
"type": int(proxy_settings.type),
"hostname": proxy_settings.hostname,
"username": proxy_settings.username,
"password": proxy_settings.password,
"port": proxy_settings.port,
"proxy_hostnames": proxy_settings.proxy_hostnames,
"proxy_peer_connections": proxy_settings.proxy_peer_connections
}
return proxy_dict
@export
def get_available_plugins(self):
"""Returns a list of plugins available in the core"""
return self.pluginmanager.get_available_plugins()
@export
def get_enabled_plugins(self):
"""Returns a list of enabled plugins in the core"""
return self.pluginmanager.get_enabled_plugins()
@export
def enable_plugin(self, plugin):
self.pluginmanager.enable_plugin(plugin)
return None
@export
def disable_plugin(self, plugin):
self.pluginmanager.disable_plugin(plugin)
return None
@export
def force_recheck(self, torrent_ids):
"""Forces a data recheck on torrent_ids"""
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].force_recheck()
@export
def set_torrent_options(self, torrent_ids, options):
"""Sets the torrent options for torrent_ids"""
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].set_options(options)
@export
def set_torrent_trackers(self, torrent_id, trackers):
"""Sets a torrents tracker list. trackers will be [{"url", "tier"}]"""
return self.torrentmanager[torrent_id].set_trackers(trackers)
@export
def set_torrent_max_connections(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max number of connections"""
return self.torrentmanager[torrent_id].set_max_connections(value)
@export
def set_torrent_max_upload_slots(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max number of upload slots"""
return self.torrentmanager[torrent_id].set_max_upload_slots(value)
@export
def set_torrent_max_upload_speed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max upload speed"""
return self.torrentmanager[torrent_id].set_max_upload_speed(value)
@export
def set_torrent_max_download_speed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a torrents max download speed"""
return self.torrentmanager[torrent_id].set_max_download_speed(value)
@export
def set_torrent_file_priorities(self, torrent_id, priorities):
# Deprecated method, use set_torrent_options instead
# Used by at least one 3rd party plugin:
"""Sets a torrents file priorities"""
return self.torrentmanager[torrent_id].set_file_priorities(priorities)
@export
def set_torrent_prioritize_first_last(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets a higher priority to the first and last pieces"""
return self.torrentmanager[torrent_id].set_prioritize_first_last_pieces(value)
@export
def set_torrent_auto_managed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the auto managed flag for queueing purposes"""
return self.torrentmanager[torrent_id].set_auto_managed(value)
@export
def set_torrent_stop_at_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to stop at 'stop_ratio'"""
return self.torrentmanager[torrent_id].set_stop_at_ratio(value)
@export
def set_torrent_stop_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the ratio when to stop a torrent if 'stop_at_ratio' is set"""
return self.torrentmanager[torrent_id].set_stop_ratio(value)
@export
def set_torrent_remove_at_ratio(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to be removed at 'stop_ratio'"""
return self.torrentmanager[torrent_id].set_remove_at_ratio(value)
@export
def set_torrent_move_completed(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the torrent to be moved when completed"""
return self.torrentmanager[torrent_id].set_move_completed(value)
@export
def set_torrent_move_completed_path(self, torrent_id, value):
# Deprecated method, use set_torrent_options instead
"""Sets the path for the torrent to be moved when completed"""
return self.torrentmanager[torrent_id].set_move_completed_path(value)
@export(AUTH_LEVEL_ADMIN)
def set_owner(self, torrent_ids, username):
"""Set's the torrent owner.
:param torrent_id: the torrent_id of the torrent to remove
:type torrent_id: string
:param username: the new owner username
:type username: string
:raises DelugeError: if the username is not known
"""
if not self.authmanager.has_account(username):
raise DelugeError("Username \"%s\" is not known." % username)
if isinstance(torrent_ids, basestring):
torrent_ids = [torrent_ids]
for torrent_id in torrent_ids:
self.torrentmanager[torrent_id].set_owner(username)
return None
@export
def get_path_size(self, path):
"""Returns the size of the file or folder 'path' and -1 if the path is
inaccessible (non-existent or insufficient privileges)"""
return deluge.common.get_path_size(path)
@export
def create_torrent(self, path, tracker, piece_length, comment, target,
webseeds, private, created_by, trackers, add_to_session):
log.debug("creating torrent..")
threading.Thread(target=self._create_torrent_thread,
args=(
path,
tracker,
piece_length,
comment,
target,
webseeds,
private,
created_by,
trackers,
add_to_session)).start()
def _create_torrent_thread(self, path, tracker, piece_length, comment, target,
webseeds, private, created_by, trackers, add_to_session):
import deluge.metafile
deluge.metafile.make_meta_file(
path,
tracker,
piece_length,
comment=comment,
target=target,
webseeds=webseeds,
private=private,
created_by=created_by,
trackers=trackers)
log.debug("torrent created!")
if add_to_session:
options = {}
options["download_location"] = os.path.split(path)[0]
self.add_torrent_file(os.path.split(target)[1], open(target, "rb").read(), options)
@export
def upload_plugin(self, filename, filedump):
"""This method is used to upload new plugins to the daemon. It is used
when connecting to the daemon remotely and installing a new plugin on
the client side. 'filedump' is a base64-encoded string of the plugin file
contents, i.e. base64.encodestring(plugin_file.read())"""
try:
filedump = base64.decodestring(filedump)
except Exception as ex:
log.error("There was an error decoding the filedump string!")
log.exception(ex)
return
f = open(os.path.join(get_config_dir(), "plugins", filename), "wb")
f.write(filedump)
f.close()
component.get("CorePluginManager").scan_for_plugins()
@export
def rescan_plugins(self):
"""
Rescans the plugin folders for new plugins
"""
component.get("CorePluginManager").scan_for_plugins()
@export
def rename_files(self, torrent_id, filenames):
"""
Rename files in torrent_id. Since this is an asynchronous operation by
libtorrent, watch for the TorrentFileRenamedEvent to know when the
files have been renamed.
:param torrent_id: the torrent_id to rename files
:type torrent_id: string
:param filenames: a list of index, filename pairs
:type filenames: ((index, filename), ...)
:raises InvalidTorrentError: if torrent_id is invalid
"""
if torrent_id not in self.torrentmanager.torrents:
raise InvalidTorrentError("torrent_id is not in session")
self.torrentmanager[torrent_id].rename_files(filenames)
@export
def rename_folder(self, torrent_id, folder, new_folder):
"""
Renames the 'folder' to 'new_folder' in 'torrent_id'. Watch for the
TorrentFolderRenamedEvent which is emitted when the folder has been
renamed successfully.
:param torrent_id: the torrent to rename folder in
:type torrent_id: string
:param folder: the folder to rename
:type folder: string
:param new_folder: the new folder name
:type new_folder: string
:raises InvalidTorrentError: if the torrent_id is invalid
"""
if torrent_id not in self.torrentmanager.torrents:
raise InvalidTorrentError("torrent_id is not in session")
self.torrentmanager[torrent_id].rename_folder(folder, new_folder)
@export
def queue_top(self, torrent_ids):
log.debug("Attempting to queue %s to top", torrent_ids)
# torrent_ids must be sorted in reverse before moving to preserve order
for torrent_id in sorted(torrent_ids, key=self.torrentmanager.get_queue_position, reverse=True):
try:
# If the queue method returns True, then we should emit a signal
if self.torrentmanager.queue_top(torrent_id):
component.get("EventManager").emit(TorrentQueueChangedEvent())
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
@export
def queue_up(self, torrent_ids):
log.debug("Attempting to queue %s to up", torrent_ids)
torrents = ((self.torrentmanager.get_queue_position(torrent_id), torrent_id) for torrent_id in torrent_ids)
torrent_moved = True
prev_queue_position = None
# torrent_ids must be sorted before moving.
for queue_position, torrent_id in sorted(torrents):
# Move the torrent if and only if there is space (by not moving it we preserve the order)
if torrent_moved or queue_position - prev_queue_position > 1:
try:
torrent_moved = self.torrentmanager.queue_up(torrent_id)
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
# If the torrent moved, then we should emit a signal
if torrent_moved:
component.get("EventManager").emit(TorrentQueueChangedEvent())
else:
prev_queue_position = queue_position
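# Worked example of the gap check above (hypothetical queue positions): with
# torrents selected at positions [0, 1, 3], the torrent at 0 cannot move (it
# is already at the top), so torrent_moved becomes False; the torrent at 1 is
# then skipped because there is no gap below it, which preserves the selected
# torrents' relative order, while the torrent at 3 still moves up because a
# gap remains beneath it.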
@export
def queue_down(self, torrent_ids):
log.debug("Attempting to queue %s to down", torrent_ids)
torrents = ((self.torrentmanager.get_queue_position(torrent_id), torrent_id) for torrent_id in torrent_ids)
torrent_moved = True
prev_queue_position = None
# torrent_ids must be sorted before moving.
for queue_position, torrent_id in sorted(torrents, reverse=True):
# Move the torrent if and only if there is space (by not moving it we preserve the order)
if torrent_moved or prev_queue_position - queue_position > 1:
try:
torrent_moved = self.torrentmanager.queue_down(torrent_id)
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
# If the torrent moved, then we should emit a signal
if torrent_moved:
component.get("EventManager").emit(TorrentQueueChangedEvent())
else:
prev_queue_position = queue_position
@export
def queue_bottom(self, torrent_ids):
log.debug("Attempting to queue %s to bottom", torrent_ids)
# torrent_ids must be sorted before moving to preserve order
for torrent_id in sorted(torrent_ids, key=self.torrentmanager.get_queue_position):
try:
# If the queue method returns True, then we should emit a signal
if self.torrentmanager.queue_bottom(torrent_id):
component.get("EventManager").emit(TorrentQueueChangedEvent())
except KeyError:
log.warning("torrent_id: %s does not exist in the queue", torrent_id)
@export
def glob(self, path):
return glob.glob(path)
@export
def test_listen_port(self):
"""
Checks if the active port is open
:returns: True if the port is open, False if not
:rtype: bool
"""
from twisted.web.client import getPage
d = getPage("http://deluge-torrent.org/test_port.php?port=%s" %
self.get_listen_port(), timeout=30)
def on_get_page(result):
return bool(int(result))
def on_error(failure):
log.warning("Error testing listen port: %s", failure)
d.addCallback(on_get_page)
d.addErrback(on_error)
return d
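# Usage sketch: the port test is asynchronous as well, so the boolean result
# arrives through the Deferred.
#
#     core.test_listen_port().addCallback(
#         lambda is_open: log.info("Listen port open: %s", is_open))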
@export
def get_free_space(self, path=None):
"""
Returns the number of free bytes at path
:param path: the path to check free space at, if None, use the default download location
:type path: string
:returns: the number of free bytes at path
:rtype: int
:raises InvalidPathError: if the path is invalid
"""
if not path:
path = self.config["download_location"]
try:
return deluge.common.free_space(path)
except InvalidPathError:
return -1
@export
def get_libtorrent_version(self):
"""
Returns the libtorrent version.
:returns: the version
:rtype: string
"""
return lt.version
@export
def get_completion_paths(self, args):
"""
Returns the available path completions for the input value.
"""
return path_chooser_common.get_completion_paths(args)
@export(AUTH_LEVEL_ADMIN)
def get_known_accounts(self):
return self.authmanager.get_known_accounts()
@export(AUTH_LEVEL_NONE)
def get_auth_levels_mappings(self):
return (AUTH_LEVELS_MAPPING, AUTH_LEVELS_MAPPING_REVERSE)
@export(AUTH_LEVEL_ADMIN)
def create_account(self, username, password, authlevel):
return self.authmanager.create_account(username, password, authlevel)
@export(AUTH_LEVEL_ADMIN)
def update_account(self, username, password, authlevel):
return self.authmanager.update_account(username, password, authlevel)
@export(AUTH_LEVEL_ADMIN)
def remove_account(self, username):
return self.authmanager.remove_account(username)
|
gpl-3.0
|
blackshirt/dompetku
|
dompetku/handler/services.py
|
1
|
3750
|
#!/usr/bin/env python
#
# Copyright @2014 blackshirtmuslim@yahoo.com
# Licensed: see Python license
"""Module to handle json services."""
import datetime
import json
import peewee
import tornado.web
import tornado.escape
from dompetku.handler import base
from dompetku.utils import jsonify
from dompetku.model import Transaksi, User
from dompetku.form import TransaksiForm
class TransaksiContainer(object):
def __init__(self, user):
self.user = user
def find_one(self, tid):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, Transaksi.tid == tid)
if trn.exists():
data = trn.get() # Transaksi instance
return data
return None
def find_data(self, *expr):
cur_user = User.select().where(User.name == self.user)
if cur_user.exists():
user = cur_user.get()
trn = Transaksi.select().where(Transaksi.user == user.uid, *expr)
return trn # Transaksi QueryResultWrapper
return None
class DataSources(TransaksiContainer):
def __init__(self, user):
self.user = user
super().__init__(self.user)
def get_one(self, tid):
data = self.find_one(tid)
if data is not None:
results = {
'tid': data.tid,
'user': data.user.name,
'info': data.info,
'amount': data.amount,
'transdate': data.transdate,
'memo': data.memo
}
return results # dict of transaksi item
def get_data(self, *expr):
temporary = {}
results = []
data = self.find_data(*expr)
for item in data:
temporary = {
'tid': item.tid,
'user': item.user.name,
'info': item.info,
'transdate': item.transdate,
'amount': item.amount,
'memo': item.memo
}
results.append(temporary)
return results # list of dict of transaksi item
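# Usage sketch (hypothetical user name): DataSources accepts arbitrary peewee
# expressions, mirroring the month filter built in ApiTransactions.get below.
#
#     dsc = DataSources("budi")
#     rows = dsc.get_data(Transaksi.transdate.month == 7)
#     print(jsonify(rows))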
class ApiTransactions(base.BaseHandler):
def initialize(self):
self.dsc = DataSources(self.current_user)
@tornado.web.authenticated
def get(self, *kondisi):
if kondisi:
data = self.dsc.get_data(*kondisi)
else:
# Get data for the current month
today = datetime.date.today()
cur_month = today.month
expr = (Transaksi.transdate.month == cur_month,)
data = self.dsc.get_data(*expr)
self.write(jsonify(data))
def post(self):
data = tornado.escape.json_decode(self.request.body)
info = data.get('info')
amount = data.get('amount')
memo = data.get('memo')
try:
active_user = User.get(User.name == self.current_user)
except peewee.DoesNotExist:
# Unknown user; nothing to insert, so bail out early.
return
item = Transaksi.insert(info=info,
amount=amount,
tipe=10,
user=active_user.uid,
memo=memo)
last_id = item.execute()
transaksi = Transaksi.get(Transaksi.tid == last_id)
response = {'info': transaksi.info,
'user': transaksi.user.name,
'amount': transaksi.amount,
'memo': transaksi.memo,
'transdate': transaksi.transdate}
self.write(jsonify(response))
|
bsd-2-clause
|
spel-uchile/SUCHAI-Flight-Software
|
sandbox/log_parser.py
|
1
|
1956
|
import re
import argparse
import pandas as pd
# General expressions
re_error = re.compile(r'\[ERROR\]\[(\d+)\]\[(\w+)\](.+)')
re_warning = re.compile(r'\[WARN \]\[(\d+)\]\[(\w+)\](.+)')
re_info = re.compile(r'\[INFO \]\[(\d+)\]\[(\w+)\](.+)')
re_debug = re.compile(r'\[DEBUG\]\[(\d+)\]\[(\w+)\](.+)')
re_verbose = re.compile(r'\[VERB \]\[(\d+)\]\[(\w+)\](.+)')
# Specific expressions
re_cmd_run = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Running the command: (.+)')
re_cmd_result = re.compile(r'\[INFO \]\[(\d+)]\[Executer\] Command result: (\d+)')
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser()
# General expressions
parser.add_argument('file', type=str, help="Log file")
parser.add_argument('--error', action="store_const", const=re_error)
parser.add_argument('--warning', action="store_const", const=re_warning)
parser.add_argument('--info', action="store_const", const=re_info)
parser.add_argument('--debug', action="store_const", const=re_debug)
parser.add_argument('--verbose', action="store_const", const=re_verbose)
# Specific expressions
parser.add_argument('--cmd-run', action="store_const", const=re_cmd_run)
parser.add_argument('--cmd-result', action="store_const", const=re_cmd_result)
return parser.parse_args()
def parse_text(text, regexp):
return regexp.findall(text)
def save_parsed(logs, file, format=None):
df = pd.DataFrame(logs)
# print(df)
df.to_csv(file)
if __name__ == "__main__":
args = get_parameters()
print("Reading file {}...".format(args.file))
with open(args.file) as logfile:
text = logfile.read()
args = vars(args)
print(args)
for type, regexp in args.items():
if type != "file" and regexp is not None:
print("Parsing {}...".format(type))
logs = parse_text(text, regexp)
save_parsed(logs, args["file"]+type+".csv")
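# Example invocation (sketch; the log file name is assumed):
#   python log_parser.py suchai.log --cmd-run --error
# Each selected expression is parsed separately and written next to the log,
# e.g. suchai.logcmd_run.csv and suchai.logerror.csv.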
|
gpl-3.0
|
datasciencedev/locality-sensitive-hashing
|
mapreduce/model.py
|
4
|
39511
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
# Disable "Invalid method name"
# pylint: disable=g-bad-name
__all__ = ["MapreduceState",
"MapperSpec",
"MapreduceControl",
"MapreduceSpec",
"ShardState",
"CountersMap",
"TransientShardState",
"QuerySpec",
"HugeTask"]
import cgi
import datetime
import urllib
import zlib
from mapreduce.third_party.graphy.backends import google_chart_api
from mapreduce.third_party import simplejson
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import db
from mapreduce import context
from mapreduce import hooks
from mapreduce import json_util
from mapreduce import util
# pylint: disable=protected-access
# Special datastore kinds for MR.
_MAP_REDUCE_KINDS = ("_AE_MR_MapreduceControl",
"_AE_MR_MapreduceState",
"_AE_MR_ShardState",
"_AE_MR_TaskPayload")
class _HugeTaskPayload(db.Model):
"""Model object to store task payload."""
payload = db.BlobProperty()
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_TaskPayload"
class HugeTask(object):
"""HugeTask is a taskqueue.Task-like class that can store big payloads.
Payloads are stored either in the task payload itself or in the datastore.
Task handlers should inherit from base_handler.HugeTaskHandler class.
"""
PAYLOAD_PARAM = "__payload"
PAYLOAD_KEY_PARAM = "__payload_key"
# Leave some wiggle room for headers and other fields.
MAX_TASK_PAYLOAD = taskqueue.MAX_PUSH_TASK_SIZE_BYTES - 1024
MAX_DB_PAYLOAD = datastore_rpc.BaseConnection.MAX_RPC_BYTES
PAYLOAD_VERSION_HEADER = "AE-MR-Payload-Version"
# Update version when payload handling is changed
# in a backward incompatible way.
PAYLOAD_VERSION = "1"
def __init__(self,
url,
params,
name=None,
eta=None,
countdown=None,
parent=None,
headers=None):
"""Init.
Args:
url: task url in str.
params: a dict from str to str.
name: task name.
eta: task eta.
countdown: task countdown.
parent: parent entity of huge task's payload.
headers: a dict of headers for the task.
Raises:
ValueError: when payload is too big even for datastore, or parent is
not specified when payload is stored in datastore.
"""
self.url = url
self.name = name
self.eta = eta
self.countdown = countdown
self._headers = {
"Content-Type": "application/octet-stream",
self.PAYLOAD_VERSION_HEADER: self.PAYLOAD_VERSION
}
if headers:
self._headers.update(headers)
# TODO(user): Find a more space efficient way than urlencoding.
payload_str = urllib.urlencode(params)
compressed_payload = ""
if len(payload_str) > self.MAX_TASK_PAYLOAD:
compressed_payload = zlib.compress(payload_str)
# Payload is small. Don't bother with anything.
if not compressed_payload:
self._payload = payload_str
# Compressed payload is small. Don't bother with datastore.
elif len(compressed_payload) < self.MAX_TASK_PAYLOAD:
self._payload = self.PAYLOAD_PARAM + compressed_payload
elif len(compressed_payload) > self.MAX_DB_PAYLOAD:
raise ValueError(
"Payload from %s to big to be stored in database: %s" %
(self.name, len(compressed_payload)))
# Store payload in the datastore.
else:
if not parent:
raise ValueError("Huge tasks should specify parent entity.")
payload_entity = _HugeTaskPayload(payload=compressed_payload,
parent=parent)
payload_key = payload_entity.put()
self._payload = self.PAYLOAD_KEY_PARAM + str(payload_key)
def add(self, queue_name, transactional=False):
"""Add task to the queue."""
task = self.to_task()
task.add(queue_name, transactional)
def to_task(self):
"""Convert to a taskqueue task."""
# Never pass params to taskqueue.Task. Use payload instead. Otherwise,
# it's up to a particular taskqueue implementation to generate
# payload from params. It could blow up payload size over limit.
return taskqueue.Task(
url=self.url,
payload=self._payload,
name=self.name,
eta=self.eta,
countdown=self.countdown,
headers=self._headers)
@classmethod
def decode_payload(cls, request):
"""Decode task payload.
HugeTask controls its own payload entirely including urlencoding.
It doesn't depend on any particular web framework.
Args:
request: a webapp Request instance.
Returns:
A dict of str to str. The same as the params argument to __init__.
Raises:
DeprecationWarning: When task payload constructed from an older
incompatible version of mapreduce.
"""
# TODO(user): Pass mr_id into headers. Otherwise when payload decoding
# failed, we can't abort a mr.
if request.headers.get(cls.PAYLOAD_VERSION_HEADER) != cls.PAYLOAD_VERSION:
raise DeprecationWarning(
"Task is generated by an older incompatible version of mapreduce. "
"Please kill this job manually")
return cls._decode_payload(request.body)
@classmethod
def _decode_payload(cls, body):
compressed_payload_str = None
if body.startswith(cls.PAYLOAD_KEY_PARAM):
payload_key = body[len(cls.PAYLOAD_KEY_PARAM):]
payload_entity = _HugeTaskPayload.get(payload_key)
compressed_payload_str = payload_entity.payload
elif body.startswith(cls.PAYLOAD_PARAM):
compressed_payload_str = body[len(cls.PAYLOAD_PARAM):]
if compressed_payload_str:
payload_str = zlib.decompress(compressed_payload_str)
else:
payload_str = body
result = {}
for (name, value) in cgi.parse_qs(payload_str).items():
if len(value) == 1:
result[name] = value[0]
else:
result[name] = value
return result
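# Sketch of the size tiers handled by __init__ and _decode_payload above
# (thresholds are MAX_TASK_PAYLOAD and MAX_DB_PAYLOAD):
#
#   urlencoded params fit in the task          -> stored verbatim
#   compressed payload fits in the task        -> PAYLOAD_PARAM + zlib bytes
#   compressed payload fits in the datastore   -> PAYLOAD_KEY_PARAM + entity key
#   otherwise                                  -> ValueError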
class CountersMap(json_util.JsonMixin):
"""Maintains map from counter name to counter value.
The class is used to provide basic arithmetic on counter values (bulk
add/remove), increment individual values and store/load data from json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name, default=0):
"""Get current counter value.
Args:
counter_name: counter name as string.
default: default value if one doesn't exist.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, default)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
For each counter in the passed map, subtracts its value from the counter in
this map.
Args:
counters_map: CounterMap instance to subtract.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, -counters_map.counters[counter_name])
def clear(self):
"""Clear all values."""
self.counters = {}
def to_json(self):
"""Serializes all the data in this map into json form.
Returns:
json-compatible data representation.
"""
return {"counters": self.counters}
@classmethod
def from_json(cls, json):
"""Create new CountersMap from the json data structure, encoded by to_json.
Args:
json: json representation of CountersMap.
Returns:
an instance of CountersMap with all data deserialized from json.
"""
counters_map = cls()
counters_map.counters = json["counters"]
return counters_map
def to_dict(self):
"""Convert to dictionary.
Returns:
a dictionary with counter name as key and counter values as value.
"""
return self.counters
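# Usage sketch: CountersMap is a thin wrapper over a dict, so aggregating
# across shards reduces to add_map() plus a json round trip.
#
#     totals = CountersMap()
#     totals.increment("mapper-calls", 10)
#     totals.add_map(CountersMap({"mapper-calls": 5, "errors": 1}))
#     restored = CountersMap.from_json(totals.to_json())
#     assert restored.get("mapper-calls") == 15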
class MapperSpec(json_util.JsonMixin):
"""Contains a specification for the mapper phase of the mapreduce.
MapperSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapperSpec is
passed as a payload to all mapreduce tasks in JSON encoding as part of
MapreduceSpec.
Specifying mapper handlers:
* '<module_name>.<class_name>' - __call__ method of class instance will be
called
* '<module_name>.<function_name>' - function will be called.
* '<module_name>.<class_name>.<method_name>' - class will be instantiated
and method called.
"""
def __init__(self,
handler_spec,
input_reader_spec,
params,
shard_count,
output_writer_spec=None):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
Properties:
handler_spec: name of handler class/function to use.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
output_writer_spec: The class name of the output writer to use.
"""
self.handler_spec = handler_spec
self.input_reader_spec = input_reader_spec
self.output_writer_spec = output_writer_spec
self.shard_count = int(shard_count)
self.params = params
def get_handler(self):
"""Get mapper handler instance.
This always creates a new instance of the handler. If the handler is a
callable instance, MR only wants to create a new instance at the
beginning of a shard or shard retry. The pickled callable instance
should be accessed from TransientShardState.
Returns:
handler instance as callable.
"""
return util.handler_for_name(self.handler_spec)
handler = property(get_handler)
def input_reader_class(self):
"""Get input reader class.
Returns:
input reader class object.
"""
return util.for_name(self.input_reader_spec)
def output_writer_class(self):
"""Get output writer class.
Returns:
output writer class object.
"""
return self.output_writer_spec and util.for_name(self.output_writer_spec)
def to_json(self):
"""Serializes this MapperSpec into a json-izable object."""
result = {
"mapper_handler_spec": self.handler_spec,
"mapper_input_reader": self.input_reader_spec,
"mapper_params": self.params,
"mapper_shard_count": self.shard_count
}
if self.output_writer_spec:
result["mapper_output_writer"] = self.output_writer_spec
return result
def __str__(self):
return "MapperSpec(%s, %s, %s, %s)" % (
self.handler_spec, self.input_reader_spec, self.params,
self.shard_count)
@classmethod
def from_json(cls, json):
"""Creates MapperSpec from a dict-like object."""
return cls(json["mapper_handler_spec"],
json["mapper_input_reader"],
json["mapper_params"],
json["mapper_shard_count"],
json.get("mapper_output_writer")
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
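# Sketch of the three handler_spec forms listed in the class docstring
# (module, class and function names here are hypothetical):
#
#     MapperSpec("my_mappers.TouchEntity",          # class; its __call__ is invoked
#                "mapreduce.input_readers.DatastoreInputReader",
#                {"entity_kind": "models.Guestbook"},
#                shard_count=8)
#     handler_spec="my_mappers.touch_entity"        # plain function
#     handler_spec="my_mappers.Touch.process"       # class instantiated, method called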
class MapreduceSpec(json_util.JsonMixin):
"""Contains a specification for the whole mapreduce.
MapreduceSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapreduceSpec is
passed as a payload to all mapreduce tasks in json encoding.
"""
# Url to call when mapreduce finishes its execution.
PARAM_DONE_CALLBACK = "done_callback"
# Queue to use to call done callback
PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
def __init__(self,
name,
mapreduce_id,
mapper_spec,
params={},
hooks_class_name=None):
"""Create new MapreduceSpec.
Args:
name: The name of this mapreduce job type.
mapreduce_id: ID of the mapreduce.
mapper_spec: JSON-encoded string containing a MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
Properties:
name: The name of this mapreduce job type.
mapreduce_id: unique id of this mapreduce as string.
mapper: This MapreduceSpec's instance of MapperSpec.
params: dictionary of additional mapreduce parameters.
hooks_class_name: The fully qualified name of the hooks class to use.
"""
self.name = name
self.mapreduce_id = mapreduce_id
self.mapper = MapperSpec.from_json(mapper_spec)
self.params = params
self.hooks_class_name = hooks_class_name
self.__hooks = None
self.get_hooks() # Fail fast on an invalid hook class.
def get_hooks(self):
"""Returns a hooks.Hooks class or None if no hooks class has been set."""
if self.__hooks is None and self.hooks_class_name is not None:
hooks_class = util.for_name(self.hooks_class_name)
if not isinstance(hooks_class, type):
raise ValueError("hooks_class_name must refer to a class, got %s" %
type(hooks_class).__name__)
if not issubclass(hooks_class, hooks.Hooks):
raise ValueError(
"hooks_class_name must refer to a hooks.Hooks subclass")
self.__hooks = hooks_class(self)
return self.__hooks
def to_json(self):
"""Serializes all data in this mapreduce spec into json form.
Returns:
data in json format.
"""
mapper_spec = self.mapper.to_json()
return {
"name": self.name,
"mapreduce_id": self.mapreduce_id,
"mapper_spec": mapper_spec,
"params": self.params,
"hooks_class_name": self.hooks_class_name,
}
@classmethod
def from_json(cls, json):
"""Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
"""
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"),
json.get("hooks_class_name"))
return mapreduce_spec
def __str__(self):
return str(self.to_json())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.to_json() == other.to_json()
@classmethod
def _get_mapreduce_spec(cls, mr_id):
"""Get Mapreduce spec from mr id."""
key = 'GAE-MR-spec: %s' % mr_id
spec_json = memcache.get(key)
if spec_json:
return cls.from_json(spec_json)
state = MapreduceState.get_by_job_id(mr_id)
spec = state.mapreduce_spec
spec_json = spec.to_json()
memcache.set(key, spec_json)
return spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
active: if this MR is still running.
last_poll_time: last time controller job has polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. This chart displays the
progress of all the shards the best way it can.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing. This starts as 0,
then set by KickOffJob handler to be the actual number of input
readers after input splitting, and is updated by Controller task
as shards finish.
start_time: When the job started.
writer_state: Json property to be used by writer to store its state.
This is filled when there is a single output per job. Will be deprecated.
Use OutputWriter.get_filenames instead.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
# TODO(user): Replace mapreduce_spec with job_config.
mapreduce_spec = json_util.JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
writer_state = json_util.JsonProperty(dict, indexed=False)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
result_status = db.StringProperty(required=False, choices=_RESULTS)
# For UI purposes only.
chart_url = db.TextProperty(default="")
chart_width = db.IntegerProperty(default=300, indexed=False)
sparkline_url = db.TextProperty(default="")
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceState"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), str(mapreduce_id))
@classmethod
def get_by_job_id(cls, mapreduce_id):
"""Retrieves the instance of state for a Job.
Args:
mapreduce_id: The mapreduce job to retrieve.
Returns:
instance of MapreduceState for passed id.
"""
return db.get(cls.get_key_by_job_id(mapreduce_id))
def set_processed_counts(self, shards_processed):
"""Updates a chart url to display processed count for each shard.
Args:
shards_processed: list of integers with number of processed entities in
each shard
"""
chart = google_chart_api.BarChart(shards_processed)
shard_count = len(shards_processed)
if shards_processed:
# Only 16 labels on the whole chart.
stride_length = max(1, shard_count / 16)
chart.bottom.labels = []
for x in xrange(shard_count):
if (x % stride_length == 0 or
x == shard_count - 1):
chart.bottom.labels.append(x)
else:
chart.bottom.labels.append("")
chart.left.labels = ["0", str(max(shards_processed))]
chart.left.min = 0
self.chart_width = min(700, max(300, shard_count * 20))
self.chart_url = chart.display.Url(self.chart_width, 200)
def get_processed(self):
"""Number of processed entities.
Returns:
The total number of processed entities as int.
"""
return self.counters_map.get(context.COUNTER_MAPPER_CALLS)
processed = property(get_processed)
@staticmethod
def create_new(mapreduce_id=None,
gettime=datetime.datetime.now):
"""Create a new MapreduceState.
Args:
mapreduce_id: Mapreduce id as string.
gettime: Used for testing.
"""
if not mapreduce_id:
mapreduce_id = MapreduceState.new_mapreduce_id()
state = MapreduceState(key_name=mapreduce_id,
last_poll_time=gettime())
state.set_processed_counts([])
return state
@staticmethod
def new_mapreduce_id():
"""Generate new mapreduce id."""
return util._get_descending_key()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
class TransientShardState(object):
"""A shard's states that are kept in task payload.
TransientShardState holds two types of states:
1. Some states just don't need to be saved to datastore. e.g.
serialized input reader and output writer instances.
2. Some states are duplicated from datastore, e.g. slice_id, shard_id.
These are used to validate the task.
"""
def __init__(self,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
initial_input_reader,
output_writer=None,
retries=0,
handler=None):
"""Init.
Args:
base_path: base path of this mapreduce job. Deprecated.
mapreduce_spec: an instance of MapReduceSpec.
shard_id: shard id.
slice_id: slice id. When enqueuing task for the next slice, this number
is incremented by 1.
input_reader: input reader instance for this shard.
initial_input_reader: the input reader instance before any iteration.
Used by shard retry.
output_writer: output writer instance for this shard, if exists.
retries: the number of retries of the current shard. Used to drop
tasks from old retries.
handler: map/reduce handler.
"""
self.base_path = base_path
self.mapreduce_spec = mapreduce_spec
self.shard_id = shard_id
self.slice_id = slice_id
self.input_reader = input_reader
self.initial_input_reader = initial_input_reader
self.output_writer = output_writer
self.retries = retries
self.handler = handler
self._input_reader_json = self.input_reader.to_json()
def reset_for_retry(self, output_writer):
"""Reset self for shard retry.
Args:
output_writer: new output writer that contains new output files.
"""
self.input_reader = self.initial_input_reader
self.slice_id = 0
self.retries += 1
self.output_writer = output_writer
self.handler = self.mapreduce_spec.mapper.handler
def advance_for_next_slice(self, recovery_slice=False):
"""Advance relavent states for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
if recovery_slice:
self.slice_id += 2
# Restore input reader to the beginning of the slice.
self.input_reader = self.input_reader.from_json(self._input_reader_json)
else:
self.slice_id += 1
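# Illustration of the recovery path above: slice_id jumps by 2 so the
# recovery slice keeps its own number, and the input reader is rewound to
# the json snapshot captured in __init__, i.e. to where this slice started.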
def to_dict(self):
"""Convert state to dictionary to save in task payload."""
result = {"mapreduce_spec": self.mapreduce_spec.to_json_str(),
"shard_id": self.shard_id,
"slice_id": str(self.slice_id),
"input_reader_state": self.input_reader.to_json_str(),
"initial_input_reader_state":
self.initial_input_reader.to_json_str(),
"retries": str(self.retries)}
if self.output_writer:
result["output_writer_state"] = self.output_writer.to_json_str()
serialized_handler = util.try_serialize_handler(self.handler)
if serialized_handler:
result["serialized_handler"] = serialized_handler
return result
@classmethod
def from_request(cls, request):
"""Create new TransientShardState from webapp request."""
mapreduce_spec = MapreduceSpec.from_json_str(request.get("mapreduce_spec"))
mapper_spec = mapreduce_spec.mapper
input_reader_spec_dict = simplejson.loads(request.get("input_reader_state"),
cls=json_util.JsonDecoder)
input_reader = mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
initial_input_reader_spec_dict = simplejson.loads(
request.get("initial_input_reader_state"), cls=json_util.JsonDecoder)
initial_input_reader = mapper_spec.input_reader_class().from_json(
initial_input_reader_spec_dict)
output_writer = None
if mapper_spec.output_writer_class():
output_writer = mapper_spec.output_writer_class().from_json(
simplejson.loads(request.get("output_writer_state", "{}"),
cls=json_util.JsonDecoder))
assert isinstance(output_writer, mapper_spec.output_writer_class()), (
"%s.from_json returned an instance of wrong class: %s" % (
mapper_spec.output_writer_class(),
output_writer.__class__))
handler = util.try_deserialize_handler(request.get("serialized_handler"))
if not handler:
handler = mapreduce_spec.mapper.handler
return cls(mapreduce_spec.params["base_path"],
mapreduce_spec,
str(request.get("shard_id")),
int(request.get("slice_id")),
input_reader,
initial_input_reader,
output_writer=output_writer,
retries=int(request.get("retries")),
handler=handler)
class ShardState(db.Model):
"""Single shard execution state.
The shard state is stored in the datastore and is later aggregated by
controller task. ShardState key_name is equal to shard_id.
Shard state contains critical state to ensure the correctness of
shard execution. It is the single source of truth about a shard's
progress. For example:
1. A slice is allowed to run only if its payload matches shard state's
expectation.
2. A slice is considered running only if it has acquired the shard's lock.
3. A slice is considered done only if it has successfully committed shard
state to db.
Properties about the shard:
active: if we have this shard still running as boolean.
counters_map: shard's counters map as CountersMap. All counters yielded
within mapreduce are stored here.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
retries: the number of times this shard has been retried.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
writer_state: writer state for this shard. The shard's output writer
instance can save in-memory output references to this field in its
"finalize" method.
Properties about slice management:
slice_id: slice id of current executing slice. A slice's task
will not run unless its slice_id matches this. Initial
value is 0. By the end of slice execution, this number is
incremented by 1.
slice_start_time: a slice updates this to now at the beginning of
execution. If the transaction succeeds, the current task holds
a lease of slice duration + some grace period. During this time, no
other task with the same slice_id will execute. Upon slice failure,
the task should try to unset this value to allow retries to carry on
ASAP.
slice_request_id: the request id that holds/held the lease. When lease has
expired, new request needs to verify that said request has indeed
ended according to logs API. Do this only when lease has expired
because logs API is expensive. This field should always be set/unset
with slice_start_time. It is possible Logs API doesn't log a request
at all or doesn't log the end of a request. So a new request can
proceed after a long conservative timeout.
slice_retries: the number of times a slice has been retried due to
processing data when lock is held. Taskqueue/datastore errors
related to slice/shard management are not counted. This count is
only a lower bound and is used to determine when to fail a slice
completely.
acquired_once: whether the lock for this slice has been acquired at
least once. When this is True, duplicates in outputs are possible.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
# Shard can be in aborted state when user issued abort, or controller
# issued abort because some other shard failed.
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Maximum number of shard states to hold in memory at any time.
_MAX_STATES_IN_MEMORY = 10
# Functional properties.
mapreduce_id = db.StringProperty(required=True)
active = db.BooleanProperty(default=True, indexed=False)
counters_map = json_util.JsonProperty(
CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
retries = db.IntegerProperty(default=0, indexed=False)
writer_state = json_util.JsonProperty(dict, indexed=False)
slice_id = db.IntegerProperty(default=0, indexed=False)
slice_start_time = db.DateTimeProperty(indexed=False)
slice_request_id = db.ByteStringProperty(indexed=False)
slice_retries = db.IntegerProperty(default=0, indexed=False)
acquired_once = db.BooleanProperty(default=False, indexed=False)
# For UI purposes only.
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def __str__(self):
kv = {"active": self.active,
"slice_id": self.slice_id,
"last_work_item": self.last_work_item,
"update_time": self.update_time}
if self.result_status:
kv["result_status"] = self.result_status
if self.retries:
kv["retries"] = self.retries
if self.slice_start_time:
kv["slice_start_time"] = self.slice_start_time
if self.slice_retries:
kv["slice_retries"] = self.slice_retries
if self.slice_request_id:
kv["slice_request_id"] = self.slice_request_id
if self.acquired_once:
kv["acquired_once"] = self.acquired_once
keys = kv.keys()
keys.sort()
result = "ShardState is {"
for k in keys:
result += k + ":" + str(kv[k]) + ","
result += "}"
return result
def reset_for_retry(self):
"""Reset self for shard retry."""
self.retries += 1
self.last_work_item = ""
self.active = True
self.result_status = None
self.counters_map = CountersMap()
self.slice_id = 0
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def advance_for_next_slice(self, recovery_slice=False):
"""Advance self for next slice.
Args:
recovery_slice: True if this slice is running recovery logic.
See handlers.MapperWorkerCallbackHandler._attempt_slice_recovery
for more info.
"""
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
if recovery_slice:
self.slice_id += 2
else:
self.slice_id += 1
def set_for_failure(self):
self.active = False
self.result_status = self.RESULT_FAILED
def set_for_abort(self):
self.active = False
self.result_status = self.RESULT_ABORTED
def set_for_success(self):
self.active = False
self.result_status = self.RESULT_SUCCESS
self.slice_start_time = None
self.slice_request_id = None
self.slice_retries = 0
self.acquired_once = False
def copy_from(self, other_state):
"""Copy data from another shard state entity to self."""
for prop in self.properties().values():
setattr(self, prop.name, getattr(other_state, prop.name))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.properties() == other.properties()
def get_shard_number(self):
"""Gets the shard number from the key name."""
return int(self.key().name().split("-")[-1])
shard_number = property(get_shard_number)
def get_shard_id(self):
"""Returns the shard ID."""
return self.key().name()
shard_id = property(get_shard_id)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_ShardState"
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
"""Get shard id by mapreduce id and shard number.
Args:
mapreduce_id: mapreduce id as string.
shard_number: shard number to compute id for as int.
Returns:
shard id as string.
"""
return "%s-%d" % (mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
"""Retrieves the Key for this ShardState.
Args:
shard_id: The shard ID to fetch.
Returns:
The Datastore key to use to retrieve this ShardState.
"""
return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
"""Get shard state from datastore by shard_id.
Args:
shard_id: shard id as string.
Returns:
ShardState for given shard id or None if it's not found.
"""
return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Deprecated. Use find_all_by_mapreduce_state.
This will be removed after 1.8.9 release.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of ShardStates.
"""
return list(cls.find_all_by_mapreduce_state(mapreduce_state))
@classmethod
def find_all_by_mapreduce_state(cls, mapreduce_state):
"""Find all shard states for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Yields:
shard states sorted by shard id.
"""
keys = cls.calculate_keys_by_mapreduce_state(mapreduce_state)
i = 0
while i < len(keys):
@db.non_transactional
def no_tx_get(i):
return db.get(keys[i:i+cls._MAX_STATES_IN_MEMORY])
# We need a separate function so that the non-transactional datastore get
# can be used from within this generator.
states = no_tx_get(i)
for s in states:
i += 1
if s is not None:
yield s
@classmethod
def calculate_keys_by_mapreduce_state(cls, mapreduce_state):
"""Calculate all shard states keys for given mapreduce.
Args:
mapreduce_state: MapreduceState instance
Returns:
A list of keys for shard states, sorted by shard id.
The corresponding shard states may not exist.
"""
if mapreduce_state is None:
return []
keys = []
for i in range(mapreduce_state.mapreduce_spec.mapper.shard_count):
shard_id = cls.shard_id_from_number(mapreduce_state.key().name(), i)
keys.append(cls.get_key_by_shard_id(shard_id))
return keys
@classmethod
def create_new(cls, mapreduce_id, shard_number):
"""Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
"""
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
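# A hedged usage sketch (not part of the original module): shard ids are just
# "<mapreduce_id>-<shard_number>", and slices advance by bumping slice_id. The
# job id below is illustrative and a datastore context is assumed for put().
#
#   state = ShardState.create_new("job123", 0)          # key name "job123-0"
#   state.put()
#   assert state.shard_id == ShardState.shard_id_from_number("job123", 0)
#   state.advance_for_next_slice()                       # slice_id: 0 -> 1
#   state.advance_for_next_slice(recovery_slice=True)    # slice_id: 1 -> 3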
class MapreduceControl(db.Model):
"""Datastore entity used to control mapreduce job execution.
Only one command may be sent to jobs at a time.
Properties:
command: The command to send to the job.
"""
ABORT = "abort"
_COMMANDS = frozenset([ABORT])
_KEY_NAME = "command"
command = db.TextProperty(choices=_COMMANDS, required=True)
@classmethod
def kind(cls):
"""Returns entity kind."""
return "_AE_MR_MapreduceControl"
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a mapreduce ID.
Args:
mapreduce_id: The job to fetch.
Returns:
Datastore Key for the command for the given job ID.
"""
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
@classmethod
def abort(cls, mapreduce_id, **kwargs):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put(**kwargs)
class QuerySpec(object):
"""Encapsulates everything about a query needed by DatastoreInputReader."""
DEFAULT_BATCH_SIZE = 50
def __init__(self,
entity_kind,
keys_only=None,
filters=None,
batch_size=None,
model_class_path=None,
app=None,
ns=None):
self.entity_kind = entity_kind
self.keys_only = keys_only or False
self.filters = filters or None
self.batch_size = batch_size or self.DEFAULT_BATCH_SIZE
self.model_class_path = model_class_path
self.app = app
self.ns = ns
def to_json(self):
return {"entity_kind": self.entity_kind,
"keys_only": self.keys_only,
"filters": self.filters,
"batch_size": self.batch_size,
"model_class_path": self.model_class_path,
"app": self.app,
"ns": self.ns}
@classmethod
def from_json(cls, json):
return cls(json["entity_kind"],
json["keys_only"],
json["filters"],
json["batch_size"],
json["model_class_path"],
json["app"],
json["ns"])
|
apache-2.0
|
bl4ckdu5t/registron
|
old/e2etests/win32/NextID.py
|
9
|
2653
|
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#import pythoncom
pycomCLSCTX_INPROC = 3
pycomCLSCTX_LOCAL_SERVER = 4
import os
import sys
d = {}
class NextID:
_reg_clsid_ = '{25E06E61-2D18-11D5-945F-00609736B700}'
_reg_desc_ = 'Text COM server'
_reg_progid_ = 'MEInc.NextID'
_reg_clsctx_ = pycomCLSCTX_INPROC | pycomCLSCTX_LOCAL_SERVER
_public_methods_ = [
'getNextID'
]
def __init__(self):
import win32api
win32api.MessageBox(0, "NextID.__init__ started", "NextID.py")
global d
if sys.frozen:
for entry in sys.path:
if entry.find('?') > -1:
here = os.path.dirname(entry.split('?')[0])
break
else:
here = os.getcwd()
else:
here = os.path.dirname(__file__)
self.fnm = os.path.join(here, 'id.cfg')
try:
d = eval(open(self.fnm, 'rU').read()+'\n')
except:
d = {
'systemID': 0xaaaab,
'highID': 0
}
win32api.MessageBox(0, "NextID.__init__ complete", "NextID.py")
def getNextID(self):
global d
d['highID'] = d['highID'] + 1
open(self.fnm, 'w').write(repr(d))
return '%(systemID)-0.5x%(highID)-0.7x' % d
def RegisterNextID():
from win32com.server import register
register.UseCommandLine(NextID)
def UnRegisterNextID():
from win32com.server import register
register.UnregisterServer(NextID._reg_clsid_, NextID._reg_progid_)
if __name__ == '__main__':
import sys
if "/unreg" in sys.argv:
UnRegisterNextID()
elif "/register" in sys.argv:
RegisterNextID()
else:
print "running as server"
import win32com.server.localserver
win32com.server.localserver.main()
raw_input("Press any key...")
|
mit
|
RossBrunton/django
|
django/core/mail/__init__.py
|
347
|
4697
|
"""
Tools for sending email.
"""
from __future__ import unicode_literals
from django.conf import settings
# Imported for backwards compatibility and for the sake
# of a cleaner namespace. These symbols used to be in
# django/core/mail.py before the introduction of email
# backends and the subsequent reorganization (See #10355)
from django.core.mail.message import (
DEFAULT_ATTACHMENT_MIME_TYPE, BadHeaderError, EmailMessage,
EmailMultiAlternatives, SafeMIMEMultipart, SafeMIMEText,
forbid_multi_line_headers, make_msgid,
)
from django.core.mail.utils import DNS_NAME, CachedDnsName
from django.utils.module_loading import import_string
__all__ = [
'CachedDnsName', 'DNS_NAME', 'EmailMessage', 'EmailMultiAlternatives',
'SafeMIMEText', 'SafeMIMEMultipart', 'DEFAULT_ATTACHMENT_MIME_TYPE',
'make_msgid', 'BadHeaderError', 'forbid_multi_line_headers',
'get_connection', 'send_mail', 'send_mass_mail', 'mail_admins',
'mail_managers',
]
def get_connection(backend=None, fail_silently=False, **kwds):
"""Load an email backend and return an instance of it.
If backend is None (default) settings.EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
klass = import_string(backend or settings.EMAIL_BACKEND)
return klass(fail_silently=fail_silently, **kwds)
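# A small usage sketch (assumes Django settings are configured; the locmem
# backend ships with Django and keeps sent messages in django.core.mail.outbox):
#
#   conn = get_connection('django.core.mail.backends.locmem.EmailBackend')
#   conn.send_messages([EmailMessage('Subject', 'Body',
#                                    'from@example.com', ['to@example.com'])])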
def send_mail(subject, message, from_email, recipient_list,
fail_silently=False, auth_user=None, auth_password=None,
connection=None, html_message=None):
"""
Easy wrapper for sending a single message to a recipient list. All members
of the recipient list will see the other recipients in the 'To' field.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
mail = EmailMultiAlternatives(subject, message, from_email, recipient_list,
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
return mail.send()
def send_mass_mail(datatuple, fail_silently=False, auth_user=None,
auth_password=None, connection=None):
"""
Given a datatuple of (subject, message, from_email, recipient_list), sends
each message to each recipient list. Returns the number of emails sent.
If from_email is None, the DEFAULT_FROM_EMAIL setting is used.
If auth_user and auth_password are set, they're used to log in.
If auth_user is None, the EMAIL_HOST_USER setting is used.
If auth_password is None, the EMAIL_HOST_PASSWORD setting is used.
Note: The API for this method is frozen. New code wanting to extend the
functionality should use the EmailMessage class directly.
"""
connection = connection or get_connection(username=auth_user,
password=auth_password,
fail_silently=fail_silently)
messages = [EmailMessage(subject, message, sender, recipient,
connection=connection)
for subject, message, sender, recipient in datatuple]
return connection.send_messages(messages)
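# A hedged example of the datatuple format (addresses are placeholders); unlike
# repeated send_mail() calls, all messages here share a single connection:
#
#   message1 = ('Subject A', 'Body A', 'from@example.com', ['a@example.com'])
#   message2 = ('Subject B', 'Body B', 'from@example.com', ['b@example.com'])
#   send_mass_mail((message1, message2), fail_silently=False)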
def mail_admins(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the admins, as defined by the ADMINS setting."""
if not settings.ADMINS:
return
mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.ADMINS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False, connection=None,
html_message=None):
"""Sends a message to the managers, as defined by the MANAGERS setting."""
if not settings.MANAGERS:
return
mail = EmailMultiAlternatives('%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message, settings.SERVER_EMAIL, [a[1] for a in settings.MANAGERS],
connection=connection)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
|
bsd-3-clause
|
pombreda/ruffus
|
ruffus/test/test_verbosity.py
|
1
|
8627
|
#!/usr/bin/env python
from __future__ import print_function
"""
test_verbosity.py
"""
temp_dir = "test_verbosity/"
import unittest
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = list(map(__import__, [ruffus_name]))[0]
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
import re
ruffus = __import__ (ruffus_name)
for attr in "pipeline_run", "pipeline_printout", "suffix", "transform", "split", "merge", "dbdict", "follows", "mkdir", "originate", "Pipeline":
globals()[attr] = getattr (ruffus, attr)
RethrownJobError = ruffus.ruffus_exceptions.RethrownJobError
RUFFUS_HISTORY_FILE = ruffus.ruffus_utility.RUFFUS_HISTORY_FILE
CHECKSUM_FILE_TIMESTAMPS = ruffus.ruffus_utility.CHECKSUM_FILE_TIMESTAMPS
#---------------------------------------------------------------
# create initial files
#
@mkdir(temp_dir + 'data/scratch/lg/what/one/two/three/')
@originate([ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ])
def create_initial_file_pairs(output_files):
# create both files as necessary
for output_file in output_files:
with open(output_file, "w") as oo: pass
#---------------------------------------------------------------
# first task
@transform(create_initial_file_pairs, suffix(".start"), ".output.1")
def first_task(input_files, output_file):
with open(output_file, "w"): pass
#---------------------------------------------------------------
# second task
@transform(first_task, suffix(".output.1"), ".output.2")
def second_task(input_files, output_file):
with open(output_file, "w"): pass
test_pipeline = Pipeline("test")
test_pipeline.originate(output = [ [temp_dir + 'data/scratch/lg/what/one/two/three/job1.a.start', temp_dir + 'job1.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job2.a.start', temp_dir + 'job2.b.start'],
[temp_dir + 'data/scratch/lg/what/one/two/three/job3.a.start', temp_dir + 'job3.b.start'] ],
task_func = create_initial_file_pairs)
test_pipeline.transform(task_func = first_task, input = create_initial_file_pairs, filter = suffix(".start"), output = ".output.1")
test_pipeline.transform(input = first_task, filter = suffix(".output.1"), output = ".output.2", task_func= second_task)
decorator_syntax = 0
oop_syntax = 1
class Test_verbosity(unittest.TestCase):
#___________________________________________________________________________
#
# test_printout_abbreviated_path1
#___________________________________________________________________________
def test_printout_abbreviated_path1(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 1, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue(re.search('Job needs update:.*Missing files.*'
'\[\.\.\./job2\.a\.start, test_verbosity/job2\.b\.start, \.\.\./job2.a.output.1\]', ret, re.DOTALL) is not None)
#___________________________________________________________________________
#
# test_printout_abbreviated_path2
#___________________________________________________________________________
def test_printout_abbreviated_path2(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 2, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../three/job1.a.start, test_verbosity/job1.b.start, .../three/job1.a.output.1]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path3
#___________________________________________________________________________
def test_printout_abbreviated_path3(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 3, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[.../two/three/job1.a.start, test_verbosity/job1.b.start, .../two/three/job1.a.output.1]' in s.getvalue())
#___________________________________________________________________________
#
# test_printout_abbreviated_path9
#___________________________________________________________________________
def test_printout_abbreviated_path9(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 9, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[%sdata/scratch/lg/what/one/two/three/job2.a.start, test_verbosity/job2.b.start,' % temp_dir in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path0
#___________________________________________________________________________
def test_printout_abbreviated_path0(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = 0, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
path_str = os.path.abspath('%sdata/scratch/lg/what/one/two/three/job2.a.start' % temp_dir)
path_str = '[[%s' % path_str
self.assertTrue(path_str in ret)
self.assertTrue(temp_dir + 'job2.b.start]' in ret)
#___________________________________________________________________________
#
# test_printout_abbreviated_path_minus_60
#___________________________________________________________________________
def test_printout_abbreviated_path_minus_60(self):
"""Input file exists, output doesn't exist"""
for syntax in decorator_syntax, oop_syntax:
s = StringIO()
if syntax == oop_syntax:
test_pipeline.printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500)
else:
pipeline_printout(s, [second_task], verbose = 5, verbose_abbreviated_path = -60, wrap_width = 500, pipeline= "main")
ret = s.getvalue()
self.assertTrue('[<???> ne/two/three/job2.a.start, test_verbosity/job2.b.start]' in ret)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
unittest.main()
|
mit
|
abalkin/numpy
|
numpy/polynomial/laguerre.py
|
3
|
49890
|
"""
==================================================
Laguerre Series (:mod:`numpy.polynomial.laguerre`)
==================================================
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Classes
-------
.. autosummary::
:toctree: generated/
Laguerre
Constants
---------
.. autosummary::
:toctree: generated/
lagdomain
lagzero
lagone
lagx
Arithmetic
----------
.. autosummary::
:toctree: generated/
lagadd
lagsub
lagmulx
lagmul
lagdiv
lagpow
lagval
lagval2d
lagval3d
laggrid2d
laggrid3d
Calculus
--------
.. autosummary::
:toctree: generated/
lagder
lagint
Misc Functions
--------------
.. autosummary::
:toctree: generated/
lagfromroots
lagroots
lagvander
lagvander2d
lagvander3d
laggauss
lagweight
lagcompanion
lagfit
lagtrim
lagline
lag2poly
poly2lag
See also
--------
`numpy.polynomial`
"""
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
'laggauss', 'lagweight']
lagtrim = pu.trimcoef
def poly2lag(pol):
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(c):
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
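# A sketch of the object-oriented conversion mentioned in the Notes above
# (coefficients reuse the docstring example; the result is checked loosely):
#
#   >>> from numpy.polynomial import Laguerre, Polynomial
#   >>> p = Laguerre([23., -63., 58., -18.]).convert(kind=Polynomial)
#   >>> np.allclose(p.coef, [0., 1., 2., 3.])
#   True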
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0, 1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl):
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0:
return np.array([off + scl, -scl])
else:
return np.array([off])
def lagfromroots(roots):
"""
Generate a Laguerre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Laguerre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Laguerre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, chebfromroots, hermfromroots, hermefromroots
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([0.+0.j, 0.+0.j])
"""
return pu._fromroots(lagline, lagmul, roots)
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmulx, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([2., 4., 6., 4.])
"""
return pu._add(c1, c2)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmulx, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([0., 0., 0., 4.])
"""
return pu._sub(c1, c2)
def lagmulx(c):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
See Also
--------
lagadd, lagsub, lagmul, lagdiv, lagpow
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([-1., -1., 11., -9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]
prd[1] = -c[0]
for i in range(1, len(c)):
prd[i + 1] = -c[i]*(i + 1)
prd[i] += c[i]*(2*i + 1)
prd[i - 1] -= c[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagmulx, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmulx, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "reproject" the results onto the Laguerre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([1., 2., 3.]), array([0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([1., 2., 3.]), array([1., 1.]))
"""
return pu._div(lagmul, c1, c2)
def lagpow(c, pow, maxpower=16):
"""Raise a Laguerre series to a power.
Returns the Laguerre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmulx, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
return pu._pow(lagmul, c, pow, maxpower)
def lagder(c, m=1, scl=1, axis=0):
"""
Differentiate a Laguerre series.
Returns the Laguerre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 1, -1):
der[j - 1] = -c[j]
c[j - 1] += c[j]
der[0] = -c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Laguerre series.
Returns the Laguerre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ]) # may vary
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt = pu._deprecate_as_int(m, "the order of integration")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if np.ndim(lbnd) != 0:
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]
tmp[1] = -c[0]
for j in range(1, n):
tmp[j] += c[j]
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def lagval(x, c, tensor=True):
"""
Evaluate a Laguerre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
lagval2d, laggrid2d, lagval3d, laggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
def lagval2d(x, y, c):
"""
Evaluate a 2-D Laguerre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
lagval, laggrid2d, lagval3d, laggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._valnd(lagval, c, x, y)
def laggrid2d(x, y, c):
"""
Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Laguerre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
lagval, lagval2d, lagval3d, laggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._gridnd(lagval, c, x, y)
def lagval3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial at points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, laggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._valnd(lagval, c, x, y, z)
def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the
Cartesian product of `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._gridnd(lagval, c, x, y, z)
def lagvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Laguerre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
``lagval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Laguerre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.moveaxis(v, 0, -1)
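# A quick check of the equivalence claimed in the docstring (sample points and
# coefficients are arbitrary): the matrix-vector product matches direct
# evaluation up to roundoff.
#
#   >>> x = np.linspace(0., 2., 5)
#   >>> c = np.array([1., 2., 3.])
#   >>> np.allclose(np.dot(lagvander(x, 2), c), lagval(x, c))
#   True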
def lagvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Laguerre polynomials.
If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
def lagvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Laguerre polynomials.
If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
lagvander, lagvander2d, lagval2d, lagval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Return the coefficients of a Laguerre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
chebfit, legfit, polyfit, hermfit, hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Laguerre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
weight. In that case the weight ``sqrt(w(x[i]))`` should be used
together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `lagweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744]) # may vary
"""
return pu._fit(lagvander, x, y, deg, rcond, full, w)
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
top = mat.reshape(-1)[1::n+1]
mid = mat.reshape(-1)[0::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = -np.arange(1, n)
mid[...] = 2.*np.arange(n) + 1.
bot[...] = top
mat[:, -1] += (c[:-1]/c[-1])*n
return mat
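# A minimal sketch of lagcompanion on a basis polynomial, shown as a comment so
# the module's behaviour is unchanged:
#
#   >>> lagcompanion([0, 0, 1])      # companion matrix of L_2
#   array([[ 1., -1.],
#          [-1.,  3.]])
#
# The matrix is symmetric, as the docstring notes for basis polynomials.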
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, chebroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([1 + c[0]/c[1]])
# rotated companion matrix reduces error
m = lagcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
def laggauss(deg):
"""
Gauss-Laguerre quadrature.
Computes the sample points and weights for Gauss-Laguerre quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
with the weight function :math:`f(x) = \\exp(-x)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = lagcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = lagval(x, c)
df = lagval(x, lagder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = lagval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# scale w to get the right value, 1 in this case
w /= w.sum()
return x, w
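# A quick usage sketch for laggauss, as a comment only: the nodes and weights
# integrate polynomials against exp(-x) on [0, inf), so integrating f(x) = x
# should give Gamma(2) = 1.
#
#   >>> x, w = laggauss(5)
#   >>> np.dot(w, x)                  # close to 1.0 up to roundoff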
def lagweight(x):
"""Weight function of the Laguerre polynomials.
    The weight function is :math:`\\exp(-x)` and the interval of integration
    is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = np.exp(-x)
return w
#
# Laguerre series class
#
class Laguerre(ABCPolyBase):
"""A Laguerre series class.
The Laguerre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Laguerre coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [0, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [0, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(lagadd)
_sub = staticmethod(lagsub)
_mul = staticmethod(lagmul)
_div = staticmethod(lagdiv)
_pow = staticmethod(lagpow)
_val = staticmethod(lagval)
_int = staticmethod(lagint)
_der = staticmethod(lagder)
_fit = staticmethod(lagfit)
_line = staticmethod(lagline)
_roots = staticmethod(lagroots)
_fromroots = staticmethod(lagfromroots)
# Virtual properties
nickname = 'lag'
domain = np.array(lagdomain)
window = np.array(lagdomain)
basis_name = 'L'
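# A minimal usage sketch for the Laguerre class, as a comment; it relies only on
# the standard ABCPolyBase interface:
#
#   >>> p = Laguerre([1, 2, 3])       # 1*L_0(x) + 2*L_1(x) + 3*L_2(x)
#   >>> p(0.0)                        # all Laguerre polynomials equal 1 at x = 0
#   6.0
#   >>> p.deriv()                     # the derivative is again a Laguerre series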
|
bsd-3-clause
|
ageron/tensorflow
|
tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test_base.py
|
21
|
6788
|
# =============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for proto ops reading descriptors from other sources."""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from google.protobuf.descriptor_pb2 import FieldDescriptorProto
from google.protobuf.descriptor_pb2 import FileDescriptorSet
from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class DescriptorSourceTestBase(test.TestCase):
"""Base class for testing descriptor sources."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""DescriptorSourceTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(DescriptorSourceTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
# NOTE: We generate the descriptor programmatically instead of via a compiler
# because of differences between different versions of the compiler.
#
# The generated descriptor should capture the subset of `test_example.proto`
# used in `test_base.simple_test_case()`.
def _createDescriptorFile(self):
set_proto = FileDescriptorSet()
file_proto = set_proto.file.add(
name='types.proto',
package='tensorflow',
syntax='proto3')
enum_proto = file_proto.enum_type.add(name='DataType')
enum_proto.value.add(name='DT_DOUBLE', number=0)
enum_proto.value.add(name='DT_BOOL', number=1)
file_proto = set_proto.file.add(
name='test_example.proto',
package='tensorflow.contrib.proto',
dependency=['types.proto'])
message_proto = file_proto.message_type.add(name='TestCase')
message_proto.field.add(
name='values',
number=1,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='shapes',
number=2,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='sizes',
number=3,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='fields',
number=4,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.FieldSpec',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='TestValue')
message_proto.field.add(
name='double_value',
number=1,
type=FieldDescriptorProto.TYPE_DOUBLE,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='bool_value',
number=2,
type=FieldDescriptorProto.TYPE_BOOL,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='FieldSpec')
message_proto.field.add(
name='name',
number=1,
type=FieldDescriptorProto.TYPE_STRING,
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='dtype',
number=2,
type=FieldDescriptorProto.TYPE_ENUM,
type_name='.tensorflow.DataType',
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='value',
number=3,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_OPTIONAL)
fn = os.path.join(self.get_temp_dir(), 'descriptor.pb')
with open(fn, 'wb') as f:
f.write(set_proto.SerializeToString())
return fn
def _testRoundtrip(self, descriptor_source):
# Numpy silently truncates the strings if you don't specify dtype=object.
in_bufs = np.array(
[test_base.ProtoOpTestBase.simple_test_case().SerializeToString()],
dtype=object)
message_type = 'tensorflow.contrib.proto.TestCase'
field_names = ['values', 'shapes', 'sizes', 'fields']
tensor_types = [dtypes.string, dtypes.int32, dtypes.int32, dtypes.string]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=tensor_types,
descriptor_source=descriptor_source)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names,
descriptor_source=descriptor_source)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
def testWithFileDescriptorSet(self):
# First try parsing with a local proto db, which should fail.
with self.assertRaisesOpError('No descriptor found for message type'):
self._testRoundtrip('local://')
# Now try parsing with a FileDescriptorSet which contains the test proto.
descriptor_file = self._createDescriptorFile()
self._testRoundtrip(descriptor_file)
|
apache-2.0
|
gyang/nova
|
nova/rpc/impl_qpid.py
|
5
|
19137
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import time
import uuid
import json
import eventlet
import greenlet
import qpid.messaging
import qpid.messaging.exceptions
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.rpc import amqp as rpc_amqp
from nova.rpc import common as rpc_common
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.StrOpt('qpid_port',
default='5672',
help='Qpid broker port'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection'),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.BoolOpt('qpid_reconnect',
default=True,
help='Automatically reconnect'),
cfg.IntOpt('qpid_reconnect_timeout',
default=0,
help='Reconnection timeout in seconds'),
cfg.IntOpt('qpid_reconnect_limit',
default=0,
help='Max reconnections before giving up'),
cfg.IntOpt('qpid_reconnect_interval_min',
default=0,
help='Minimum seconds between reconnection attempts'),
cfg.IntOpt('qpid_reconnect_interval_max',
default=0,
help='Maximum seconds between reconnection attempts'),
cfg.IntOpt('qpid_reconnect_interval',
default=0,
help='Equivalent to setting max and min to the same value'),
cfg.IntOpt('qpid_heartbeat',
default=5,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(qpid_opts)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"name": link_name,
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
addr_opts["link"]["x-declare"].update(link_opts)
self.address = "%s ; %s" % (node_name, json.dumps(addr_opts))
self.reconnect(session)
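    # Illustrative sketch of the resulting address string (the msg_id value is
    # assumed purely for illustration): a DirectConsumer created with msg_id
    # 'abc' uses node_name 'abc/abc', so self.address ends up looking like
    #   'abc/abc ; {"create": "always", "node": {...}, "link": {...}}'
    # i.e. a Qpid address of the form "<node_name> ; <json options>".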
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect"""
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def consume(self):
"""Fetch the message and pass it to the callback object"""
message = self.receiver.fetch()
self.callback(message.content)
def get_receiver(self):
return self.receiver
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'"""
def __init__(self, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
super(DirectConsumer, self).__init__(session, callback,
"%s/%s" % (msg_id, msg_id),
{"type": "direct"},
msg_id,
{"exclusive": True})
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'"""
def __init__(self, session, topic, callback):
"""Init a 'topic' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
super(TopicConsumer, self).__init__(session, callback,
"%s/%s" % (FLAGS.control_exchange, topic), {},
topic, {})
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'"""
def __init__(self, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
super(FanoutConsumer, self).__init__(session, callback,
"%s_fanout" % topic,
{"durable": False, "type": "fanout"},
"%s_fanout_%s" % (topic, uuid.uuid4().hex),
{"exclusive": True})
class Publisher(object):
"""Base Publisher class"""
def __init__(self, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, json.dumps(addr_opts))
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection"""
self.sender = session.sender(self.address)
def send(self, msg):
"""Send a message"""
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'"""
def __init__(self, session, msg_id):
"""Init a 'direct' publisher."""
super(DirectPublisher, self).__init__(session, msg_id,
{"type": "Direct"})
class TopicPublisher(Publisher):
"""Publisher class for 'topic'"""
def __init__(self, session, topic):
"""init a 'topic' publisher.
"""
super(TopicPublisher, self).__init__(session,
"%s/%s" % (FLAGS.control_exchange, topic))
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'"""
def __init__(self, session, topic):
"""init a 'fanout' publisher.
"""
super(FanoutPublisher, self).__init__(session,
"%s_fanout" % topic, {"type": "fanout"})
class NotifyPublisher(Publisher):
"""Publisher class for notifications"""
def __init__(self, session, topic):
"""init a 'topic' publisher.
"""
super(NotifyPublisher, self).__init__(session,
"%s/%s" % (FLAGS.control_exchange, topic),
{"durable": True})
class Connection(object):
"""Connection object."""
def __init__(self, server_params=None):
self.session = None
self.consumers = {}
self.consumer_thread = None
if server_params is None:
server_params = {}
default_params = dict(hostname=FLAGS.qpid_hostname,
port=FLAGS.qpid_port,
username=FLAGS.qpid_username,
password=FLAGS.qpid_password)
params = server_params
for key in default_params.keys():
params.setdefault(key, default_params[key])
self.broker = params['hostname'] + ":" + str(params['port'])
# Create the connection - this does not open the connection
self.connection = qpid.messaging.Connection(self.broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = params['username']
self.connection.password = params['password']
self.connection.sasl_mechanisms = FLAGS.qpid_sasl_mechanisms
self.connection.reconnect = FLAGS.qpid_reconnect
if FLAGS.qpid_reconnect_timeout:
self.connection.reconnect_timeout = FLAGS.qpid_reconnect_timeout
if FLAGS.qpid_reconnect_limit:
self.connection.reconnect_limit = FLAGS.qpid_reconnect_limit
if FLAGS.qpid_reconnect_interval_max:
self.connection.reconnect_interval_max = (
FLAGS.qpid_reconnect_interval_max)
if FLAGS.qpid_reconnect_interval_min:
self.connection.reconnect_interval_min = (
FLAGS.qpid_reconnect_interval_min)
if FLAGS.qpid_reconnect_interval:
self.connection.reconnect_interval = FLAGS.qpid_reconnect_interval
        self.connection.heartbeat = FLAGS.qpid_heartbeat
self.connection.protocol = FLAGS.qpid_protocol
self.connection.tcp_nodelay = FLAGS.qpid_tcp_nodelay
# Open is part of reconnect -
# NOTE(WGH) not sure we need this with the reconnect flags
self.reconnect()
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues"""
if self.connection.opened():
try:
self.connection.close()
except qpid.messaging.exceptions.ConnectionError:
pass
while True:
try:
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
LOG.error(_('Unable to connect to AMQP server: %s ') % e)
time.sleep(FLAGS.qpid_reconnect_interval or 1)
else:
break
LOG.info(_('Connected to AMQP server on %s') % self.broker)
self.session = self.connection.session()
for consumer in self.consumers.itervalues():
consumer.reconnect(self.session)
if self.consumers:
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid.messaging.exceptions.Empty,
qpid.messaging.exceptions.ConnectionError), e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection"""
self.cancel_consumer_thread()
self.connection.close()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again"""
self.cancel_consumer_thread()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers"""
def _error_callback(exc):
if isinstance(exc, qpid.messaging.exceptions.Empty):
LOG.exception(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread"""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None):
"""Create a 'topic' consumer."""
self.declare_consumer(TopicConsumer, topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer"""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message"""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg):
"""Send a 'topic' message"""
self.publisher_send(TopicPublisher, topic, msg)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message"""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic"""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers"""
it = self.iterconsume(limit=limit)
while True:
try:
it.next()
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread"""
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object"""
if fanout:
consumer = FanoutConsumer(self.session, topic,
rpc_amqp.ProxyCallback(proxy, Connection.pool))
else:
consumer = TopicConsumer(self.session, topic,
rpc_amqp.ProxyCallback(proxy, Connection.pool))
self._register_consumer(consumer)
return consumer
Connection.pool = rpc_amqp.Pool(connection_cls=Connection)
def create_connection(new=True):
"""Create a connection"""
return rpc_amqp.create_connection(new, Connection.pool)
def multicall(context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(context, topic, msg, timeout, Connection.pool)
def call(context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(context, topic, msg, timeout, Connection.pool)
def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(context, topic, msg, Connection.pool)
def fanout_cast(context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(context, topic, msg, Connection.pool)
def cast_to_server(context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(context, server_params, topic, msg,
Connection.pool)
def fanout_cast_to_server(context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(context, server_params, topic,
msg, Connection.pool)
def notify(context, topic, msg):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(context, topic, msg, Connection.pool)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
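# Illustrative usage sketch, kept as a comment (the topic, callback and context
# below are assumed for illustration and are not defined in this module):
#
#   conn = create_connection()
#   conn.declare_topic_consumer('compute', on_message)
#   cast(ctxt, 'compute', {'method': 'ping', 'args': {}})
#
# cast() publishes on the topic exchange without waiting for a reply, while
# call()/multicall() additionally wait on a 'direct' reply queue.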
|
apache-2.0
|
ewilazarus/snnm
|
snnm.py
|
1
|
2886
|
#!/usr/bin/python
"""
snnm
~~~~
This module contains the source code for `snnm`
Snnm is a utility tool created to fetch synonyms for a given expression from
the web and print them to the console.
"""
import bs4
import click
import requests
BASE_URL = 'http://www.thesaurus.com/browse/'
def _fetch_html(expression):
"""
Returns the HTML containing the synonyms for the given expression
"""
response = requests.get(BASE_URL + expression)
response.raise_for_status()
return response.text
def _parse_html(html):
"""
Returns a parsed list of synonyms out of a given HTML
"""
parser = bs4.BeautifulSoup(html, 'html.parser')
synonyms = []
divs = parser.find_all('div', class_='relevancy-list')
for div in divs:
spans = div.find_all('span', class_='text')
synonyms += [str(span.string) for span in spans]
return synonyms
def fetch_synonyms(expression):
"""
Returns a list of synonyms for a given expression
"""
try:
return _parse_html(_fetch_html(expression))
except requests.exceptions.HTTPError:
return []
def clean(synonyms):
"""
Returns the deduped, sorted list of synonyms
"""
deduped_synonyms = list(set([s.strip() for s in synonyms]))
deduped_synonyms.sort()
return deduped_synonyms
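# A tiny sketch of clean(), shown as a comment:
#
#   >>> clean(['  happy', 'glad', 'happy '])
#   ['glad', 'happy']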
def print_synonyms_ugly(synonyms):
"""
Prints the list of synonyms to the screen
"""
for synonym in synonyms:
print(synonym)
def print_synonyms(synonyms):
"""
Prints the list of synonyms to the screen, using colors and breakpoints
"""
if not synonyms:
click.secho('-- NO RESULTS --', fg='red')
click.echo()
else:
height = click.get_terminal_size()[1] - 3
batch = [synonyms[i:i+height] for i in range(0, len(synonyms), height)]
for synonyms in batch:
for synonym in synonyms:
click.secho(synonym, fg='yellow')
click.echo()
if batch.index(synonyms) != len(batch) - 1:
click.echo('Press any key to continue ...', nl=False)
key = click.getchar()
if key == '\x03':
raise KeyboardInterrupt()
click.echo()
@click.command(name='snnm')
@click.argument('expression')
@click.option('-u', '--ugly-output', is_flag=True)
def main(expression, ugly_output):
"""
List synonyms for an expression
"""
try:
if not ugly_output:
click.echo('Synonyms for {}:'.format(click.style(expression,
fg='blue')))
synonyms = clean(fetch_synonyms(expression))
if ugly_output:
print_synonyms_ugly(synonyms)
else:
print_synonyms(synonyms)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
mit
|
guillaume-philippon/aquilon
|
tests/broker/test_del_virtual_switch.py
|
1
|
2507
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del network device command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelVirtualSwitch(TestBrokerCommand):
def test_100_unregister_pg_tag(self):
self.noouttest(["unbind_port_group", "--virtual_switch", "utvswitch",
"--tag", "710"])
def test_105_verify_pg_gone(self):
command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.commandtest(command)
self.matchclean(out, "Port Group", command)
def test_110_del_utvswitch(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
self.noouttest(command)
def test_115_verify_utvswitch(self):
command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.notfoundtest(command)
self.matchoutput(out, "Virtual Switch utvswitch not found.", command)
def test_120_del_utvswitch2(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch2"]
self.noouttest(command)
def test_130_del_camelcase(self):
self.check_plenary_exists("virtualswitchdata", "camelcase")
self.noouttest(["del_virtual_switch", "--virtual_switch", "CaMeLcAsE"])
self.check_plenary_gone("virtualswitchdata", "camelcase")
def test_200_del_again(self):
command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
out = self.notfoundtest(command)
self.matchoutput(out, "Virtual Switch utvswitch not found.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelVirtualSwitch)
unittest.TextTestRunner(verbosity=2).run(suite)
|
apache-2.0
|
vmalloc/gossip
|
gossip/utils.py
|
2
|
2484
|
import itertools
from .exceptions import CannotResolveDependencies
from .helpers import DONT_CARE, FIRST
def topological_sort_registrations(registrations, unconstrained_priority=DONT_CARE):
graph = _build_dependency_graph(registrations, unconstrained_priority=unconstrained_priority)
returned_indices = _topological_sort(range(len(registrations)), graph)
assert len(returned_indices) == len(registrations)
return [registrations[idx] for idx in returned_indices]
def _topological_sort(indices, graph):
independent = sorted(set(indices) - set(m for n, m in graph), reverse=True)
returned = []
while independent:
n = independent.pop()
returned.append(n)
for m in indices:
edge = (n, m)
if m == n:
assert edge not in graph
continue
if edge in graph:
graph.remove(edge)
# check if m is now independent
for edge in graph:
if edge[1] == m:
                        # not independent
break
else:
# no other incoming edges to m
independent.append(m)
if graph:
raise CannotResolveDependencies('Cyclic dependency detected')
return returned
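# Illustrative sketch of _topological_sort, shown as a comment. An edge (a, b)
# in `graph` means "a must come before b":
#
#   >>> _topological_sort([0, 1, 2], {(0, 2), (1, 2)})
#   [0, 1, 2]
#
# A cyclic graph such as {(0, 1), (1, 0)} raises CannotResolveDependencies.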
def _build_dependency_graph(registrations, unconstrained_priority):
providers_by_name = {}
for index, registration in enumerate(registrations):
for name in registration.provides:
providers = providers_by_name.get(name)
if providers is None:
providers = providers_by_name[name] = []
providers.append(index)
graph = set()
for needer_index, registration in enumerate(registrations):
for need in registration.needs:
for provider_index in providers_by_name.get(need, []):
graph.add((provider_index, needer_index))
if unconstrained_priority != DONT_CARE:
caring_indices = set([idx for idx, r in enumerate(registrations) if r.needs or r.provides])
non_caring_indices = set(range(len(registrations))) - caring_indices
for caring_index, uncaring_index in itertools.product(caring_indices, non_caring_indices):
if unconstrained_priority == FIRST:
pair = (uncaring_index, caring_index)
else:
pair = (caring_index, uncaring_index)
graph.add(pair)
return graph
|
bsd-3-clause
|
qPCR4vir/orange
|
Orange/OrangeWidgets/Classify/OWClassificationTreeViewer.py
|
6
|
12035
|
"""
<name>Classification Tree Viewer</name>
<description>Classification tree viewer (hierarchical list view).</description>
<icon>icons/ClassificationTreeViewer.svg</icon>
<contact>Janez Demsar (janez.demsar(@at@)fri.uni-lj.si)</contact>
<priority>2100</priority>
"""
from OWWidget import *
from orngTree import TreeLearner
import OWGUI
import orngTree
import Orange
class ColumnCallback:
def __init__(self, widget, attribute, f = None):
self.widget = widget
self.attribute = attribute
self.f = f
widget.callbackDeposit.append(self)
def __call__(self, value):
setattr(self.widget, self.attribute, self.f and self.f(value) or value)
self.widget.setTreeView(1)
def checkColumn(widget, master, text, value):
wa = QCheckBox(text, widget)
widget.layout().addWidget(wa)
wa.setChecked(getattr(master, value))
master.connect(wa, SIGNAL("toggled(bool)"), ColumnCallback(master, value))
return wa
class OWClassificationTreeViewer(OWWidget):
settingsList = ["maj", "pmaj", "ptarget", "inst", "dist", "adist", "expslider", "sliderValue"]
contextHandlers = {"": DomainContextHandler("", ["targetClass"], matchValues=1)}
def __init__(self, parent=None, signalManager = None, name='Classification Tree Viewer'):
OWWidget.__init__(self, parent, signalManager, name)
self.dataLabels = (('Majority class', 'Class'),
('Probability of majority class', 'P(Class)'),
('Probability of target class', 'P(Target)'),
('Number of instances', '# Inst'),
('Relative distribution', 'Distribution (rel)'),
('Absolute distribution', 'Distribution (abs)'))
# self.callbackDeposit = []
self.inputs = [("Classification Tree", Orange.classification.tree.TreeClassifier, self.setClassificationTree)]
self.outputs = [("Data", ExampleTable)]
# Settings
for s in self.settingsList[:6]:
setattr(self, s, 1)
self.expslider = 5
self.targetClass = 0
self.loadSettings()
self.tree = None
self.sliderValue = 5
self.precision = 3
self.precFrmt = "%%2.%if" % self.precision
# GUI
# parameters
self.dBox = OWGUI.widgetBox(self.controlArea, 'Displayed information')
for i in range(len(self.dataLabels)):
checkColumn(self.dBox, self, self.dataLabels[i][0], self.settingsList[i])
OWGUI.separator(self.controlArea)
self.slider = OWGUI.hSlider(self.controlArea, self, "sliderValue", box = 'Expand/shrink to level', minValue = 1, maxValue = 9, step = 1, callback = self.sliderChanged)
OWGUI.separator(self.controlArea)
self.targetCombo=OWGUI.comboBox(self.controlArea, self, "targetClass", items=[], box="Target class", callback=self.setTarget, addSpace=True)
self.infBox = OWGUI.widgetBox(self.controlArea, 'Tree size')
self.infoa = OWGUI.widgetLabel(self.infBox, 'No tree.')
self.infob = OWGUI.widgetLabel(self.infBox, ' ')
OWGUI.rubber(self.controlArea)
# list view
self.splitter = QSplitter(Qt.Vertical, self.mainArea)
self.mainArea.layout().addWidget(self.splitter)
self.v = QTreeWidget(self.splitter)
self.splitter.addWidget(self.v)
self.v.setAllColumnsShowFocus(1)
self.v.setHeaderLabels(['Classification Tree'] + [label[1] for label in self.dataLabels])
self.v.setColumnWidth(0, 250)
self.connect(self.v, SIGNAL("itemSelectionChanged()"), self.viewSelectionChanged)
# rule
self.rule = QTextEdit(self.splitter)
self.splitter.addWidget(self.rule)
self.rule.setReadOnly(1)
self.splitter.setStretchFactor(0, 2)
self.splitter.setStretchFactor(1, 1)
self.resize(800,400)
self.resize(830, 400)
def sendReport(self):
if self.tree:
self.reportSettings("Information",
[("Target class",self.tree.domain.classVar.values[self.targetClass]),
("Tree size", "%i nodes, %i leaves" % (self.treeNodes, self.treeLeaves))])
else:
self.reportSettings("Information",
[("Target class", "N/A"),
("Tree size", "N/A")])
self.reportSection("Tree")
import OWReport
self.reportRaw(OWReport.reportTree(self.v))
def getTreeItemSibling(self, item):
parent = item.parent()
if not parent:
parent = self.v.invisibleRootItem()
ind = parent.indexOfChild(item)
return parent.child(ind+1)
# main part:
def setTreeView(self, updateonly = 0):
f = self.precFrmt
def addNode(node, parent, desc, anew):
return li
def walkupdate(listviewitem):
node = self.nodeClassDict[listviewitem]
if not node: return
ncl = node.nodeClassifier
dist = node.distribution
a = dist.abs
if a < 1e-20:
a = 1
try:
p_majclass = f % float(dist[int(ncl.defaultVal)]/a)
except:
p_majclass = "N/A"
try:
p_tarclass = f % float(dist[self.targetClass]/a)
except:
p_tarclass = "N/A"
colf = (str(ncl.defaultValue),
p_majclass,
p_tarclass,
"%d" % dist.cases,
len(dist) and reduce(lambda x, y: x+':'+y, [self.precFrmt % (x/a) for x in dist]) or "N/A",
len(dist) and reduce(lambda x, y: x+':'+y, ["%d" % int(x) for x in dist]) or "N/A"
)
col = 1
for j in range(6):
if getattr(self, self.settingsList[j]):
listviewitem.setText(col, colf[j])
col += 1
for i in range(listviewitem.childCount()):
walkupdate(listviewitem.child(i))
def walkcreate(node, parent):
if not node: return
if node.branchSelector:
for i in range(len(node.branches)):
if node.branches[i]:
bd = node.branchDescriptions[i]
if not bd[0] in ["<", ">"]:
bd = node.branchSelector.classVar.name + " = " + bd
else:
bd = node.branchSelector.classVar.name + " " + bd
li = QTreeWidgetItem(parent, [bd])
li.setExpanded(1)
self.nodeClassDict[li] = node.branches[i]
walkcreate(node.branches[i], li)
headerItemStrings = []
for i in range(len(self.dataLabels)):
if getattr(self, self.settingsList[i]):
headerItemStrings.append(self.dataLabels[i][1])
self.v.setHeaderLabels(["Classification Tree"] + headerItemStrings)
self.v.setColumnCount(len(headerItemStrings)+1)
self.v.setRootIsDecorated(1)
self.v.header().setResizeMode(0, QHeaderView.Interactive)
for i in range(len(headerItemStrings)):
self.v.header().setResizeMode(1+i, QHeaderView.ResizeToContents)
if not updateonly:
self.v.clear()
self.nodeClassDict = {}
li = QTreeWidgetItem(self.v, ["<root>"])
li.setExpanded(1)
if self.tree:
self.nodeClassDict[li] = self.tree.tree
walkcreate(self.tree.tree, li)
self.rule.setText("")
if self.tree:
walkupdate(self.v.invisibleRootItem().child(0))
self.v.show()
# slots: handle input signals
def setClassificationTree(self, tree):
self.closeContext()
if tree and (not tree.classVar or tree.classVar.varType != orange.VarTypes.Discrete):
self.error("This viewer only shows trees with discrete classes.\nThere is another viewer for regression trees")
self.tree = None
else:
self.error()
self.tree = tree
self.setTreeView()
self.sliderChanged()
self.targetCombo.clear()
if tree:
self.treeNodes, self.treeLeaves = orngTree.countNodes(tree), orngTree.countLeaves(tree)
self.infoa.setText('Number of nodes: %i' % self.treeNodes)
self.infob.setText('Number of leaves: %i' % self.treeLeaves)
self.targetCombo.addItems([name for name in tree.tree.examples.domain.classVar.values])
self.targetClass = 0
self.openContext("", tree.domain)
else:
self.treeNodes = self.treeLeaves = 0
self.infoa.setText('No tree on input.')
self.infob.setText('')
self.openContext("", None)
def setTarget(self):
def updatetarget(listviewitem):
dist = self.nodeClassDict[listviewitem].distribution
listviewitem.setText(targetindex, f % (dist[self.targetClass]/max(1, dist.abs)))
for i in range(listviewitem.childCount()):
updatetarget(listviewitem.child(i))
if self.ptarget:
targetindex = 1
for st in range(5):
if self.settingsList[st] == "ptarget":
break
if getattr(self, self.settingsList[st]):
targetindex += 1
f = self.precFrmt
if self.v.invisibleRootItem():
updatetarget(self.v.invisibleRootItem().child(0))
def expandTree(self, lev):
def expandTree0(listviewitem, lev):
if not listviewitem:
return
if not lev:
listviewitem.setExpanded(0)
else:
listviewitem.setExpanded(1)
for i in range(listviewitem.childCount()):
child = listviewitem.child(i)
expandTree0(child, lev-1)
expandTree0(self.v.invisibleRootItem().child(0), lev)
# signal processing
def viewSelectionChanged(self):
"""handles click on the tree"""
selected = self.v.selectedItems()
item = selected.pop() if selected else None
self.handleSelectionChanged(item)
if self.tree and item:
data = self.nodeClassDict[item].examples
self.send("Data", data)
tx = ""
f = 1
nodeclsfr = self.nodeClassDict[item].nodeClassifier
while item and item.parent():
if f:
tx = str(item.text(0))
f = 0
else:
tx = str(item.text(0)) + " AND\n "+tx
item = item.parent()
classLabel = str(nodeclsfr.defaultValue)
className = str(nodeclsfr.classVar.name)
if tx:
self.rule.setText("IF %(tx)s\nTHEN %(className)s = %(classLabel)s" % vars())
else:
self.rule.setText("%(className)s = %(classLabel)s" % vars())
else:
self.send("Data", None)
self.rule.setText("")
def handleSelectionChanged(self, item):
pass
def sliderChanged(self):
self.expandTree(self.sliderValue)
##############################################################################
# Test the widget, run from DOS prompt
# > python OWClassificationTreeViewer.py
# Make sure that a sample data set (adult_sample.tab) is in the directory
if __name__=="__main__":
a=QApplication(sys.argv)
ow=OWClassificationTreeViewer()
#a.setMainWidget(ow)
data = orange.ExampleTable(r'../../doc/datasets/adult_sample')
tree = orange.TreeLearner(data, storeExamples = 1)
ow.setClassificationTree(tree)
ow.show()
a.exec_()
ow.saveSettings()
|
gpl-3.0
|
michaelgallacher/intellij-community
|
python/helpers/coveragepy/coverage/execfile.py
|
209
|
5865
|
"""Execute files of Python code."""
import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError:
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
"""The same as s.rsplit(sep, 1), but works in 2.3"""
parts = s.split(sep)
return sep.join(parts[:-1]), parts[-1]
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, including the first
element naming the file being executed. `package` is the name of the
enclosing package, if any.
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if package:
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
# Set sys.argv properly.
old_argv = sys.argv
sys.argv = args
try:
# Make a code object somehow.
if filename.endswith(".pyc") or filename.endswith(".pyo"):
code = make_code_from_pyc(filename)
else:
code = make_code_from_py(filename)
# Execute the code object.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel two layers off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
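# Illustrative usage sketch, as a comment (the path and arguments are
# hypothetical):
#
#   run_python_file('/tmp/example.py', ['/tmp/example.py', '--verbose'])
#
# runs example.py as __main__ with sys.argv set to the given list, then
# restores the previous __main__ module and sys.argv.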
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
source_file = open_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
try:
source = source_file.read()
finally:
source_file.close()
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
if not source or source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
return code
def make_code_from_pyc(filename):
"""Get a code object from a .pyc file."""
try:
fpyc = open(filename, "rb")
except IOError:
raise NoCode("No file to run: %r" % filename)
try:
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
if magic != imp.get_magic():
raise NoCode("Bad magic number in .pyc file")
# Skip the junk in the header that we don't need.
fpyc.read(4) # Skip the moddate.
if sys.version_info >= (3, 3):
# 3.3 added another long to the header (size), skip it.
fpyc.read(4)
# The rest of the file is the code object we want.
code = marshal.load(fpyc)
finally:
fpyc.close()
return code
|
apache-2.0
|
jasonbot/django
|
django/contrib/gis/db/backends/oracle/introspection.py
|
539
|
1977
|
import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
# Associating any OBJECTVAR instances with GeometryField. Of course,
# this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
# but it is the only object type supported within Django anyways.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
try:
cursor.execute(
'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
(table_name.upper(), geo_col.upper())
)
row = cursor.fetchone()
except Exception as msg:
new_msg = (
'Could not find entry in USER_SDO_GEOM_METADATA '
'corresponding to "%s"."%s"\n'
'Error message: %s.') % (table_name, geo_col, msg)
six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
# TODO: Research way to find a more specific geometry field type for
# the column's contents.
field_type = 'GeometryField'
# Getting the field parameters.
field_params = {}
dim, srid = row
if srid != 4326:
field_params['srid'] = srid
# Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
dim = len(dim)
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
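# Illustrative sketch, as a comment (table and column names are assumed here;
# `connection` is the Django database connection wrapper):
#
#   introspection = OracleIntrospection(connection)
#   field_type, field_params = introspection.get_geometry_type('CITY', 'GEOM')
#
# returns 'GeometryField' plus extra kwargs such as {'srid': 3857, 'dim': 3}
# whenever the USER_SDO_GEOM_METADATA entry differs from the defaults
# (SRID 4326, two dimensions).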
|
bsd-3-clause
|
Gchorba/Ask
|
lib/python2.7/site-packages/wolframalpha-1.2-py2.7.egg/wolframalpha/__init__.py
|
3
|
1994
|
from xml.etree import ElementTree as etree
from six.moves import urllib
from . import compat
compat.fix_HTTPMessage()
class Result(object):
def __init__(self, stream):
self.tree = etree.parse(stream)
self._handle_error()
def _handle_error(self):
error = self.tree.find('error')
if not error:
return
code = error.find('code').text
msg = error.find('msg').text
tmpl = 'Error {code}: {msg}'
raise Exception(tmpl.format(code=code, msg=msg))
def __iter__(self):
return (Pod(node) for node in self.tree.findall('pod'))
def __len__(self):
return len(self.tree)
@property
def pods(self):
return list(iter(self))
@property
def results(self):
return (pod for pod in self if pod.title=='Result')
class Pod(object):
def __init__(self, node):
self.node = node
self.__dict__.update(node.attrib)
def __iter__(self):
return (Content(node) for node in self.node.findall('subpod'))
@property
def main(self):
"The main content of this pod"
return next(iter(self))
@property
def text(self):
return self.main.text
class Content(object):
def __init__(self, node):
self.node = node
self.__dict__.update(node.attrib)
self.text = node.find('plaintext').text
class Client(object):
"""
Wolfram|Alpha v2.0 client
"""
def __init__(self, app_id):
self.app_id = app_id
def query(self, query):
"""
Query Wolfram|Alpha with query using the v2.0 API
"""
query = urllib.parse.urlencode(dict(
input=query,
appid=self.app_id,
))
url = 'http://api.wolframalpha.com/v2/query?' + query
resp = urllib.request.urlopen(url)
assert resp.headers.get_content_type() == 'text/xml'
assert resp.headers.get_param('charset') == 'utf-8'
return Result(resp)
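# Illustrative usage sketch, as a comment (the app id is a placeholder and
# query() performs a live HTTP request to the Wolfram|Alpha v2.0 API):
#
#   client = Client('YOUR-APP-ID')
#   res = client.query('2 + 2')
#   print(next(res.results).text)
#
# res.results yields only the pods titled 'Result'; iterating over res itself
# yields every pod in the response.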
|
mit
|
SurfasJones/djcmsrc3
|
venv/lib/python2.7/site-packages/django/contrib/webdesign/tests.py
|
232
|
1092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.contrib.webdesign.lorem_ipsum import *
from django.template import loader, Context
class WebdesignTest(unittest.TestCase):
def test_words(self):
self.assertEqual(words(7), 'lorem ipsum dolor sit amet consectetur adipisicing')
def test_paragraphs(self):
self.assertEqual(paragraphs(1),
['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'])
def test_lorem_tag(self):
t = loader.get_template_from_string("{% load webdesign %}{% lorem 3 w %}")
self.assertEqual(t.render(Context({})),
'lorem ipsum dolor')
|
mit
|
birryree/servo
|
tests/wpt/css-tests/tools/wptserve/wptserve/response.py
|
114
|
14756
|
from collections import OrderedDict
from datetime import datetime, timedelta
import Cookie
import json
import types
import uuid
import socket
from constants import response_codes
from logger import get_logger
missing = object()
class Response(object):
"""Object representing the response to a HTTP request
:param handler: RequestHandler being used for this response
:param request: Request that this is the response for
.. attribute:: request
Request associated with this Response.
.. attribute:: encoding
The encoding to use when converting unicode to strings for output.
.. attribute:: add_required_headers
Boolean indicating whether mandatory headers should be added to the
response.
.. attribute:: send_body_for_head_request
Boolean, default False, indicating whether the body content should be
sent when the request method is HEAD.
.. attribute:: explicit_flush
Boolean indicating whether output should be flushed automatically or only
when requested.
.. attribute:: writer
The ResponseWriter for this response
.. attribute:: status
Status tuple (code, message). Can be set to an integer, in which case the
message part is filled in automatically, or a tuple.
.. attribute:: headers
List of HTTP headers to send with the response. Each item in the list is a
tuple of (name, value).
.. attribute:: content
    The body of the response. This can either be a string or an iterable of response
parts. If it is an iterable, any item may be a string or a function of zero
parameters which, when called, returns a string."""
def __init__(self, handler, request):
self.request = request
self.encoding = "utf8"
self.add_required_headers = True
self.send_body_for_head_request = False
self.explicit_flush = False
self.close_connection = False
self.writer = ResponseWriter(handler, self)
self._status = (200, None)
self.headers = ResponseHeaders()
self.content = []
self.logger = get_logger()
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if hasattr(value, "__len__"):
if len(value) != 2:
raise ValueError
else:
self._status = (int(value[0]), str(value[1]))
else:
self._status = (int(value), None)
def set_cookie(self, name, value, path="/", domain=None, max_age=None,
expires=None, secure=False, httponly=False, comment=None):
"""Set a cookie to be sent with a Set-Cookie header in the
response
:param name: String name of the cookie
:param value: String value of the cookie
:param max_age: datetime.timedelta int representing the time (in seconds)
until the cookie expires
:param path: String path to which the cookie applies
:param domain: String domain to which the cookie applies
:param secure: Boolean indicating whether the cookie is marked as secure
:param httponly: Boolean indicating whether the cookie is marked as
HTTP Only
:param comment: String comment
:param expires: datetime.datetime or datetime.timedelta indicating a
time or interval from now when the cookie expires
"""
days = dict((i+1, name) for i, name in enumerate(["jan", "feb", "mar",
"apr", "may", "jun",
"jul", "aug", "sep",
"oct", "nov", "dec"]))
if value is None:
value = ''
max_age = 0
expires = timedelta(days=-1)
if isinstance(expires, timedelta):
expires = datetime.utcnow() + expires
if expires is not None:
expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
expires_str = expires_str % days[expires.month]
expires = expires_str
if max_age is not None:
if hasattr(max_age, "total_seconds"):
max_age = int(max_age.total_seconds())
max_age = "%.0d" % max_age
m = Cookie.Morsel()
def maybe_set(key, value):
if value is not None and value is not False:
m[key] = value
m.set(name, value, value)
maybe_set("path", path)
maybe_set("domain", domain)
maybe_set("comment", comment)
maybe_set("expires", expires)
maybe_set("max-age", max_age)
maybe_set("secure", secure)
maybe_set("httponly", httponly)
self.headers.append("Set-Cookie", m.OutputString())
def unset_cookie(self, name):
"""Remove a cookie from those that are being sent with the response"""
cookies = self.headers.get("Set-Cookie")
parser = Cookie.BaseCookie()
for cookie in cookies:
parser.load(cookie)
if name in parser.keys():
del self.headers["Set-Cookie"]
for m in parser.values():
if m.key != name:
self.headers.append(("Set-Cookie", m.OutputString()))
def delete_cookie(self, name, path="/", domain=None):
"""Delete a cookie on the client by setting it to the empty string
and to expire in the past"""
self.set_cookie(name, None, path=path, domain=domain, max_age=0,
expires=timedelta(days=-1))
def iter_content(self):
"""Iterator returning chunks of response body content.
If any part of the content is a function, this will be called
and the resulting value (if any) returned."""
if type(self.content) in types.StringTypes:
yield self.content
else:
for item in self.content:
if hasattr(item, "__call__"):
value = item()
else:
value = item
if value:
yield value
def write_status_headers(self):
"""Write out the status line and headers for the response"""
self.writer.write_status(*self.status)
for item in self.headers:
self.writer.write_header(*item)
self.writer.end_headers()
def write_content(self):
"""Write out the response content"""
if self.request.method != "HEAD" or self.send_body_for_head_request:
for item in self.iter_content():
self.writer.write_content(item)
def write(self):
"""Write the whole response"""
self.write_status_headers()
self.write_content()
def set_error(self, code, message=""):
"""Set the response status headers and body to indicate an
error"""
err = {"code": code,
"message": message}
data = json.dumps({"error": err})
self.status = code
self.headers = [("Content-Type", "application/json"),
("Content-Length", len(data))]
self.content = data
if code == 500:
self.logger.error(message)
class MultipartContent(object):
def __init__(self, boundary=None, default_content_type=None):
self.items = []
if boundary is None:
boundary = str(uuid.uuid4())
self.boundary = boundary
self.default_content_type = default_content_type
def __call__(self):
boundary = "--" + self.boundary
rv = ["", boundary]
for item in self.items:
rv.append(str(item))
rv.append(boundary)
rv[-1] += "--"
return "\r\n".join(rv)
def append_part(self, data, content_type=None, headers=None):
if content_type is None:
content_type = self.default_content_type
self.items.append(MultipartPart(data, content_type, headers))
def __iter__(self):
#This is hackish; when writing the response we need an iterable
#or a string. For a multipart/byterange response we want an
#iterable that contains a single callable; the MultipartContent
#object itself
yield self
class MultipartPart(object):
def __init__(self, data, content_type=None, headers=None):
self.headers = ResponseHeaders()
if content_type is not None:
self.headers.set("Content-Type", content_type)
if headers is not None:
for name, value in headers:
if name.lower() == "content-type":
func = self.headers.set
else:
func = self.headers.append
func(name, value)
self.data = data
def __str__(self):
rv = []
for item in self.headers:
rv.append("%s: %s" % item)
rv.append("")
rv.append(self.data)
return "\r\n".join(rv)
class ResponseHeaders(object):
"""Dictionary-like object holding the headers for the response"""
def __init__(self):
self.data = OrderedDict()
def set(self, key, value):
"""Set a header to a specific value, overwriting any previous header
with the same name
:param key: Name of the header to set
:param value: Value to set the header to
"""
self.data[key.lower()] = (key, [value])
def append(self, key, value):
"""Add a new header with a given name, not overwriting any existing
headers with the same name
:param key: Name of the header to add
:param value: Value to set for the header
"""
if key.lower() in self.data:
self.data[key.lower()][1].append(value)
else:
self.set(key, value)
def get(self, key, default=missing):
"""Get the set values for a particular header."""
try:
return self[key]
except KeyError:
if default is missing:
return []
return default
def __getitem__(self, key):
"""Get a list of values for a particular header
"""
return self.data[key.lower()][1]
def __delitem__(self, key):
del self.data[key.lower()]
def __contains__(self, key):
return key.lower() in self.data
def __setitem__(self, key, value):
self.set(key, value)
def __iter__(self):
for key, values in self.data.itervalues():
for value in values:
yield key, value
def items(self):
return list(self)
def update(self, items_iter):
for name, value in items_iter:
self.set(name, value)
def __repr__(self):
return repr(self.data)
class ResponseWriter(object):
"""Object providing an API to write out a HTTP response.
:param handler: The RequestHandler being used.
:param response: The Response associated with this writer.
    After each part of the response is written, the output is
    flushed unless response.explicit_flush is True, in which case
    the user must call .flush() explicitly."""
def __init__(self, handler, response):
self._wfile = handler.wfile
self._response = response
self._handler = handler
self._headers_seen = set()
self._headers_complete = False
self.content_written = False
self.request = response.request
def write_status(self, code, message=None):
"""Write out the status line of a response.
:param code: The integer status code of the response.
:param message: The message of the response. Defaults to the message commonly used
with the status code."""
if message is None:
if code in response_codes:
message = response_codes[code][0]
else:
message = ''
self.write("%s %d %s\r\n" %
(self._response.request.protocol_version, code, message))
def write_header(self, name, value):
"""Write out a single header for the response.
:param name: Name of the header field
:param value: Value of the header field
"""
self._headers_seen.add(name.lower())
self.write("%s: %s\r\n" % (name, value))
if not self._response.explicit_flush:
self.flush()
def write_default_headers(self):
for name, f in [("Server", self._handler.version_string),
("Date", self._handler.date_time_string)]:
if name.lower() not in self._headers_seen:
self.write_header(name, f())
if (type(self._response.content) in (str, unicode) and
"content-length" not in self._headers_seen):
#Would be nice to avoid double-encoding here
self.write_header("Content-Length", len(self.encode(self._response.content)))
def end_headers(self):
"""Finish writing headers and write the separator.
Unless add_required_headers on the response is False,
this will also add HTTP-mandated headers that have not yet been supplied
to the response headers"""
if self._response.add_required_headers:
self.write_default_headers()
self.write("\r\n")
if "content-length" not in self._headers_seen:
self._response.close_connection = True
if not self._response.explicit_flush:
self.flush()
self._headers_complete = True
def write_content(self, data):
"""Write the body of the response."""
self.write(self.encode(data))
if not self._response.explicit_flush:
self.flush()
def write(self, data):
"""Write directly to the response, converting unicode to bytes
according to response.encoding. Does not flush."""
self.content_written = True
try:
self._wfile.write(self.encode(data))
except socket.error:
# This can happen if the socket got closed by the remote end
pass
def encode(self, data):
"""Convert unicode to bytes according to response.encoding."""
if isinstance(data, str):
return data
elif isinstance(data, unicode):
return data.encode(self._response.encoding)
else:
raise ValueError
def flush(self):
"""Flush the output."""
try:
self._wfile.flush()
except socket.error:
# This can happen if the socket got closed by the remote end
pass
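

# Illustrative sketch (not part of the original module): exercising the
# ResponseHeaders and MultipartContent helpers defined above. Both classes are
# self-contained, so this does not need a live handler/request pair. The header
# values and boundary below are examples only.
def _headers_and_multipart_example():
    headers = ResponseHeaders()
    headers.set("Content-Type", "text/plain")
    headers.append("Set-Cookie", "a=1")
    headers.append("Set-Cookie", "b=2")              # two values under one name
    assert headers["set-cookie"] == ["a=1", "b=2"]   # lookups are case-insensitive
    assert ("Set-Cookie", "b=2") in headers.items()

    body = MultipartContent(boundary="frame", default_content_type="text/plain")
    body.append_part("first part")
    body.append_part("second part", headers=[("X-Part", "2")])
    # body() renders the CRLF-delimited multipart payload with "--frame" markers
    return headers, body()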
|
mpl-2.0
|
kezilu/pextant
|
pextant/api.py
|
2
|
3350
|
import csv
import json
import logging
import re
from pextant.solvers.astarMesh import astarSolver
from pextant.analysis.loadWaypoints import JSONloader
import matplotlib.pyplot as plt
logger = logging.getLogger()
class Pathfinder:
"""
    This class performs the A* pathfinding algorithm and contains the cost functions. It also
    includes capabilities for analysis of a path.

    The class still needs performance testing for maps of larger sizes, although nothing here
    is expected to be extremely computationally intensive.

    Current cost functions are Time, Distance, and (Metabolic) Energy. It would be useful to be
    able to optimize on other resources like battery power or water sublimated, but those are
    significantly more difficult because they depend on shadowing and were not implemented by
    Aaron.
"""
def __init__(self, explorer_model, environmental_model):
cheating = 1
self.solver = astarSolver(environmental_model, explorer_model,
optimize_on = 'Energy', heuristic_accelerate = cheating)
def aStarCompletePath(self, optimize_on, waypoints, returnType="JSON", dh=None, fileName=None ):
pass
def completeSearch(self, optimize_on, waypoints, filepath=None ):
"""
        Returns a tuple of (segmentsout, rawpoints, items) describing the solved path.
        All activity points will be duplicated in the returned path.

        waypoints is a list of activityPoint objects, in the correct order. filepath is
        used when the result should also be written to a file and is required for csv
        output (in which case the csv rows are returned instead).
"""
segmentsout, rawpoints, items = self.solver.solvemultipoint(waypoints)
if filepath:
extension = re.search('^(.+\/[^/]+)\.(\w+)$', filepath).group(2)
else:
extension = None
if extension == "json":
json.dump(segmentsout.tojson(), filepath)
elif extension == "csv":
header = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]
rows = header + segmentsout.tocsv()
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in rows:
writer.writerow(row)
return rows
return segmentsout, rawpoints, items
def completeSearchFromJSON(self, optimize_on, jsonInput, filepath=None, algorithm="A*",
numTestPoints=0):
jloader = JSONloader.from_string(jsonInput)
waypoints = jloader.get_waypoints()
#if algorithm == "A*":
segmentsout,_,_ = self.completeSearch(optimize_on, waypoints, filepath)
updatedjson = jloader.add_search_sol(segmentsout.list)
return updatedjson
if __name__ == '__main__':
from pextant.analysis.loadWaypoints import loadPoints
from explorers import Astronaut
from EnvironmentalModel import GDALMesh
hi_low = GDALMesh('maps/HI_lowqual_DEM.tif')
waypoints = loadPoints('waypoints/HI_13Nov16_MD7_A.json')
env_model = hi_low.loadSubSection(waypoints.geoEnvelope())
astronaut = Astronaut(80)
pathfinder = Pathfinder(astronaut, env_model)
out = pathfinder.aStarCompletePath('Energy', waypoints)
print out
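

# Illustrative sketch (not part of the original file): the filepath regex used in
# completeSearch() only matches paths that contain a directory component, so a bare
# filename yields no match (and calling .group(2) on that result would fail). The
# helper name and example paths below are hypothetical.
def _extension_of(filepath):
    match = re.search('^(.+\/[^/]+)\.(\w+)$', filepath)
    return match.group(2) if match else None

# _extension_of('results/sortie1.json') -> 'json'
# _extension_of('results/sortie1.csv')  -> 'csv'
# _extension_of('sortie1.csv')          -> None (no directory component)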
|
mit
|
graphstorm/graphstorm
|
third_party/freetype/src/tools/docmaker/docbeauty.py
|
877
|
2642
|
#!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
content_processor = ContentProcessor()
def beautify_block( block ):
if block.content:
content_processor.reset()
markups = content_processor.process_content( block.content )
text = []
first = 1
for markup in markups:
text.extend( markup.beautify( first ) )
first = 0
# now beautify the documentation "borders" themselves
lines = [" /*************************************************************************"]
for l in text:
lines.append( " *" + l )
lines.append( " */" )
block.lines = lines
def usage():
print "\nDocBeauty 0.1 Usage information\n"
print " docbeauty [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -b : backup original files with the 'orig' extension"
print ""
print " --backup : same as -b"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"hb", \
["help", "backup"] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
output_dir = None
do_backup = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-b", "--backup" ):
do_backup = 1
# create context and processor
source_processor = SourceProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
for block in source_processor.blocks:
beautify_block( block )
new_name = filename + ".new"
ok = None
try:
file = open( new_name, "wt" )
for block in source_processor.blocks:
for line in block.lines:
file.write( line )
file.write( "\n" )
file.close()
except:
ok = 0
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
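
# Illustrative note (not part of the original script): a typical invocation, with a
# placeholder header path:
#
#   python docbeauty.py -b include/freetype/freetype.h
#
# Each processed file is written back out as "<filename>.new". Note that in this
# version of main() the -b/--backup flag is parsed into do_backup but is not acted
# upon afterwards.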
|
mit
|
NoUsername/PrivateNotesExperimental
|
lib/south/management/commands/migrate.py
|
4
|
4882
|
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.conf import settings
from django.db import models
from optparse import make_option
from south import migration
import sys
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--all', action='store_true', dest='all_apps', default=False,
help='Run the specified migration for all apps.'),
make_option('--list', action='store_true', dest='list', default=False,
help='List migrations noting those that have been applied'),
make_option('--skip', action='store_true', dest='skip', default=False,
help='Will skip over out-of-order missing migrations'),
make_option('--merge', action='store_true', dest='merge', default=False,
help='Will run out-of-order missing migrations as they are - no rollbacks.'),
make_option('--no-initial-data', action='store_true', dest='no_initial_data', default=False,
help='Skips loading initial data if specified.'),
make_option('--fake', action='store_true', dest='fake', default=False,
help="Pretends to do the migrations, but doesn't actually execute them."),
make_option('--db-dry-run', action='store_true', dest='db_dry_run', default=False,
help="Doesn't execute the SQL generated by the db methods, and doesn't store a record that the migration(s) occurred. Useful to test migrations before applying them."),
)
if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]:
option_list += (
make_option('--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
)
help = "Runs migrations for all apps."
def handle(self, app=None, target=None, skip=False, merge=False, backwards=False, fake=False, db_dry_run=False, list=False, **options):
# Work out what the resolve mode is
resolve_mode = merge and "merge" or (skip and "skip" or None)
# Turn on db debugging
from south.db import db
db.debug = True
# NOTE: THIS IS DUPLICATED FROM django.core.management.commands.syncdb
# This code imports any module named 'management' in INSTALLED_APPS.
# The 'management' module is the preferred way of listening to post_syncdb
# signals, and since we're sending those out with create_table migrations,
# we need apps to behave correctly.
for app_name in settings.INSTALLED_APPS:
try:
__import__(app_name + '.management', {}, {}, [''])
except ImportError, exc:
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
# END DJANGO DUPE CODE
# if all_apps flag is set, shift app over to target
if options['all_apps']:
target = app
app = None
# Migrate each app
if app:
apps = [migration.get_app(app.split(".")[-1])]
else:
apps = migration.get_migrated_apps()
silent = options.get('verbosity', 0) == 0
if list and apps:
list_migrations(apps)
if not list:
for app in apps:
result = migration.migrate_app(
app,
resolve_mode = resolve_mode,
target_name = target,
fake = fake,
db_dry_run = db_dry_run,
silent = silent,
load_inital_data = not options['no_initial_data'],
skip = skip,
)
if result is False:
return
def list_migrations(apps):
from south.models import MigrationHistory
apps = list(apps)
names = [migration.get_app_name(app) for app in apps]
applied_migrations = MigrationHistory.objects.filter(app_name__in=names)
applied_migrations = ['%s.%s' % (mi.app_name,mi.migration) for mi in applied_migrations]
print
for app in apps:
print migration.get_app_name(app)
all_migrations = migration.get_migration_names(app)
for migration_name in all_migrations:
long_form = '%s.%s' % (migration.get_app_name(app),migration_name)
if long_form in applied_migrations:
print format_migration_list_item(migration_name)
else:
print format_migration_list_item(migration_name, applied=False)
print
def format_migration_list_item(name, applied=True):
if applied:
return ' * %s' % name
return ' %s' % name
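

# Illustrative sketch (not part of the original command): the truth table behind the
# `merge and "merge" or (skip and "skip" or None)` expression in handle(). The helper
# name is hypothetical.
def _resolve_mode(merge, skip):
    # merge wins over skip; with neither flag set the resolve mode is None
    if merge:
        return "merge"
    if skip:
        return "skip"
    return None

# _resolve_mode(True, False)  -> "merge"
# _resolve_mode(False, True)  -> "skip"
# _resolve_mode(False, False) -> None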
|
agpl-3.0
|
BrewPi/brewpi-service
|
test/test_couchdb_client.py
|
1
|
3284
|
"""
Tests brewblox_service.couchdb_client
"""
import pytest
from aiohttp import web
from aiohttp.client_exceptions import ClientResponseError
from brewblox_service import couchdb_client, http_client
TESTED = couchdb_client.__name__
SRV_URL = couchdb_client.COUCH_URL[len('http://'):]
DB_URL = '/sparkbase'
DOC_URL = '/sparkbase/sparkdoc'
@pytest.fixture
def app(app, mocker):
mocker.patch(TESTED + '.DB_RETRY_INTERVAL_S', 0.01)
http_client.setup(app)
couchdb_client.setup(app)
return app
@pytest.fixture
def cclient(app):
return couchdb_client.get_client(app)
async def test_client_read(app, client, cclient, aresponses):
# Blank database
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({'rev': 'rev_read'}))
assert await cclient.read('sparkbase', 'sparkdoc', [1, 2]) == ('rev_read', [1, 2])
# Retry contact server, content in database
for i in range(20):
aresponses.add(SRV_URL, '/', 'HEAD', web.json_response({}, status=404))
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT', web.json_response({}, status=412))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({'_rev': 'rev_read', 'data': [2, 1]}))
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=409))
assert await cclient.read('sparkbase', 'sparkdoc', []) == ('rev_read', [2, 1])
async def test_client_read_errors(app, client, cclient, aresponses):
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=404)) # unexpected
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ClientResponseError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=412))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=500)) # unexpected
await cclient.read('sparkbase', 'sparkdoc', [])
with pytest.raises(ValueError):
aresponses.add(SRV_URL, '/', 'HEAD')
aresponses.add(SRV_URL, DB_URL, 'PUT')
# Either get or put must return an ok value
aresponses.add(SRV_URL, DOC_URL, 'PUT', web.json_response({}, status=409))
aresponses.add(SRV_URL, DOC_URL, 'GET', web.json_response({}, status=404))
await cclient.read('sparkbase', 'sparkdoc', [])
async def test_client_write(app, client, cclient, aresponses):
aresponses.add(
SRV_URL, f'{DOC_URL}?rev=revy', 'PUT',
web.json_response({'rev': 'rev_write'}), match_querystring=True)
assert await cclient.write('sparkbase', 'sparkdoc', 'revy', [1, 2]) == 'rev_write'
|
gpl-3.0
|
codeforeurope/Change-By-Us
|
framework/orm_holder.py
|
3
|
2772
|
"""
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
from lib import web
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from framework.config import Config
class OrmHolder (object):
@property
def orm(self):
"""
Gets the SQLAlchemy ORM session, which is stored on the thread-global
``web.ctx`` object. The object is wrapped so that we can more easily stub
it when necessary.
"""
if self.is_invalid:
config = self.get_db_config()
engine = self.get_db_engine(config)
web.ctx.orm = self.get_orm(engine)
return web.ctx.orm
@property
def is_invalid(self):
"""A flag denoting that the ORM session needs to be [re]loaded"""
return not hasattr(web.ctx, 'orm') or web.ctx.orm is None
@classmethod
def invalidate(cls):
web.ctx.orm = None
def get_db_config(self):
"""Pulls the database config information from the config.yaml file."""
return Config.get('database')
def get_db_engine(self, db_config):
"""
Gets the SQLAlchemy database engine.
The database engine should be a global object in the process. As such,
we stick it on ``web.config``. This way, all the threads share the
engine and the db connection pool that it maintains.
See http://docs.sqlalchemy.org/en/latest/core/engines.html for create_engine() params
"""
if not hasattr(web.config, 'db_engine'):
db_conn_string = '%(dbn)s://%(user)s:%(password)s@%(host)s/%(db)s' % db_config
# TODO:
# * encoding "Defaults to utf-8"
# * echo should be configurable based on DEBUG setting, otherwise all
# sql statements will be logged indiscriminately
web.config.db_engine = create_engine(db_conn_string,
encoding='utf-8',
convert_unicode=False,
echo=True, echo_pool=True,
# Secs between recycling pool connections
pool_recycle=600)
return web.config.db_engine
def get_orm(self, engine):
"""
Returns a thread-specific SQLAlchemy ORM session.
The session is a scoped session, which means that it is global within
a given thread. New threads, however, will create new sessions.
"""
OrmSession = scoped_session(sessionmaker(bind=engine))
return OrmSession
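

# Illustrative sketch (not part of the original module): the scoped_session pattern
# used above, shown against an in-memory SQLite engine so it can run standalone.
# Within one thread, repeated calls to the registry return the same session object.
def _scoped_session_sketch():
    engine = create_engine('sqlite://')
    Session = scoped_session(sessionmaker(bind=engine))
    session_a = Session()
    session_b = Session()
    assert session_a is session_b   # same thread -> same underlying session
    Session.remove()                # discard the thread-local session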
|
agpl-3.0
|
fullfanta/mxnet
|
example/sparse/linear_classification/data.py
|
20
|
1358
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os, gzip
import sys
import mxnet as mx
def get_avazu_data(data_dir, data_name, url):
if not os.path.isdir(data_dir):
os.mkdir(data_dir)
os.chdir(data_dir)
if (not os.path.exists(data_name)):
print("Dataset " + data_name + " not present. Downloading now ...")
import urllib
zippath = os.path.join(data_dir, data_name + ".bz2")
urllib.urlretrieve(url + data_name + ".bz2", zippath)
os.system("bzip2 -d %r" % data_name + ".bz2")
print("Dataset " + data_name + " is now present.")
os.chdir("..")
|
apache-2.0
|
annarev/tensorflow
|
tensorflow/python/ops/control_flow_grad.py
|
8
|
9421
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.control_flow_ops import *
# pylint: enable=wildcard-import
def _SwitchGrad(op, *grad):
"""Gradients for a Switch op is calculated using a Merge op.
If the switch is a loop switch, it will be visited twice. We create
the merge on the first visit, and update the other input of the merge
on the second visit. A next_iteration is also added on second visit.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
merge_grad = grad_ctxt.grad_state.switch_map.get(op)
if merge_grad is not None:
# This is the second time this Switch is visited. It comes from
# the non-exit branch of the Switch, so update the second input
# to the Merge.
# TODO(yuanbyu): Perform shape inference with this new input.
if grad[1] is not None:
# pylint: disable=protected-access
control_flow_ops._AddNextAndBackEdge(merge_grad, grad[1],
enforce_shape_invariant=False)
# pylint: enable=protected-access
return None, None
elif grad[0] is not None:
# This is the first time this Switch is visited. It comes from
# the Exit branch, which is grad[0]. grad[1] is empty at this point.
# Use grad[0] for both inputs to merge for now, but update the second
# input of merge when we see this Switch the second time.
merge_grad = merge([grad[0], grad[0]], name="b_switch")[0]
grad_ctxt.grad_state.switch_map[op] = merge_grad
return merge_grad, None
else:
# This is the first time this Switch is visited. It comes from the
# Identity branch. Such a Switch has `None` gradient for the Exit branch,
# meaning the output is not differentiable.
return None, None
elif isinstance(op_ctxt, CondContext):
zero_grad = grad[1 - op_ctxt.branch]
# At this point, we have created zero_grad guarded by the right switch.
    # Unfortunately, we may still get None here for non-trainable data types.
if zero_grad is None:
# For resource variables we get None always on the other branch, so bypass
# this.
if op.inputs[0].dtype == dtypes.resource:
return merge(
[grad[op_ctxt.branch]] * 2, name="cond_resource_grad")[0], None
return None, None
return merge(grad, name="cond_grad")[0], None
else:
false_grad = switch(grad[0], op.inputs[1])[0]
true_grad = switch(grad[1], op.inputs[1])[1]
return merge([false_grad, true_grad])[0], None
ops.RegisterGradient("Switch")(_SwitchGrad)
ops.RegisterGradient("RefSwitch")(_SwitchGrad)
@ops.RegisterGradient("Merge")
def _MergeGrad(op, grad, _):
"""Gradients for a Merge op are calculated using a Switch op."""
input_op = op.inputs[0].op
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = control_flow_util.GetOutputContext(input_op)
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if isinstance(op_ctxt, WhileContext):
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, grad_ctxt.pivot)
# pylint: enable=protected-access
elif isinstance(op_ctxt, CondContext):
pred = op_ctxt.pred
if grad_ctxt and grad_ctxt.grad_state:
# This Merge node is part of a cond within a loop.
# The backprop needs to have the value of this predicate for every
# iteration. So we must have its values accumulated in the forward, and
# use the accumulated values as the predicate for this backprop switch.
grad_state = grad_ctxt.grad_state
real_pred = grad_state.history_map.get(pred.name)
if real_pred is None:
# Remember the value of pred for every iteration.
grad_ctxt = grad_state.grad_context
grad_ctxt.Exit()
history_pred = grad_state.AddForwardAccumulator(pred)
grad_ctxt.Enter()
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
return control_flow_ops._SwitchRefOrTensor(grad, pred, name="cond_grad")
# pylint: enable=protected-access
else:
num_inputs = len(op.inputs)
cond = [math_ops.equal(op.outputs[1], i) for i in xrange(num_inputs)]
# pylint: disable=protected-access
return [control_flow_ops._SwitchRefOrTensor(grad, cond[i])[1]
for i in xrange(num_inputs)]
# pylint: enable=protected-access
@ops.RegisterGradient("RefMerge")
def _RefMergeGrad(op, grad, _):
return _MergeGrad(op, grad, _)
@ops.RegisterGradient("Exit")
def _ExitGrad(op, grad):
"""Gradients for an exit op are calculated using an Enter op."""
graph = ops.get_default_graph()
# pylint: disable=protected-access
op_ctxt = op._get_control_flow_context()
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if not grad_ctxt.back_prop:
# The flag `back_prop` is set by users to suppress gradient
# computation for this loop. If the attribute `back_prop` is false,
# no gradient computation.
return None
if op_ctxt.grad_state:
raise TypeError("Second-order gradient for while loops not supported.")
if isinstance(grad, ops.Tensor):
grad_ctxt.AddName(grad.name)
else:
if not isinstance(grad, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(grad))
grad_ctxt.AddName(grad.values.name)
grad_ctxt.AddName(grad.indices.name)
dense_shape = grad.dense_shape
if dense_shape is not None:
grad_ctxt.AddName(dense_shape.name)
grad_ctxt.Enter()
# pylint: disable=protected-access
result = control_flow_ops._Enter(
grad, grad_ctxt.name, is_constant=False,
parallel_iterations=grad_ctxt.parallel_iterations,
name="b_exit")
# pylint: enable=protected-access
grad_ctxt.loop_enters.append(result)
grad_ctxt.Exit()
return result
ops.RegisterGradient("RefExit")(_ExitGrad)
@ops.RegisterGradient("NextIteration")
def _NextIterationGrad(_, grad):
"""A forward next_iteration is translated into a backprop identity.
Note that the backprop next_iteration is added in switch grad.
"""
return grad
@ops.RegisterGradient("RefNextIteration")
def _RefNextIterationGrad(_, grad):
return _NextIterationGrad(_, grad)
@ops.RegisterGradient("Enter")
def _EnterGrad(op, grad):
"""Gradients for an Enter are calculated using an Exit op.
For loop variables, grad is the gradient so just add an exit.
For loop invariants, we need to add an accumulator loop.
"""
graph = ops.get_default_graph()
# pylint: disable=protected-access
grad_ctxt = graph._get_control_flow_context()
# pylint: enable=protected-access
if grad_ctxt is None:
return grad
if not grad_ctxt.back_prop:
# Skip gradient computation, if the attribute `back_prop` is false.
return grad
if grad_ctxt.grad_state is None:
# Pass the gradient through if we are not in a gradient while context.
return grad
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
if isinstance(grad, ops.Tensor):
result = grad_ctxt.AddBackpropAccumulator(op, grad)
elif isinstance(grad, ops.IndexedSlices):
result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
else:
# TODO(yuanbyu, lukasr): Add support for SparseTensor.
raise TypeError("Type %s not supported" % type(grad))
else:
result = exit(grad)
grad_ctxt.loop_exits.append(result)
grad_ctxt.ExitResult([result])
return result
@ops.RegisterGradient("RefEnter")
def _RefEnterGrad(op, grad):
return _EnterGrad(op, grad)
@ops.RegisterGradient("LoopCond")
def _LoopCondGrad(_):
"""Stop backprop for the predicate of a while loop."""
return None
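

# Illustrative sketch (not part of this module): the user-level effect of the Switch
# and Merge gradients defined above, written against the TF1-style graph API that
# this file targets. The function name is hypothetical.
def _cond_gradient_example():
    import tensorflow as tf
    x = tf.constant(3.0)
    y = tf.cond(x > 0, lambda: tf.square(x), lambda: tf.identity(x))
    # The Switch/Merge gradients route d(x**2)/dx through the branch that was taken.
    dy_dx, = tf.gradients(y, x)
    with tf.Session() as sess:
        return sess.run(dy_dx)      # 6.0 at x = 3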
|
apache-2.0
|
philanthropy-u/edx-platform
|
openedx/core/djangoapps/user_authn/views/tests/test_login.py
|
1
|
29762
|
# coding:utf-8
"""
Tests for student activation and login
"""
import json
import unicodedata
import unittest
import ddt
import six
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest
from django.test.client import Client
from django.test.utils import override_settings
from django.urls import NoReverseMatch, reverse
from mock import patch
from six import text_type
from six.moves import range
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from openedx.core.djangoapps.password_policy.compliance import (
NonCompliantPasswordException,
NonCompliantPasswordWarning
)
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, waffle
from openedx.core.djangoapps.user_authn.cookies import jwt_cookies
from openedx.core.djangoapps.user_authn.tests.utils import setup_login_oauth_client
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import RegistrationFactory, UserFactory, UserProfileFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class LoginTest(CacheIsolationTestCase):
"""
Test login_user() view
"""
ENABLED_CACHES = ['default']
LOGIN_FAILED_WARNING = 'Email or password is incorrect'
ACTIVATE_ACCOUNT_WARNING = 'In order to sign in, you need to activate your account'
username = 'test'
user_email = 'test@edx.org'
password = 'test_password'
def setUp(self):
"""Setup a test user along with its registration and profile"""
super(LoginTest, self).setUp()
self.user = UserFactory.build(username=self.username, email=self.user_email)
self.user.set_password(self.password)
self.user.save()
RegistrationFactory(user=self.user)
UserProfileFactory(user=self.user)
self.client = Client()
cache.clear()
try:
self.url = reverse('login_post')
except NoReverseMatch:
self.url = reverse('login')
def test_login_success(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_success_no_pii(self):
response, mock_audit_log = self._login_response(
self.user_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [self.user_email])
def test_login_success_unicode_email(self):
unicode_email = u'test' + six.unichr(40960) + u'@edx.org'
self.user.email = unicode_email
self.user.save()
response, mock_audit_log = self._login_response(
unicode_email, self.password, patched_audit_log='student.models.AUDIT_LOG'
)
self._assert_response(response, success=True)
self._assert_audit_log(mock_audit_log, 'info', [u'Login success', unicode_email])
def test_last_login_updated(self):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert self.user.last_login > old_last_login
def test_login_success_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
old_last_login = self.user.last_login
self.test_login_success()
self.user.refresh_from_db()
assert old_last_login == self.user.last_login
def test_login_fail_no_user_exists(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_no_user_exists_no_pii(self):
nonexistent_email = u'not_a_user@edx.org'
response, mock_audit_log = self._login_response(
nonexistent_email,
self.password,
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [nonexistent_email])
def test_login_fail_wrong_password(self):
response, mock_audit_log = self._login_response(
self.user_email,
'wrong_password',
)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_fail_wrong_password_no_pii(self):
response, mock_audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'password for', u'invalid'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [self.user_email])
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_login_not_activated_no_pii(self):
# De-activate the user
self.user.is_active = False
self.user.save()
# Should now be unable to login
response, mock_audit_log = self._login_response(
self.user_email,
self.password
)
self._assert_response(response, success=False,
value="In order to sign in, you need to activate your account.")
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
self._assert_not_in_audit_log(mock_audit_log, 'warning', [u'test'])
def test_login_not_activated_with_correct_credentials(self):
"""
        Tests that when a user logs in with the correct credentials but an inactive
        account, the system sends an account activation email notification to the user.
"""
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(
self.user_email,
self.password,
)
self._assert_response(response, success=False, value=self.ACTIVATE_ACCOUNT_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Account not active for user'])
@patch('openedx.core.djangoapps.user_authn.views.login._log_and_raise_inactive_user_auth_error')
def test_login_inactivated_user_with_incorrect_credentials(self, mock_inactive_user_email_and_error):
"""
        Tests that when a user logs in with incorrect credentials and an inactive account,
        the system does *not* send an account activation email notification to the user.
"""
nonexistent_email = 'incorrect@email.com'
self.user.is_active = False
self.user.save()
response, mock_audit_log = self._login_response(nonexistent_email, 'incorrect_password')
self.assertFalse(mock_inactive_user_email_and_error.called)
self._assert_response(response, success=False, value=self.LOGIN_FAILED_WARNING)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', u'Unknown user email', nonexistent_email])
def test_login_unicode_email(self):
unicode_email = self.user_email + six.unichr(40960)
response, mock_audit_log = self._login_response(
unicode_email,
self.password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning', [u'Login failed', unicode_email])
def test_login_unicode_password(self):
unicode_password = self.password + six.unichr(1972)
response, mock_audit_log = self._login_response(
self.user_email,
unicode_password,
)
self._assert_response(response, success=False)
self._assert_audit_log(mock_audit_log, 'warning',
[u'Login failed', u'password for', self.user_email, u'invalid'])
def test_logout_logging(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout', u'test'])
def test_login_user_info_cookie(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Verify the format of the "user info" cookie set on login
cookie = self.client.cookies[settings.EDXMKTG_USER_INFO_COOKIE_NAME]
user_info = json.loads(cookie.value)
self.assertEqual(user_info["version"], settings.EDXMKTG_USER_INFO_COOKIE_VERSION)
self.assertEqual(user_info["username"], self.user.username)
# Check that the URLs are absolute
for url in user_info["header_urls"].values():
self.assertIn("http://testserver/", url)
def test_logout_deletes_mktg_cookies(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
# Check that the marketing site cookies have been set
self.assertIn(settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, self.client.cookies)
self.assertIn(settings.EDXMKTG_USER_INFO_COOKIE_NAME, self.client.cookies)
# Log out
logout_url = reverse('logout')
response = self.client.post(logout_url)
# Check that the marketing site cookies have been deleted
# (cookies are deleted by setting an expiration date in 1970)
for cookie_name in [settings.EDXMKTG_LOGGED_IN_COOKIE_NAME, settings.EDXMKTG_USER_INFO_COOKIE_NAME]:
cookie = self.client.cookies[cookie_name]
self.assertIn("01-Jan-1970", cookie.get('expires'))
@override_settings(
EDXMKTG_LOGGED_IN_COOKIE_NAME=u"unicode-logged-in",
EDXMKTG_USER_INFO_COOKIE_NAME=u"unicode-user-info",
)
def test_unicode_mktg_cookie_names(self):
# When logged in cookie names are loaded from JSON files, they may
# have type `unicode` instead of `str`, which can cause errors
# when calling Django cookie manipulation functions.
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
response = self.client.post(reverse('logout'))
expected = {
'target': '/',
}
self.assertDictContainsSubset(expected, response.context_data)
@patch.dict("django.conf.settings.FEATURES", {'SQUELCH_PII_IN_LOGS': True})
def test_logout_logging_no_pii(self):
response, _ = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
logout_url = reverse('logout')
with patch('student.models.AUDIT_LOG') as mock_audit_log:
response = self.client.post(logout_url)
self.assertEqual(response.status_code, 200)
self._assert_audit_log(mock_audit_log, 'info', [u'Logout'])
self._assert_not_in_audit_log(mock_audit_log, 'info', [u'test'])
def test_login_ratelimited_success(self):
# Try (and fail) logging in with fewer attempts than the limit of 30
# and verify that you can still successfully log in afterwards.
for i in range(20):
password = u'test_password{0}'.format(i)
response, _audit_log = self._login_response(self.user_email, password)
self._assert_response(response, success=False)
# now try logging in with a valid password
response, _audit_log = self._login_response(self.user_email, self.password)
self._assert_response(response, success=True)
def test_login_ratelimited(self):
# try logging in 30 times, the default limit in the number of failed
# login attempts in one 5 minute period before the rate gets limited
for i in range(30):
password = u'test_password{0}'.format(i)
self._login_response(self.user_email, password)
# check to see if this response indicates that this was ratelimited
response, _audit_log = self._login_response(self.user_email, 'wrong_password')
self._assert_response(response, success=False, value='Too many failed login attempts')
@patch.dict("django.conf.settings.FEATURES", {"DISABLE_SET_JWT_COOKIES_FOR_TESTS": False})
def test_login_refresh(self):
def _assert_jwt_cookie_present(response):
self.assertEqual(response.status_code, 200)
self.assertIn(jwt_cookies.jwt_refresh_cookie_name(), self.client.cookies)
setup_login_oauth_client()
response, _ = self._login_response(self.user_email, self.password)
_assert_jwt_cookie_present(response)
response = self.client.post(reverse('login_refresh'))
_assert_jwt_cookie_present(response)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session(self):
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_no_user_profile(self):
"""
        Assert that a user logging in with CAS (Central Authentication Service) is
        redirected to the dashboard in the case of LMS, or to upload_transcripts in
        the case of CMS.
"""
user = UserFactory.build(username='tester', email='tester@edx.org')
user.set_password(self.password)
user.save()
# Assert that no profile is created.
self.assertFalse(hasattr(user, 'profile'))
creds = {'email': 'tester@edx.org', 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
user = User.objects.get(pk=user.pk)
# Assert that profile is created.
self.assertTrue(hasattr(user, 'profile'))
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
try:
# this test can be run with either lms or studio settings
# since studio does not have a dashboard url, we should
# look for another url that is login_required, in that case
url = reverse('dashboard')
except NoReverseMatch:
url = reverse('upload_transcripts')
response = client1.get(url)
# client1 will be logged out
self.assertEqual(response.status_code, 302)
@patch.dict("django.conf.settings.FEATURES", {'PREVENT_CONCURRENT_LOGINS': True})
def test_single_session_with_url_not_having_login_required_decorator(self):
        # Access the logout url: since it does not have the login_required decorator, it
        # avoids the redirect and goes into enforce_single_login.
creds = {'email': self.user_email, 'password': self.password}
client1 = Client()
client2 = Client()
response = client1.post(self.url, creds)
self._assert_response(response, success=True)
# Reload the user from the database
self.user = User.objects.get(pk=self.user.pk)
self.assertEqual(self.user.profile.get_meta()['session_id'], client1.session.session_key)
# second login should log out the first
response = client2.post(self.url, creds)
self._assert_response(response, success=True)
url = reverse('logout')
response = client1.get(url)
self.assertEqual(response.status_code, 200)
def test_change_enrollment_400(self):
"""
Tests that a 400 in change_enrollment doesn't lead to a 404
and in fact just logs in the user without incident
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponseBadRequest("I am a 400")
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
def test_change_enrollment_200_no_redirect(self):
"""
Tests "redirect_url" is None if change_enrollment returns a HttpResponse
with no content
"""
# add this post param to trigger a call to change_enrollment
extra_post_params = {"enrollment_action": "enroll"}
with patch('student.views.change_enrollment') as mock_change_enrollment:
mock_change_enrollment.return_value = HttpResponse()
response, _ = self._login_response(
self.user_email, self.password, extra_post_params=extra_post_params,
)
response_content = json.loads(response.content)
self.assertIsNone(response_content["redirect_url"])
self._assert_response(response, success=True)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance(self):
"""
Tests _enforce_password_policy_compliance succeeds when no exception is thrown
"""
enforce_compliance_path = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_path) as mock_check_password_policy_compliance:
mock_check_password_policy_compliance.return_value = HttpResponse()
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertTrue(response_content.get('success'))
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_exception(self):
"""
Tests _enforce_password_policy_compliance fails with an exception thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordException()
response, _ = self._login_response(
self.user_email,
self.password
)
response_content = json.loads(response.content)
self.assertFalse(response_content.get('success'))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Password reset', mail.outbox[0].subject)
@override_settings(PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG={'ENFORCE_COMPLIANCE_ON_LOGIN': True})
def test_check_password_policy_compliance_warning(self):
"""
Tests _enforce_password_policy_compliance succeeds with a warning thrown
"""
enforce_compliance_on_login = 'openedx.core.djangoapps.password_policy.compliance.enforce_compliance_on_login'
with patch(enforce_compliance_on_login) as mock_enforce_compliance_on_login:
mock_enforce_compliance_on_login.side_effect = NonCompliantPasswordWarning('Test warning')
response, _ = self._login_response(self.user_email, self.password)
response_content = json.loads(response.content)
self.assertIn('Test warning', self.client.session['_messages'])
self.assertTrue(response_content.get('success'))
@ddt.data(
('test_password', 'test_password', True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
(unicodedata.normalize('NFKC', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), True),
(unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'),
unicodedata.normalize('NFKD', u'Ṗŕệṿïệẅ Ṯệẍt'), False),
)
@ddt.unpack
def test_password_unicode_normalization_login(self, password, password_entered, login_success):
"""
Tests unicode normalization on user's passwords on login.
"""
self.user.set_password(password)
self.user.save()
response, _ = self._login_response(self.user.email, password_entered)
self._assert_response(response, success=login_success)
def _login_response(self, email, password, patched_audit_log=None, extra_post_params=None):
"""
Post the login info
"""
if patched_audit_log is None:
patched_audit_log = 'openedx.core.djangoapps.user_authn.views.login.AUDIT_LOG'
post_params = {'email': email, 'password': password}
if extra_post_params is not None:
post_params.update(extra_post_params)
with patch(patched_audit_log) as mock_audit_log:
result = self.client.post(self.url, post_params)
return result, mock_audit_log
def _assert_response(self, response, success=None, value=None):
"""
Assert that the response had status 200 and returned a valid
JSON-parseable dict.
If success is provided, assert that the response had that
value for 'success' in the JSON dict.
If value is provided, assert that the response contained that
value for 'value' in the JSON dict.
"""
self.assertEqual(response.status_code, 200)
try:
response_dict = json.loads(response.content)
except ValueError:
self.fail("Could not parse response content as JSON: %s"
% str(response.content))
if success is not None:
self.assertEqual(response_dict['success'], success)
if value is not None:
msg = ("'%s' did not contain '%s'" %
(unicode(response_dict['value']), unicode(value)))
self.assertIn(value, response_dict['value'], msg)
def _assert_audit_log(self, mock_audit_log, level, log_strings):
"""
Check that the audit log has received the expected call as its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertIn(log_string, format_string)
def _assert_not_in_audit_log(self, mock_audit_log, level, log_strings):
"""
        Check that the audit log did not receive the specified strings in its last call.
"""
method_calls = mock_audit_log.method_calls
name, args, _kwargs = method_calls[-1]
self.assertEquals(name, level)
self.assertEquals(len(args), 1)
format_string = args[0]
for log_string in log_strings:
self.assertNotIn(log_string, format_string)
class ExternalAuthShibTest(ModuleStoreTestCase):
"""
Tests how login_user() interacts with ExternalAuth, in particular Shib
"""
def setUp(self):
super(ExternalAuthShibTest, self).setUp()
self.course = CourseFactory.create(
org='Stanford',
number='456',
display_name='NO SHIB',
user_id=self.user.id,
)
self.shib_course = CourseFactory.create(
org='Stanford',
number='123',
display_name='Shib Only',
enrollment_domain='shib:https://idp.stanford.edu/',
user_id=self.user.id,
)
self.user_w_map = UserFactory.create(email='withmap@stanford.edu')
self.extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
external_email='withmap@stanford.edu',
external_domain='shib:https://idp.stanford.edu/',
external_credentials="",
user=self.user_w_map)
self.user_w_map.save()
self.extauth.save()
self.user_wo_map = UserFactory.create(email='womap@gmail.com')
self.user_wo_map.save()
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_page_redirect(self):
"""
Tests that when a shib user types their email address into the login page, they get redirected
to the shib login.
"""
response = self.client.post(reverse('login'), {'email': self.user_w_map.email, 'password': ''})
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertEqual(obj, {
'success': False,
'redirect': reverse('shib-login'),
})
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_login_required_dashboard(self):
"""
Tests redirects to when @login_required to dashboard, which should always be the normal login,
since there is no course context
"""
response = self.client.get(reverse('dashboard'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/login?next=/dashboard')
@unittest.skipUnless(settings.FEATURES.get('AUTH_USE_SHIB'), "AUTH_USE_SHIB not set")
def test_externalauth_login_required_course_context(self):
"""
        Tests the redirects when visiting a course-specific URL with @login_required.
        The target should vary by course depending on its enrollment_domain.
"""
target_url = reverse('courseware', args=[text_type(self.course.id)])
noshib_response = self.client.get(target_url, follow=True, HTTP_ACCEPT="text/html")
self.assertEqual(noshib_response.redirect_chain[-1],
('/login?next={url}'.format(url=target_url), 302))
self.assertContains(noshib_response, (u"Sign in or Register | {platform_name}"
.format(platform_name=settings.PLATFORM_NAME)))
self.assertEqual(noshib_response.status_code, 200)
target_url_shib = reverse('courseware', args=[text_type(self.shib_course.id)])
shib_response = self.client.get(**{'path': target_url_shib,
'follow': True,
'REMOTE_USER': self.extauth.external_id,
'Shib-Identity-Provider': 'https://idp.stanford.edu/',
'HTTP_ACCEPT': "text/html"})
# Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain
# The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we
# won't test its contents
self.assertEqual(shib_response.redirect_chain[-3],
('/shib-login/?next={url}'.format(url=target_url_shib), 302))
self.assertEqual(shib_response.redirect_chain[-2],
(target_url_shib, 302))
self.assertEqual(shib_response.status_code, 200)
|
agpl-3.0
|
kaiyuanl/gem5
|
src/mem/slicc/ast/StallAndWaitStatementAST.py
|
26
|
2360
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class StallAndWaitStatementAST(StatementAST):
def __init__(self, slicc, in_port, address):
super(StallAndWaitStatementAST, self).__init__(slicc)
self.in_port = in_port
self.address = address
def __repr__(self):
return "[StallAndWaitStatementAst: %r]" % self.in_port
def generate(self, code, return_type):
self.in_port.assertType("InPort")
self.address.assertType("Address")
in_port_code = self.in_port.var.code
address_code = self.address.var.code
code('''
stallBuffer(&($in_port_code), $address_code);
$in_port_code.stallMessage($address_code);
''')
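# Illustrative example (hypothetical names): for an in_port whose generated
# code is "mandatoryQueue_in" and an address expression "in_msg.addr", the
# template above expands to the following C++:
#     stallBuffer(&(mandatoryQueue_in), in_msg.addr);
#     mandatoryQueue_in.stallMessage(in_msg.addr);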
|
bsd-3-clause
|
javachengwc/hue
|
desktop/core/ext-py/Django-1.6.10/tests/admin_inlines/tests.py
|
49
|
36114
|
from __future__ import absolute_import, unicode_literals
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.utils import override_settings
# local test models
from .admin import InnerInline, TitleInline, site
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book, Profile,
ProfileCollection, ParentModelWithCustomPk, ChildModel1, ChildModel2,
Sighting, Title, Novel, Chapter, FootNote, BinaryTree)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInline(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(self.change_url)
inner_formset = response.context['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
inner = Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/'
% holder.id)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get('/admin/admin_inlines/author/add/')
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-Book Relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
# Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': 'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
# Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>')
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# An identically named callable isn't present in the parent ModelAdmin,
# so rendering of the add view shouldn't explode
response = self.client.get('/admin/admin_inlines/novel/add/')
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get('/admin/admin_inlines/poll/add/')
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
# The right callable should be used for the inline readonly_fields
# column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get('/admin/admin_inlines/holder4/add/')
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)
# ReadOnly fields
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Help text for ReadOnlyInline)" title="Help text for ReadOnlyInline" />', 1)
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />', html=True)
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia" />', html=True)
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />', html=True)
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_localize_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for locales that use
thousand separators
"""
holder = Holder.objects.create(pk=123456789, dummy=42)
inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/' % holder.id)
inner_shortcut = 'r/%s/%s/'%(ContentType.objects.get_for_model(inner).pk, inner.pk)
self.assertContains(response, inner_shortcut)
def test_custom_pk_shortcut(self):
"""
Ensure that the "View on Site" link is correct for models with a
custom primary key field. Bug #18433.
"""
parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
response = self.client.get('/admin/admin_inlines/parentmodelwithcustompk/foo/')
child1_shortcut = 'r/%s/%s/'%(ContentType.objects.get_for_model(child1).pk, child1.pk)
child2_shortcut = 'r/%s/%s/'%(ContentType.objects.get_for_model(child2).pk, child2.pk)
self.assertContains(response, child1_shortcut)
self.assertContains(response, child2_shortcut)
def test_create_inlines_on_inherited_model(self):
"""
Ensure that an object can be created with inlines when it inherits
another class. Bug #19524.
"""
data = {
'name': 'Martian',
'sighting_set-TOTAL_FORMS': 1,
'sighting_set-INITIAL_FORMS': 0,
'sighting_set-MAX_NUM_FORMS': 0,
'sighting_set-0-place': 'Zone 51',
'_save': 'Save',
}
response = self.client.post('/admin/admin_inlines/extraterrestrial/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)
def test_custom_get_extra_form(self):
bt_head = BinaryTree.objects.create(name="Tree Head")
bt_child = BinaryTree.objects.create(name="First Child", parent=bt_head)
# The maximum number of forms should respect 'get_max_num' on the
# ModelAdmin
max_forms_input = '<input id="id_binarytree_set-MAX_NUM_FORMS" name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d" />'
# The total number of forms will remain the same in either case
total_forms_hidden = '<input id="id_binarytree_set-TOTAL_FORMS" name="binarytree_set-TOTAL_FORMS" type="hidden" value="2" />'
response = self.client.get('/admin/admin_inlines/binarytree/add/')
self.assertContains(response, max_forms_input % 3)
self.assertContains(response, total_forms_hidden)
response = self.client.get("/admin/admin_inlines/binarytree/%d/" % bt_head.id)
self.assertContains(response, max_forms_input % 2)
self.assertContains(response, total_forms_hidden)
def test_inline_nonauto_noneditable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input id="id_nonautopkbook_set-0-rand_pk" name="nonautopkbook_set-0-rand_pk" type="hidden" />',
html=True)
self.assertContains(response,
'<input id="id_nonautopkbook_set-2-0-rand_pk" name="nonautopkbook_set-2-0-rand_pk" type="hidden" />',
html=True)
def test_inline_editable_pk(self):
response = self.client.get('/admin/admin_inlines/author/add/')
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" name="editablepkbook_set-0-manual_pk" type="text" />',
html=True, count=1)
self.assertContains(response,
'<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" name="editablepkbook_set-2-0-manual_pk" type="text" />',
html=True, count=1)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInlineMedia(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
class TestInlineAdminForm(TestCase):
urls = "admin_inlines.urls"
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class TestInlineProtectedOnDelete(TestCase):
urls = "admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_deleting_inline_with_protected_delete_does_not_validate(self):
lotr = Novel.objects.create(name='Lord of the rings')
chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')
change_url = '/admin/admin_inlines/novel/%i/' % lotr.id
response = self.client.get(change_url)
data = {
'name': lotr.name,
'chapter_set-TOTAL_FORMS': 1,
'chapter_set-INITIAL_FORMS': 1,
'chapter_set-MAX_NUM_FORMS': 1000,
'_save': 'Save',
'chapter_set-0-id': chapter.id,
'chapter_set-0-name': chapter.name,
'chapter_set-0-novel': lotr.id,
'chapter_set-0-DELETE': 'on'
}
response = self.client.post(change_url, data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Deleting chapter %s would require deleting "
"the following protected related objects: foot note %s"
% (chapter, foot_note))
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
urls = "admin_inlines.urls"
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('secret')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name='The Author')
book = author.books.create(name='The inline Book')
self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='secret'),
True)
def tearDown(self):
self.client.logout()
def test_inline_add_m2m_noperm(self):
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get('/admin/admin_inlines/holder2/add/')
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/holder2/add/')
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-Book Relationship')
self.assertContains(response, '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
'value="4" name="Author_books-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_Author_books-0-id" '
'value="%i" name="Author_books-0-id" />' % self.author_book_auto_m2m_intermediate_id, html=True)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="3" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertNotContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
# max-num 0 means we can't add new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" '
'value="0" name="inner2_set-MAX_NUM_FORMS" />', html=True)
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="1" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for the existing instance, plus three extra for new ones
self.assertContains(response, '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
'value="4" name="inner2_set-TOTAL_FORMS" />', html=True)
self.assertContains(response, '<input type="hidden" id="id_inner2_set-0-id" '
'value="%i" name="inner2_set-0-id" />' % self.inner2_id, html=True)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_inlines'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-views-users.xml']
urls = "admin_inlines.urls"
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_add_stackeds(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
stacked formset.
"""
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 Stacked')
add_button.click()
self.assertEqual(rows_length(), 4)
def test_delete_stackeds(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/holder4/add/'))
inline_id = '#inner4stacked_set-group'
rows_length = lambda: len(self.selenium.find_elements_by_css_selector(
'%s .dynamic-inner4stacked_set' % inline_id))
self.assertEqual(rows_length(), 3)
add_button = self.selenium.find_element_by_link_text(
'Add another Inner4 Stacked')
add_button.click()
add_button.click()
self.assertEqual(rows_length(), 5, msg="sanity check")
for delete_link in self.selenium.find_elements_by_css_selector(
'%s .inline-deletelink' % inline_id):
delete_link.click()
self.assertEqual(rows_length(), 3)
def test_add_inlines(self):
"""
Ensure that the "Add another XXX" link correctly adds items to the
inline form.
"""
from selenium.common.exceptions import TimeoutException
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Check that there's only one inline to start with and that it has the
# correct ID.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 1)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[0].get_attribute('id'),
'profile_set-0')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)
# Add an inline
self.selenium.find_element_by_link_text('Add another Profile').click()
# Check that the inline has been added, that it has the right id, and
# that it contains the right fields.
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 2)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)
# Let's add another one to be sure
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')), 3)
self.assertEqual(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)
# Enter some data and click 'Save'
self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
# Check that the objects have been created in the database
self.assertEqual(ProfileCollection.objects.all().count(), 1)
self.assertEqual(Profile.objects.all().count(), 3)
def test_delete_inlines(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 5)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)
# Click on a few delete buttons
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
self.selenium.find_element_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
# Verify that they're gone and that the IDs have been re-sequenced
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'#profile_set-group table tr.dynamic-profile_set')), 3)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
def test_alternating_rows(self):
self.admin_login(username='super', password='secret')
self.selenium.get('%s%s' % (self.live_server_url,
'/admin/admin_inlines/profilecollection/add/'))
# Add a few inlines
self.selenium.find_element_by_link_text('Add another Profile').click()
self.selenium.find_element_by_link_text('Add another Profile').click()
row_selector = 'form#profilecollection_form tr.dynamic-profile_set'
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row1" % row_selector)), 2, msg="Expect two row1 styled rows")
self.assertEqual(len(self.selenium.find_elements_by_css_selector(
"%s.row2" % row_selector)), 1, msg="Expect one row2 styled row")
class SeleniumChromeTests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
apache-2.0
|
MIPS/external-chromium_org
|
webkit/tools/layout_tests/canary-webkit-revisions.py
|
96
|
9820
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Retrieve passing and failing WebKit revision numbers from canaries.
From each canary,
- the last WebKit revision number for which all the tests have passed,
- the last WebKit revision number for which the tests were run, and
- the names of failing layout tests
are retrieved and printed.
"""
import json
import optparse
import re
import sys
import urllib2
_WEBKIT_REVISION_IN_DEPS_RE = re.compile(r'"webkit_revision"\s*:\s*"(\d+)"')
_DEPS_FILE_URL = "http://src.chromium.org/viewvc/chrome/trunk/src/DEPS"
_DEFAULT_BUILDERS = [
"Webkit Win",
"Webkit Vista",
"Webkit Win7",
"Webkit Win (dbg)(1)",
"Webkit Win (dbg)(2)",
"Webkit Mac10.5 (CG)",
"Webkit Mac10.6 (CG)",
"Webkit Mac10.5 (CG)(dbg)(1)",
"Webkit Mac10.5 (CG)(dbg)(2)",
"Webkit Mac10.6 (CG)(dbg)",
"Webkit Linux",
"Webkit Linux 32",
"Webkit Linux (dbg)(1)",
"Webkit Linux (dbg)(2)",
]
_DEFAULT_MAX_BUILDS = 10
_TEST_PREFIX = "&tests="
_TEST_SUFFIX = '">'
_WEBKIT_TESTS = "webkit_tests"
def _OpenUrl(url):
"""Opens a URL.
Returns:
A file-like object in case of success, an empty list otherwise.
"""
try:
return urllib2.urlopen(url)
except urllib2.URLError, url_error:
message = ""
# Surprisingly, urllib2.URLError has different attributes based on the
# kinds of errors -- "code" for HTTP-level errors, "reason" for others.
if hasattr(url_error, "code"):
message = "Status code: %d" % url_error.code
if hasattr(url_error, "reason"):
message = url_error.reason
print >>sys.stderr, "Failed to open %s: %s" % (url, message)
return []
def _WebkitRevisionInDeps():
"""Returns the WebKit revision specified in DEPS file.
Returns:
Revision number as int. -1 in case of error.
"""
for line in _OpenUrl(_DEPS_FILE_URL):
match = _WEBKIT_REVISION_IN_DEPS_RE.search(line)
if match:
return int(match.group(1))
return -1
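# Illustrative example (hypothetical DEPS content): a line such as
#     "webkit_revision": "95874",
# matches _WEBKIT_REVISION_IN_DEPS_RE, so _WebkitRevisionInDeps() returns
# 95874; if no matching line is found, -1 is returned instead.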
class _BuildResult(object):
"""Build result for a builder.
Holds builder name, the last passing revision, the last run revision, and
a list of names of failing tests. Revision number 0 is used to represent
that the revision doesn't exist.
"""
def __init__(self, builder, last_passing_revision, last_run_revision,
failing_tests):
"""Constructs build results."""
self.builder = builder
self.last_passing_revision = last_passing_revision
self.last_run_revision = last_run_revision
self.failing_tests = failing_tests
def _BuilderUrlFor(builder, max_builds):
"""Constructs the URL for a builder to retrieve the last results."""
url = ("http://build.chromium.org/p/chromium.webkit/json/builders/%s/builds" %
urllib2.quote(builder))
if max_builds == -1:
return url + "/_all?as_text=1"
return (url + "?as_text=1&" +
'&'.join(["select=%d" % -i for i in range(1, 1 + max_builds)]))
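# Illustrative example (hypothetical arguments): _BuilderUrlFor("Webkit Linux", 2)
# yields
#     http://build.chromium.org/p/chromium.webkit/json/builders/Webkit%20Linux/builds?as_text=1&select=-1&select=-2
# while max_builds == -1 selects every recorded build via the "/_all" form.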
def _ExtractFailingTests(build):
"""Extracts failing test names from a build result entry JSON object."""
failing_tests = []
for step in build["steps"]:
if step["name"] == _WEBKIT_TESTS:
for text in step["text"]:
prefix = text.find(_TEST_PREFIX)
suffix = text.find(_TEST_SUFFIX)
if prefix != -1 and suffix != -1:
failing_tests += sorted(
text[prefix + len(_TEST_PREFIX): suffix].split(","))
elif "results" in step:
# Existence of "results" entry seems to mean failure.
failing_tests.append(" ".join(step["text"]))
return failing_tests
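# Illustrative example (hypothetical build entry): if a webkit_tests step's text
# contains '... &tests=fast/b.html,fast/a.html">', _ExtractFailingTests returns
# ['fast/a.html', 'fast/b.html']; any other step that carries a "results" entry
# contributes its joined "text" instead.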
def _RetrieveBuildResult(builder, max_builds, oldest_revision_to_check):
"""Retrieves build results for a builder.
Checks the last passing revision, the last run revision, and failing tests
for the last builds of a builder.
Args:
builder: Builder name.
max_builds: Maximum number of builds to check.
oldest_revision_to_check: Oldest WebKit revision to check.
Returns:
_BuildResult instance.
"""
last_run_revision = 0
failing_tests = []
succeeded = False
builds_json = _OpenUrl(_BuilderUrlFor(builder, max_builds))
if not builds_json:
return _BuildResult(builder, 0, 0, failing_tests)
builds = [(int(value["number"]), value) for unused_key, value
in json.loads(''.join(builds_json)).items()
if value.has_key("number")]
builds.sort()
builds.reverse()
for unused_key, build in builds:
if not build.has_key("text"):
continue
if len(build["text"]) < 2:
continue
if not build.has_key("sourceStamp"):
continue
if build["text"][1] == "successful":
succeeded = True
elif not failing_tests:
failing_tests = _ExtractFailingTests(build)
revision = 0
if build["sourceStamp"]["branch"] == "trunk":
revision = int(build["sourceStamp"]["changes"][-1]["revision"])
if revision and not last_run_revision:
last_run_revision = revision
if revision and revision < oldest_revision_to_check:
break
if not succeeded or not revision:
continue
return _BuildResult(builder, revision, last_run_revision, failing_tests)
return _BuildResult(builder, 0, last_run_revision, failing_tests)
def _PrintPassingRevisions(results, unused_verbose):
"""Prints passing revisions and the range of such revisions.
Args:
results: A list of build results.
"""
print "**** Passing revisions *****"
min_passing_revision = sys.maxint
max_passing_revision = 0
for result in results:
if result.last_passing_revision:
min_passing_revision = min(min_passing_revision,
result.last_passing_revision)
max_passing_revision = max(max_passing_revision,
result.last_passing_revision)
print 'The last passing run was at r%d on "%s"' % (
result.last_passing_revision, result.builder)
else:
print 'No passing runs on "%s"' % result.builder
if max_passing_revision:
print "Passing revision range: r%d - r%d" % (
min_passing_revision, max_passing_revision)
def _PrintFailingRevisions(results, verbose):
"""Prints failing revisions and the failing tests.
Args:
results: A list of build results.
"""
failing_test_to_builders = {}
print "**** Failing revisions *****"
for result in results:
if result.last_run_revision and result.failing_tests:
print ('The last run was at r%d on "%s" and the following %d tests'
' failed' % (result.last_run_revision, result.builder,
len(result.failing_tests)))
for test in result.failing_tests:
print " " + test
failing_test_to_builders.setdefault(test, set()).add(result.builder)
if verbose:
_PrintFailingTestsForBuilderSubsets(failing_test_to_builders)
class _FailingTestsForBuilderSubset(object):
def __init__(self, subset_size):
self._subset_size = subset_size
self._tests = []
def SubsetSize(self):
return self._subset_size
def Tests(self):
return self._tests
def _PrintFailingTestsForBuilderSubsets(failing_test_to_builders):
"""Prints failing test for builder subsets.
Prints failing tests for each subset of builders, in descending order of the
set size.
"""
print "**** Failing tests ****"
builders_to_tests = {}
for test in failing_test_to_builders:
builders = sorted(failing_test_to_builders[test])
subset_name = ", ".join(builders)
tests = builders_to_tests.setdefault(
subset_name, _FailingTestsForBuilderSubset(len(builders))).Tests()
tests.append(test)
# Sort subsets in descending order of size and then name.
builder_subsets = [(builders_to_tests[subset_name].SubsetSize(), subset_name)
for subset_name in builders_to_tests]
for subset_size, subset_name in reversed(sorted(builder_subsets)):
print "** Tests failing for %d builders: %s **" % (subset_size,
subset_name)
for test in sorted(builders_to_tests[subset_name].Tests()):
print test
def _ParseOptions():
"""Parses command-line options."""
parser = optparse.OptionParser(usage="%prog [options] [builders]")
parser.add_option("-m", "--max_builds", type="int",
default=-1,
help="Maximum number of builds to check for each builder."
" Defaults to all builds for which record is"
" available. Checking is ended either when the maximum"
" number is reached, the remaining builds are older"
" than the DEPS WebKit revision, or a passing"
" revision is found.")
parser.add_option("-v", "--verbose", action="store_true", default=False,
dest="verbose")
return parser.parse_args()
def _Main():
"""The main function."""
options, builders = _ParseOptions()
if not builders:
builders = _DEFAULT_BUILDERS
oldest_revision_to_check = _WebkitRevisionInDeps()
if options.max_builds == -1 and oldest_revision_to_check == -1:
options.max_builds = _DEFAULT_MAX_BUILDS
if options.max_builds != -1:
print "Maximum number of builds to check: %d" % options.max_builds
if oldest_revision_to_check != -1:
print "Oldest revision to check: %d" % oldest_revision_to_check
sys.stdout.flush()
results = []
for builder in builders:
print '"%s"' % builder
sys.stdout.flush()
results.append(_RetrieveBuildResult(
builder, options.max_builds, oldest_revision_to_check))
_PrintFailingRevisions(results, options.verbose)
_PrintPassingRevisions(results, options.verbose)
if __name__ == "__main__":
_Main()
|
bsd-3-clause
|
bruteforce1/cryptopals
|
set2/ch10/implement_aes_cbc.py
|
1
|
2510
|
#!/usr/bin/python3
"""
CBC mode is a block cipher mode that allows us to encrypt irregularly-
sized messages, despite the fact that a block cipher natively only
transforms individual blocks.
In CBC mode, each ciphertext block is added to the next plaintext block
before the next call to the cipher core.
The first plaintext block, which has no associated previous ciphertext
block, is added to a "fake 0th ciphertext block" called the
initialization vector, or IV.
Implement CBC mode by hand by taking the ECB function you wrote
earlier, making it encrypt instead of decrypt (verify this by
decrypting whatever you encrypt to test), and using your XOR function
from the previous exercise to combine them.
The file here is intelligible (somewhat) when CBC decrypted against
"YELLOW SUBMARINE" with an IV of all ASCII 0 (\x00\x00\x00 &c)
"""
import argparse
import os
import sys
from utils.cpset2 import aes_cbc, make_b64_printable
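# Minimal sketch of the CBC chaining described in the module docstring above,
# independent of the project's aes_cbc helper from utils.cpset2: each plaintext
# block is XORed with the previous ciphertext block (the IV for the first block)
# before the block cipher is applied. `encrypt_block` is a hypothetical callable
# that encrypts exactly one block; the plaintext is assumed to be padded to a
# multiple of the block size.
def _cbc_encrypt_sketch(plaintext, iv, encrypt_block, block_size=16):
    previous = iv
    ciphertext = b''
    for offset in range(0, len(plaintext), block_size):
        block = plaintext[offset:offset + block_size]
        # XOR the plaintext block with the previous ciphertext block (or the IV).
        xored = bytes(p ^ c for p, c in zip(block, previous))
        previous = encrypt_block(xored)
        ciphertext += previous
    return ciphertext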
def main(filename, key, iv):
print('Input File: ' + str(filename))
print('Key: ' + str(key))
print('IV: ' + str(iv))
crypt = ''
if not os.path.isfile(filename):
print(filename + ' is not a valid file.')
return -1
with open(filename, 'r') as infile:
for line in infile:
crypt += line
ret = aes_cbc(crypt, key, iv, 0)
if ret:
print('Decrypted Contents in: ' + filename + '.dec')
with open(filename + '.dec', 'w') as tf:
tf.write(ret.decode('utf-8'))
un_ret = make_b64_printable(aes_cbc(ret, key, iv))
if un_ret:
print('Encrypted Contents in: ' + filename + '.enc')
with open(filename + '.enc', 'w') as tf:
tf.write(un_ret.decode('utf-8'))
return 0
print('Error.')
return -1
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Implements AES CBC encryption and decryption manually.')
parser.add_argument('-f', '--inputfile', help='opt. file encrypted \
with AES in CBC mode',
default='10.txt')
parser.add_argument('-i', '--iv', help='opt. 16 byte initialization \
vector',
default=chr(0) * 16)
parser.add_argument('-k', '--key', help='opt. 16 byte encryption or \
decryption key',
default='YELLOW SUBMARINE')
args = parser.parse_args()
sys.exit(main(args.inputfile, args.key, args.iv))
|
mit
|
betrisey/home-assistant
|
homeassistant/components/media_player/kodi.py
|
6
|
10407
|
"""
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
"""
import logging
import urllib
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_TURN_OFF, MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME,
CONF_PORT, CONF_USERNAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['jsonrpc-requests==0.3']
_LOGGER = logging.getLogger(__name__)
CONF_TURN_OFF_ACTION = 'turn_off_action'
DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
TURN_OFF_ACTION = [None, 'quit', 'hibernate', 'suspend', 'reboot', 'shutdown']
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TURN_OFF_ACTION, default=None): vol.In(TURN_OFF_ACTION),
vol.Optional(CONF_USERNAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Kodi platform."""
url = '{}:{}'.format(config.get(CONF_HOST), config.get(CONF_PORT))
jsonrpc_url = config.get('url') # deprecated
if jsonrpc_url:
url = jsonrpc_url.rstrip('/jsonrpc')
add_devices([
KodiDevice(
config.get(CONF_NAME),
url,
auth=(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)),
turn_off_action=config.get(CONF_TURN_OFF_ACTION)),
])
class KodiDevice(MediaPlayerDevice):
"""Representation of a XBMC/Kodi device."""
# pylint: disable=too-many-public-methods, abstract-method
# pylint: disable=too-many-instance-attributes
def __init__(self, name, url, auth=None, turn_off_action=None):
"""Initialize the Kodi device."""
import jsonrpc_requests
self._name = name
self._url = url
self._server = jsonrpc_requests.Server(
'{}/jsonrpc'.format(self._url),
auth=auth,
timeout=5)
self._turn_off_action = turn_off_action
self._players = list()
self._properties = None
self._item = None
self._app_properties = None
self.update()
@property
def name(self):
"""Return the name of the device."""
return self._name
def _get_players(self):
"""Return the active player objects or None."""
import jsonrpc_requests
try:
return self._server.Player.GetActivePlayers()
except jsonrpc_requests.jsonrpc.TransportError:
if self._players is not None:
_LOGGER.warning('Unable to fetch kodi data')
_LOGGER.debug('Unable to fetch kodi data', exc_info=True)
return None
@property
def state(self):
"""Return the state of the device."""
if self._players is None:
return STATE_OFF
if len(self._players) == 0:
return STATE_IDLE
if self._properties['speed'] == 0:
return STATE_PAUSED
else:
return STATE_PLAYING
def update(self):
"""Retrieve latest state."""
self._players = self._get_players()
if self._players is not None and len(self._players) > 0:
player_id = self._players[0]['playerid']
assert isinstance(player_id, int)
self._properties = self._server.Player.GetProperties(
player_id,
['time', 'totaltime', 'speed']
)
self._item = self._server.Player.GetItem(
player_id,
['title', 'file', 'uniqueid', 'thumbnail', 'artist']
)['item']
self._app_properties = self._server.Application.GetProperties(
['volume', 'muted']
)
else:
self._properties = None
self._item = None
self._app_properties = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._app_properties is not None:
return self._app_properties['volume'] / 100.0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
if self._app_properties is not None:
return self._app_properties['muted']
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._item is not None:
return self._item.get('uniqueid', None)
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._players is not None and len(self._players) > 0:
return self._players[0]['type']
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._properties is not None:
total_time = self._properties['totaltime']
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
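# Illustrative example (hypothetical values): a 'totaltime' of
# {'hours': 1, 'minutes': 2, 'seconds': 3} yields 1 * 3600 + 2 * 60 + 3 = 3723
# seconds.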
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._item is not None:
return self._get_image_url()
def _get_image_url(self):
"""Helper function that parses the thumbnail URLs used by Kodi."""
url_components = urllib.parse.urlparse(self._item['thumbnail'])
if url_components.scheme == 'image':
return '{}/image/{}'.format(
self._url,
urllib.parse.quote_plus(self._item['thumbnail']))
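# Illustrative example (hypothetical thumbnail): for an item thumbnail such as
# 'image://http%3a%2f%2fexample.com%2fcover.jpg/', the scheme is 'image', so the
# returned URL is self._url + '/image/' + the percent-quoted thumbnail string,
# i.e. the artwork is served through Kodi's own image endpoint.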
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
if self._item is not None:
return self._item.get(
'title',
self._item.get('label', self._item.get('file', 'unknown')))
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
supported_media_commands = SUPPORT_KODI
if self._turn_off_action in TURN_OFF_ACTION:
supported_media_commands |= SUPPORT_TURN_OFF
return supported_media_commands
def turn_off(self):
"""Execute turn_off_action to turn off media player."""
if self._turn_off_action == 'quit':
self._server.Application.Quit()
elif self._turn_off_action == 'hibernate':
self._server.System.Hibernate()
elif self._turn_off_action == 'suspend':
self._server.System.Suspend()
elif self._turn_off_action == 'reboot':
self._server.System.Reboot()
elif self._turn_off_action == 'shutdown':
self._server.System.Shutdown()
else:
_LOGGER.warning('turn_off requested but turn_off_action is none')
self.update_ha_state()
def volume_up(self):
"""Volume up the media player."""
assert self._server.Input.ExecuteAction('volumeup') == 'OK'
self.update_ha_state()
def volume_down(self):
"""Volume down the media player."""
assert self._server.Input.ExecuteAction('volumedown') == 'OK'
self.update_ha_state()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._server.Application.SetVolume(int(volume * 100))
self.update_ha_state()
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._server.Application.SetMute(mute)
self.update_ha_state()
def _set_play_state(self, state):
"""Helper method for play/pause/toggle."""
players = self._get_players()
if len(players) != 0:
self._server.Player.PlayPause(players[0]['playerid'], state)
self.update_ha_state()
def media_play_pause(self):
"""Toggle play/pause on the media player."""
self._set_play_state('toggle')
def media_play(self):
"""Play media."""
self._set_play_state(True)
def media_pause(self):
"""Pause the media player."""
self._set_play_state(False)
def media_stop(self):
"""Stop the media player."""
players = self._get_players()
if len(players) != 0:
self._server.Player.Stop(players[0]['playerid'])
def _goto(self, direction):
"""Helper method used for previous/next track."""
players = self._get_players()
if len(players) != 0:
self._server.Player.GoTo(players[0]['playerid'], direction)
self.update_ha_state()
def media_next_track(self):
"""Send next track command."""
self._goto('next')
def media_previous_track(self):
"""Send previous track command."""
# First seek to position 0; Kodi otherwise only jumps to the beginning
# of the current track when the current track is not already at the beginning.
self.media_seek(0)
self._goto('previous')
def media_seek(self, position):
"""Send seek command."""
players = self._get_players()
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if len(players) != 0:
self._server.Player.Seek(players[0]['playerid'], time)
self.update_ha_state()
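# Illustrative example (hypothetical position): media_seek(3723.5) sends
# {'hours': 1, 'minutes': 2, 'seconds': 3, 'milliseconds': 500} to Player.Seek,
# i.e. 1 hour, 2 minutes and 3.5 seconds into the current item.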
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
if media_type == "CHANNEL":
self._server.Player.Open({"item": {"channelid": int(media_id)}})
else:
self._server.Player.Open({"item": {"file": str(media_id)}})
|
mit
|
liqi328/rjrepaircompany
|
django/core/files/uploadedfile.py
|
402
|
4225
|
"""
Classes representing uploaded files.
"""
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import smart_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
def __repr__(self):
return "<%s: %s (%s)>" % (
self.__class__.__name__, smart_str(self.name), self.content_type)
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
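# Illustrative example (hypothetical value): assigning
# uploaded.name = '../../tmp/evil.txt' stores only the basename 'evil.txt',
# and a name longer than 255 characters is truncated while its extension is
# preserved.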
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError, e:
if e.errno != 2:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def close(self):
pass
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or ''
super(SimpleUploadedFile, self).__init__(StringIO(content), None, name,
content_type, len(content), None)
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
from_dict = classmethod(from_dict)
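# Illustrative usage (hypothetical values):
#     SimpleUploadedFile.from_dict({'filename': 'hello.txt', 'content': 'hi'})
# returns a SimpleUploadedFile named 'hello.txt' of size 2 with the default
# 'text/plain' content type.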
|
bsd-3-clause
|
phihag/youtube-dl
|
youtube_dl/extractor/karrierevideos.py
|
25
|
3379
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
fix_xml_ampersands,
float_or_none,
xpath_with_ns,
xpath_text,
)
class KarriereVideosIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?karrierevideos\.at(?:/[^/]+)+/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.karrierevideos.at/berufsvideos/mittlere-hoehere-schulen/altenpflegerin',
'info_dict': {
'id': '32c91',
'ext': 'flv',
'title': 'AltenpflegerIn',
'description': 'md5:dbadd1259fde2159a9b28667cb664ae2',
'thumbnail': r're:^http://.*\.png',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
# broken ampersands
'url': 'http://www.karrierevideos.at/orientierung/vaeterkarenz-und-neue-chancen-fuer-muetter-baby-was-nun',
'info_dict': {
'id': '5sniu',
'ext': 'flv',
'title': 'Väterkarenz und neue Chancen für Mütter - "Baby - was nun?"',
'description': 'md5:97092c6ad1fd7d38e9d6a5fdeb2bcc33',
'thumbnail': r're:^http://.*\.png',
},
'params': {
# rtmp download
'skip_download': True,
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = (self._html_search_meta('title', webpage, default=None) or
self._search_regex(r'<h1 class="title">([^<]+)</h1>', webpage, 'video title'))
video_id = self._search_regex(
r'/config/video/(.+?)\.xml', webpage, 'video id')
# Server returns malformed headers
# Force Accept-Encoding: * to prevent gzipped results
playlist = self._download_xml(
'http://www.karrierevideos.at/player-playlist.xml.php?p=%s' % video_id,
video_id, transform_source=fix_xml_ampersands,
headers={'Accept-Encoding': '*'})
NS_MAP = {
'jwplayer': 'http://developer.longtailvideo.com/trac/wiki/FlashFormats'
}
def ns(path):
return xpath_with_ns(path, NS_MAP)
item = playlist.find('./tracklist/item')
video_file = xpath_text(
item, ns('./jwplayer:file'), 'video url', fatal=True)
streamer = xpath_text(
item, ns('./jwplayer:streamer'), 'streamer', fatal=True)
uploader = xpath_text(
item, ns('./jwplayer:author'), 'uploader')
duration = float_or_none(
xpath_text(item, ns('./jwplayer:duration'), 'duration'))
description = self._html_search_regex(
r'(?s)<div class="leadtext">(.+?)</div>',
webpage, 'description')
thumbnail = self._html_search_meta(
'thumbnail', webpage, 'thumbnail')
if thumbnail:
thumbnail = compat_urlparse.urljoin(url, thumbnail)
return {
'id': video_id,
'url': streamer.replace('rtmpt', 'rtmp'),
'play_path': 'mp4:%s' % video_file,
'ext': 'flv',
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'duration': duration,
}
|
unlicense
|
girishverma/linux_cst_bsp
|
tools/perf/util/setup.py
|
2079
|
1438
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
gpl-2.0
|
charanpald/features
|
features/test/PrimalCCATest.py
|
1
|
3226
|
import unittest
import numpy
import scipy.linalg
from features.PrimalCCA import PrimalCCA
from features.KernelCCA import KernelCCA
from kernel.LinearKernel import LinearKernel
import logging
class PrimalCCATest(unittest.TestCase):
def setUp(self):
numpy.seterr(all='ignore')
pass
def testLearnModel(self):
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = X
tau = 0.0
        tol = 10**-6
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
Y = X*2
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(u-v) < tol)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
#Rotate X to form Y
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
u, v, lmbdas = cca.learnModel(X, Y)
self.assertTrue(numpy.linalg.norm(lmbdas-numpy.ones(numFeatures)) < tol)
def testProject(self):
#Test if it is the same as KCCA
numExamples = 50
numFeatures = 10
X = numpy.random.rand(numExamples, numFeatures)
Y = numpy.random.rand(numExamples, numFeatures)
tau = 0.0
        tol = 10**-6
k = 5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas2 = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
#Seem to get an error in this for some reason
#self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
#self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
#Now try with different tau
tau = 0.5
cca = PrimalCCA(tau)
u, v, lmbdas = cca.learnModel(X, Y)
XU, YU = cca.project(X, Y, k)
kernel = LinearKernel()
kcca = KernelCCA(kernel, kernel, tau)
alpha, beta, lmbdas = kcca.learnModel(X, Y)
XU2, YU2 = kcca.project(X, Y, k)
self.assertTrue(numpy.linalg.norm(XU-XU2) < tol)
self.assertTrue(numpy.linalg.norm(YU-YU2) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(XU.T, XU) - numpy.ones(k)) < tol)
self.assertTrue(numpy.linalg.norm(numpy.dot(YU.T, YU) - numpy.ones(k)) < tol)
def testGetY(self):
#Test if we can recover Y from X
numExamples = 10
numFeatures = 5
X = numpy.random.rand(numExamples, numFeatures)
Z = numpy.random.rand(numFeatures, numFeatures)
ZZ = numpy.dot(Z.T, Z)
(D, W) = scipy.linalg.eig(ZZ)
Y = numpy.dot(X, W)
tau = 0.0
cca = PrimalCCA(tau)
U, V, lmbdas = cca.learnModel(X, Y)
Yhat = X.dot(U).dot(V.T).dot(numpy.linalg.inv(numpy.dot(V, V.T)))
logging.debug((numpy.abs(Yhat- Y)))
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
myt00seven/svrg
|
cifar/alexnet_lasagne/lasagne-googlenet-master/googlenet/layers/bn.py
|
1
|
4243
|
import numpy as np
import theano.tensor as T
import theano
from lasagne import init # from .. import init
from lasagne import nonlinearities # from .. import nonlinearities
from lasagne.layers.base import Layer # from .base import Layer
__all__ = [
"BNLayer",
]
class BNLayer(Layer):
"""
    lasagne.layers.BNLayer(incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6, **kwargs)
A batch normalization layer.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
    gamma : float
        Initial constant value for the per-channel scale parameters of the
        batch normalization (one value per feature/channel of the input).
    beta : float
        Initial constant value for the per-channel shift parameters of the
        batch normalization (one value per feature/channel of the input).
    epsilon : float
        Small constant added to the batch variance for numerical stability.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
Examples
--------
>>> from lasagne.layers import InputLayer, DenseLayer
>>> l_in = InputLayer((100, 20))
>>> l1 = BNLayer(l_in)
Notes
-----
    The incoming layer must produce either a 2d matrix (batch, features) or a
    4d tensor (batch, channels, rows, columns); any other input shape raises a
    ``ValueError``. Normalization statistics are computed over the batch axis
    (and, for 4d input, the spatial axes).
"""
def __init__(self, incoming, gamma=1.0, beta=0., nonlinearity=None, epsilon=1e-6,
**kwargs):
super(BNLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (nonlinearities.identity if nonlinearity is None
else nonlinearity)
# get output shape of incoming
#self.n_channels = self.input_shape[1]
#print self.input_shape
#raise NameError("Hi")
self.epsilon = epsilon
        if len(self.input_shape) == 4:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0,'x','x'))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0,'x','x'))
        elif len(self.input_shape) == 2:
self.gamma = self.add_param(init.Constant(gamma), (self.input_shape[1],), name='gamma', regularizable=False).dimshuffle(('x',0))
self.beta = self.add_param(init.Constant(beta), (self.input_shape[1],), name='beta', regularizable=False).dimshuffle(('x',0))
else: # input should be 4d tensor or 2d matrix
raise ValueError('input of BNLayer should be 4d tensor or 2d matrix')
# done init
def get_output_shape_for(self, input_shape):
#return (input_shape[0], self.num_units)
return input_shape
def get_output_for(self, input, **kwargs):
        if input.ndim == 4:  # 4d tensor
self.mean = T.mean(input, axis=[0, 2, 3], keepdims=True) #self.mean = T.mean(input, axis=[0, 2, 3]).dimshuffle(('x', 0, 'x', 'x'))
#self.var = T.std(input, axis=[0, 2, 3], keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=[0, 2, 3], keepdims=True) / np.array([self.input_shape[0] * self.input_shape[2] * self.input_shape[3]], dtype=theano.config.floatX)
else: # elif input.ndim is 2: # 2d matrix
self.mean = T.mean(input, axis=0, keepdims=True) #self.mean = T.mean(input, axis=0).dimshuffle(('x',0))
#self.var = T.std(input, axis=0, keepdims=True)
self.var = T.sum(T.sqr(input - self.mean), axis=0, keepdims=True) / np.array([self.input_shape[0]], dtype=theano.config.floatX)
activation = (input - self.mean) / T.sqrt(self.var + self.epsilon)
activation = self.gamma * activation + self.beta
return self.nonlinearity(activation)
|
mit
|
qrsforever/workspace
|
python/learn/thinkstats/rankit.py
|
1
|
1807
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import random
import thinkstats
import myplot
import matplotlib.pyplot as pyplot
def Sample(n=6):
"""Generates a sample from a standard normal variate.
n: sample size
Returns: list of n floats
"""
t = [random.normalvariate(0.0, 1.0) for i in range(n)]
t.sort()
return t
def Samples(n=6, m=1000):
"""Generates m samples with size n each.
n: sample size
m: number of samples
Returns: list of m samples
"""
t = [Sample(n) for i in range(m)]
return t
def EstimateRankits(n=6, m=1000):
"""Estimates the expected values of sorted random samples.
n: sample size
m: number of iterations
Returns: list of n rankits
"""
t = Samples(n, m)
t = zip(*t)
means = [thinkstats.Mean(x) for x in t]
return means
def MakeNormalPlot(ys, root=None, line_options={}, **options):
"""Makes a normal probability plot.
Args:
ys: sequence of values
line_options: dictionary of options for pyplot.plot
options: dictionary of options for myplot.Save
"""
# TODO: when n is small, generate a larger sample and desample
n = len(ys)
xs = [random.normalvariate(0.0, 1.0) for i in range(n)]
pyplot.clf()
pyplot.plot(sorted(xs), sorted(ys), 'b.', markersize=3, **line_options)
myplot.Save(root,
xlabel = 'Standard normal values',
legend=False,
**options)
def main():
means = EstimateRankits(84)
print(means)
if __name__ == "__main__":
main()
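# Illustrative use (a sketch, not part of the original script): MakeNormalPlot
# writes a normal probability plot for an arbitrary sequence of values via
# myplot.Save, as documented above; the sample and output root are made up.
#
#     ys = [random.expovariate(1.0) for _ in range(500)]
#     MakeNormalPlot(ys, root='normal_plot_example')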
|
mit
|
spvkgn/youtube-dl
|
youtube_dl/extractor/archiveorg.py
|
24
|
2489
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
unified_strdate,
clean_html,
)
class ArchiveOrgIE(InfoExtractor):
IE_NAME = 'archive.org'
IE_DESC = 'archive.org videos'
_VALID_URL = r'https?://(?:www\.)?archive\.org/(?:details|embed)/(?P<id>[^/?#]+)(?:[?].*)?$'
_TESTS = [{
'url': 'http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'md5': '8af1d4cf447933ed3c7f4871162602db',
'info_dict': {
'id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'ext': 'ogg',
'title': '1968 Demo - FJCC Conference Presentation Reel #1',
'description': 'md5:da45c349df039f1cc8075268eb1b5c25',
'upload_date': '19681210',
'uploader': 'SRI International'
}
}, {
'url': 'https://archive.org/details/Cops1922',
'md5': '0869000b4ce265e8ca62738b336b268a',
'info_dict': {
'id': 'Cops1922',
'ext': 'mp4',
'title': 'Buster Keaton\'s "Cops" (1922)',
'description': 'md5:89e7c77bf5d965dd5c0372cfb49470f6',
}
}, {
'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://archive.org/embed/' + video_id, video_id)
jwplayer_playlist = self._parse_json(self._search_regex(
r"(?s)Play\('[^']+'\s*,\s*(\[.+\])\s*,\s*{.*?}\)",
webpage, 'jwplayer playlist'), video_id)
info = self._parse_jwplayer_data(
{'playlist': jwplayer_playlist}, video_id, base_url=url)
def get_optional(metadata, field):
return metadata.get(field, [None])[0]
metadata = self._download_json(
'http://archive.org/details/' + video_id, video_id, query={
'output': 'json',
})['metadata']
info.update({
'title': get_optional(metadata, 'title') or info.get('title'),
'description': clean_html(get_optional(metadata, 'description')),
})
if info.get('_type') != 'playlist':
info.update({
'uploader': get_optional(metadata, 'creator'),
'upload_date': unified_strdate(get_optional(metadata, 'date')),
})
return info
|
unlicense
|
codeforamerica/heroku-buildpack-pygeo
|
vendor/distribute-0.6.36/setuptools/tests/test_markerlib.py
|
71
|
2237
|
import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
@skipIf('ast' not in globals(),
"ast not available (Python < 2.6?)")
def test_markers(self):
from _markerlib import interpret, default_environment, compile
os_name = os.name
self.assertTrue(interpret(""))
self.assertTrue(interpret("os.name != 'buuuu'"))
self.assertTrue(interpret("python_version > '1.0'"))
self.assertTrue(interpret("python_version < '5.0'"))
self.assertTrue(interpret("python_version <= '5.0'"))
self.assertTrue(interpret("python_version >= '1.0'"))
self.assertTrue(interpret("'%s' in os.name" % os_name))
self.assertTrue(interpret("'buuuu' not in os.name"))
self.assertFalse(interpret("os.name == 'buuuu'"))
self.assertFalse(interpret("python_version < '1.0'"))
self.assertFalse(interpret("python_version > '5.0'"))
self.assertFalse(interpret("python_version >= '5.0'"))
self.assertFalse(interpret("python_version <= '1.0'"))
self.assertFalse(interpret("'%s' not in os.name" % os_name))
self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))
environment = default_environment()
environment['extra'] = 'test'
self.assertTrue(interpret("extra == 'test'", environment))
self.assertFalse(interpret("extra == 'doc'", environment))
def raises_nameError():
try:
interpret("python.version == '42'")
except NameError:
pass
else:
raise Exception("Expected NameError")
raises_nameError()
def raises_syntaxError():
try:
interpret("(x for x in (4,))")
except SyntaxError:
pass
else:
raise Exception("Expected SyntaxError")
raises_syntaxError()
statement = "python_version == '5'"
self.assertEqual(compile(statement).__doc__, statement)
|
mit
|
hubsaysnuaa/odoo
|
addons/hw_scanner/controllers/main.py
|
158
|
7486
|
# -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('Odoo module hw_scanner depends on the evdev python module')
evdev = None
class Scanner(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/input/by-id/'
self.barcodes = Queue()
self.keymap = {
2: ("1","!"),
3: ("2","@"),
4: ("3","#"),
5: ("4","$"),
6: ("5","%"),
7: ("6","^"),
8: ("7","&"),
9: ("8","*"),
10:("9","("),
11:("0",")"),
12:("-","_"),
13:("=","+"),
# 14 BACKSPACE
# 15 TAB
16:("q","Q"),
17:("w","W"),
18:("e","E"),
19:("r","R"),
20:("t","T"),
21:("y","Y"),
22:("u","U"),
23:("i","I"),
24:("o","O"),
25:("p","P"),
26:("[","{"),
27:("]","}"),
# 28 ENTER
# 29 LEFT_CTRL
30:("a","A"),
31:("s","S"),
32:("d","D"),
33:("f","F"),
34:("g","G"),
35:("h","H"),
36:("j","J"),
37:("k","K"),
38:("l","L"),
39:(";",":"),
40:("'","\""),
41:("`","~"),
# 42 LEFT SHIFT
43:("\\","|"),
44:("z","Z"),
45:("x","X"),
46:("c","C"),
47:("v","V"),
48:("b","B"),
49:("n","N"),
50:("m","M"),
51:(",","<"),
52:(".",">"),
53:("/","?"),
# 54 RIGHT SHIFT
57:(" "," "),
}
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message = None):
if status == self.status['status']:
            if message is not None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Barcode Scanner Error: '+message)
elif status == 'disconnected' and message:
_logger.info('Disconnected Barcode Scanner: %s', message)
def get_device(self):
try:
if not evdev:
return None
devices = [ device for device in listdir(self.input_dir)]
keyboards = [ device for device in devices if ('kbd' in device) and ('keyboard' not in device.lower())]
scanners = [ device for device in devices if ('barcode' in device.lower()) or ('scanner' in device.lower())]
if len(scanners) > 0:
self.set_status('connected','Connected to '+scanners[0])
return evdev.InputDevice(join(self.input_dir,scanners[0]))
elif len(keyboards) > 0:
self.set_status('connected','Connected to '+keyboards[0])
return evdev.InputDevice(join(self.input_dir,keyboards[0]))
else:
self.set_status('disconnected','Barcode Scanner Not Found')
return None
except Exception as e:
self.set_status('error',str(e))
return None
def get_barcode(self):
""" Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
        return a barcode scanned in the past if it is not older than 5 seconds and has not
        been returned before. This is necessary to catch barcodes scanned while the POS is
        busy reading another barcode.
"""
self.lockedstart()
while True:
try:
timestamp, barcode = self.barcodes.get(True, 5)
if timestamp > time.time() - 5:
return barcode
except Empty:
return ''
def get_status(self):
self.lockedstart()
return self.status
def run(self):
""" This will start a loop that catches all keyboard events, parse barcode
sequences and put them on a timestamped queue that can be consumed by
the point of sale's requests for barcode events
"""
self.barcodes = Queue()
barcode = []
shift = False
device = None
while True: # barcodes loop
if device: # ungrab device between barcodes and timeouts for plug & play
try:
device.ungrab()
except Exception as e:
device = None
self.set_status('error',str(e))
else:
time.sleep(5) # wait until a suitable device is plugged
device = self.get_device()
if not device:
continue
try:
device.grab()
shift = False
barcode = []
while True: # keycode loop
r,w,x = select([device],[],[],5)
if len(r) == 0: # timeout
break
events = device.read()
for event in events:
if event.type == evdev.ecodes.EV_KEY:
#_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
if event.value == 1: # keydown events
if event.code in self.keymap:
if shift:
barcode.append(self.keymap[event.code][1])
else:
barcode.append(self.keymap[event.code][0])
elif event.code == 42 or event.code == 54: # SHIFT
shift = True
elif event.code == 28: # ENTER, end of barcode
self.barcodes.put( (time.time(),''.join(barcode)) )
barcode = []
elif event.value == 0: #keyup events
if event.code == 42 or event.code == 54: # LEFT SHIFT
shift = False
except Exception as e:
self.set_status('error',str(e))
scanner_thread = None
if evdev:
scanner_thread = Scanner()
hw_proxy.drivers['scanner'] = scanner_thread
class ScannerDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
def scanner(self):
return scanner_thread.get_barcode() if scanner_thread else None
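# Illustrative polling loop (a sketch, not part of the original module): per the
# Scanner.get_barcode() docstring above, each call blocks for up to 5 seconds and
# returns either a barcode string or '' on timeout, so a client can poll it directly.
#
#     while True:
#         barcode = scanner_thread.get_barcode() if scanner_thread else ''
#         if barcode:
#             _logger.info('scanned barcode: %s', barcode)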
|
agpl-3.0
|
deiga/robotframework-selenium2library
|
src/Selenium2Library/locators/tableelementfinder.py
|
31
|
3986
|
from selenium.common.exceptions import NoSuchElementException
from Selenium2Library import utils
from elementfinder import ElementFinder
class TableElementFinder(object):
def __init__(self, element_finder=None):
if not element_finder:
element_finder = ElementFinder()
self._element_finder = element_finder
self._locator_suffixes = {
('css', 'default'): [''],
('css', 'content'): [''],
('css', 'header'): [' th'],
('css', 'footer'): [' tfoot td'],
('css', 'row'): [' tr:nth-child(%s)'],
('css', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('jquery', 'default'): [''],
('jquery', 'content'): [''],
('jquery', 'header'): [' th'],
('jquery', 'footer'): [' tfoot td'],
('jquery', 'row'): [' tr:nth-child(%s)'],
('jquery', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('sizzle', 'default'): [''],
('sizzle', 'content'): [''],
('sizzle', 'header'): [' th'],
('sizzle', 'footer'): [' tfoot td'],
('sizzle', 'row'): [' tr:nth-child(%s)'],
('sizzle', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('xpath', 'default'): [''],
('xpath', 'content'): ['//*'],
('xpath', 'header'): ['//th'],
('xpath', 'footer'): ['//tfoot//td'],
('xpath', 'row'): ['//tr[%s]//*'],
('xpath', 'col'): ['//tr//*[self::td or self::th][%s]']
        }
def find(self, browser, table_locator):
locators = self._parse_table_locator(table_locator, 'default')
return self._search_in_locators(browser, locators, None)
def find_by_content(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'content')
return self._search_in_locators(browser, locators, content)
def find_by_header(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'header')
return self._search_in_locators(browser, locators, content)
def find_by_footer(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'footer')
return self._search_in_locators(browser, locators, content)
def find_by_row(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'row')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def find_by_col(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'col')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def _parse_table_locator(self, table_locator, location_method):
if table_locator.startswith('xpath='):
table_locator_type = 'xpath'
elif table_locator.startswith('jquery=') or table_locator.startswith('sizzle='):
table_locator_type = 'sizzle'
else:
if not table_locator.startswith('css='):
table_locator = "css=table#%s" % table_locator
table_locator_type = 'css'
locator_suffixes = self._locator_suffixes[(table_locator_type, location_method)]
return map(
lambda locator_suffix: table_locator + locator_suffix,
locator_suffixes)
def _search_in_locators(self, browser, locators, content):
for locator in locators:
elements = self._element_finder.find(browser, locator)
for element in elements:
if content is None: return element
element_text = element.text
if element_text and content in element_text:
return element
return None
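# Illustrative usage (a sketch, not part of the original module): bare locators are
# normalized by _parse_table_locator() above to 'css=table#<value>', while explicit
# 'css=', 'xpath=', 'jquery=' and 'sizzle=' prefixes are kept; the table id and cell
# text below are made-up examples.
#
#     finder = TableElementFinder()
#     cell = finder.find_by_col(browser, 'orders', 2, 'Pending')
#     header = finder.find_by_header(browser, 'css=table#orders', 'Status')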
|
apache-2.0
|
janiheikkinen/irods
|
packaging/update_configuration_schema.py
|
2
|
2771
|
from __future__ import print_function
import json
import os
import subprocess
import sys
import time
DEBUG = True
DEBUG = False
def print_debug(*args, **kwargs):
if DEBUG:
print(*args, **kwargs)
def print_error(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_current_schema_version():
# read from version file
if os.path.isfile('/var/lib/irods/packaging/binary_installation.flag'):
version_file = os.path.abspath('/var/lib/irods/VERSION.json')
else:
version_file = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/VERSION.json'
with open(version_file) as fh:
data = json.load(fh)
current_schema_version = data['previous_version']['configuration_schema_version']
print_debug('current_schema_version: {0}'.format(current_schema_version))
return current_schema_version
def get_target_schema_version():
# read from version file
if os.path.isfile('/var/lib/irods/packaging/binary_installation.flag'):
version_file = os.path.abspath('/var/lib/irods/VERSION.json')
else:
version_file = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/VERSION.json'
with open(version_file) as fh:
data = json.load(fh)
target_schema_version = data['configuration_schema_version']
print_debug('target_schema_version: {0}'.format(target_schema_version))
return target_schema_version
def update_configuration_files(version):
print('Updating to Configuration Schema... %d' % version)
# manipulate the configuration files
print_debug('PLACEHOLDER')
# success
print_debug('SUCCESS, updated configuration_schema_version to %d' % version)
def update_configuration_to_latest_version():
# get current version
current_schema_version = get_current_schema_version()
# get target version
target_schema_version = get_target_schema_version()
# check if any work to be done
if current_schema_version > target_schema_version:
print_error('Configuration Schema Version is from the future (current=%d > target=%d).' % (
current_schema_version, target_schema_version))
return
if current_schema_version == target_schema_version:
print('Configuration Schema Version is already up to date (version=%d).' % target_schema_version)
return
# surgically alter existing version with any new information, with defaults
update_configuration_files(target_schema_version)
# done
print('Done.')
def main():
print_debug('-------------------- DEBUG IS ON --------------------')
update_configuration_to_latest_version()
print_debug('-------------------- DEBUG IS ON --------------------')
if __name__ == '__main__':
main()
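# Illustrative VERSION.json fragment (a sketch; the two keys are the ones read by
# get_current_schema_version() and get_target_schema_version() above, the values
# are made up):
#
#     {
#         "previous_version": {"configuration_schema_version": 2},
#         "configuration_schema_version": 3
#     }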
|
bsd-3-clause
|
baliga-lab/weeder_patched
|
python/seqtools.py
|
1
|
3069
|
HAMMING_MAX = 9999
def read_sequences_from_fasta_string(fasta_string):
"""reads the sequences contained in a FASTA string"""
lines = fasta_string.split('\n')
sequences = []
seqbuffer = ""
seqname = None
for line in lines:
line = line.strip()
if line.startswith('>'):
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
seqbuffer = ""
seqname = line[1:]
elif line and len(line) > 0:
seqbuffer += line
# add the last line
if len(seqbuffer) > 0:
sequences.append((seqname, seqbuffer))
return sequences
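# Illustrative input/output (a sketch, not part of the original module): for the
# FASTA string below, read_sequences_from_fasta_string() returns a list of
# (name, sequence) tuples; the names and sequences are made-up examples.
#
#     fasta = ">seq1\nACGT\nACGT\n>seq2\nTTGACA\n"
#     read_sequences_from_fasta_string(fasta)
#     # -> [('seq1', 'ACGTACGT'), ('seq2', 'TTGACA')]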
def read_sequences_from_fasta_file(filepath):
"""Read the sequences from the specified FASTA file"""
with open(filepath) as inputfile:
fasta_string = inputfile.read()
return read_sequences_from_fasta_string(fasta_string)
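# revcomp() below calls a revchar() helper that is not defined in this excerpt.
# The following is a minimal sketch of the usual DNA base-complement mapping,
# added here as an assumption about revchar()'s intended behaviour; characters
# outside ACGT (in either case) are returned unchanged.
_COMPLEMENT = {'a': 't', 'c': 'g', 'g': 'c', 't': 'a',
               'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
def revchar(c):
    """Return the complementary base for a single nucleotide character (assumed)."""
    return _COMPLEMENT.get(c, c)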
def revcomp(sequence):
"""compute the reverse complement of the input string"""
return "".join([revchar(c) for c in sequence[::-1]])
def overlap(str1, str2, checkreverse):
result = False
overlapping = True
for l in range(1, 3):
for i in range(len(str1) - l):
if i >= len(str2) or str1[i + l] != str2[i]:
overlapping = False
break
if overlapping:
result = True
overlapping = True
for i in range(len(str1) - l):
if (i + l) >= len(str2) or str1[i] != str2[i + l]:
overlapping = False
break
if overlapping:
result = True
if checkreverse:
rev_result = overlap(str1[::-1], str2, False)
if rev_result:
result = True
return result
def hamming_distance(str1, str2, checkreverse):
dist_forward = 0
dist_reverse = HAMMING_MAX
if len(str1) != len(str2) or str1 == str2:
return HAMMING_MAX
for i in range(len(str1)):
if str1[i] != str2[i]:
dist_forward += 1
if not checkreverse:
return dist_forward
else:
rev = str1[::-1]
for i in range(len(str1)):
if rev[i] != str2[i]:
dist_reverse += 1
if dist_reverse < dist_forward:
return dist_reverse
else:
return dist_forward
def inside(str1, str2, checkreverse):
len1 = len(str1)
len2 = len(str2)
result = False
if (len2 - len1) != 2:
return False
for i in range(len2 - len1 + 1):
match = True
for j in range(i, i + len1):
if str1[j - i] != str2[j]:
match = False
break
if match:
result = True
if checkreverse:
rev_result = inside(str1[::-1], str2, False)
if rev_result:
result = True
return result
def char_to_int(c):
    c = c.lower()
    if c == 'a':
        return 0
    elif c == 'c':
        return 1
    elif c == 'g':
        return 2
    elif c == 't':
        return 3
    elif c == '$':
        return 4
    else:
        return -1
|
gpl-3.0
|
mohanprasath/Course-Work
|
data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part03-e05_correlation/test/test_correlation.py
|
1
|
2795
|
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.correlation"
correlations = load(module_name, "correlations")
lengths = load(module_name, "lengths")
def patch_name(m, d):
import importlib
parts=d.split(".")
try:
getattr(importlib.import_module(m), parts[-1])
p=".".join([m, parts[-1]])
except ModuleNotFoundError:
raise
except AttributeError:
if len(parts) == 1:
raise
try:
getattr(importlib.import_module(m), parts[-2])
p=".".join([m] + parts[-2:])
except AttributeError:
if len(parts) == 2:
raise
getattr(importlib.import_module(m), parts[-3])
p=".".join([m] + parts[-3:])
return p
class Correlation(unittest.TestCase):
@points('p03-05.1')
def test_lengths(self):
result = lengths()
self.assertAlmostEqual(result, 0.8717537758865832, places=4, msg="Wrong correlation!")
@points('p03-05.1')
def test_lengths_calls(self):
with patch(patch_name(module_name, "scipy.stats.pearsonr")) as pcorr:
result = lengths()
pcorr.assert_called()
@points('p03-05.2')
def test_correlations(self):
result = correlations()
n, m = result.shape
for r in range(n):
for c in range(r):
self.assertAlmostEqual(result[r,c], result[c,r], places=4,
msg="The correlation matrix is not symmetric!")
self.assertAlmostEqual(result[r,r], 1, places=4, msg="Values on the diagonal should be one!")
self.assertAlmostEqual(result[0,1], -0.11756978, places=4,
msg="Incorrect value in position [0,1]!")
self.assertAlmostEqual(result[0,2], 0.87175378, places=4,
msg="Incorrect value in position [0,2]!")
self.assertAlmostEqual(result[0,3], 0.81794113, places=4,
msg="Incorrect value in position [0,3]!")
self.assertAlmostEqual(result[1,2], -0.4284401, places=4,
msg="Incorrect value in position [1,2]!")
self.assertAlmostEqual(result[1,3], -0.36612593, places=4,
msg="Incorrect value in position [1,3]!")
self.assertAlmostEqual(result[2,3], 0.96286543, places=4,
msg="Incorrect value in position [2,3]!")
@points('p03-05.2')
    def test_correlations_calls(self):
with patch(patch_name(module_name, "np.corrcoef")) as pcorr:
result = correlations()
pcorr.assert_called()
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
hobarrera/todoman
|
tests/test_filtering.py
|
1
|
8808
|
from datetime import datetime
from datetime import timedelta
from todoman.cli import cli
from todoman.model import Database
from todoman.model import Todo
def test_priority(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nPRIORITY:4\n")
create("two.ics", "SUMMARY:hoho\nPRIORITY:9\n")
create("three.ics", "SUMMARY:hehe\nPRIORITY:5\n")
create("four.ics", "SUMMARY:huhu\n")
result_high = runner.invoke(cli, ["list", "--priority=high"])
assert not result_high.exception
assert "haha" in result_high.output
assert "hoho" not in result_high.output
assert "huhu" not in result_high.output
assert "hehe" not in result_high.output
result_medium = runner.invoke(cli, ["list", "--priority=medium"])
assert not result_medium.exception
assert "haha" in result_medium.output
assert "hehe" in result_medium.output
assert "hoho" not in result_medium.output
assert "huhu" not in result_medium.output
result_low = runner.invoke(cli, ["list", "--priority=low"])
assert not result_low.exception
assert "haha" in result_low.output
assert "hehe" in result_low.output
assert "hoho" in result_low.output
assert "huhu" not in result_low.output
result_none = runner.invoke(cli, ["list", "--priority=none"])
assert not result_none.exception
assert "haha" in result_none.output
assert "hehe" in result_none.output
assert "hoho" in result_none.output
assert "huhu" in result_none.output
result_error = runner.invoke(cli, ["list", "--priority=blah"])
assert result_error.exception
def test_location(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nLOCATION: The Pool\n")
create("two.ics", "SUMMARY:hoho\nLOCATION: The Dungeon\n")
    create("three.ics", "SUMMARY:harhar\n")
result = runner.invoke(cli, ["list", "--location", "Pool"])
assert not result.exception
assert "haha" in result.output
assert "hoho" not in result.output
assert "harhar" not in result.output
def test_category(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create("one.ics", "SUMMARY:haha\nCATEGORIES:work,trip\n")
create("two.ics", "CATEGORIES:trip\nSUMMARY:hoho\n")
create("three.ics", "SUMMARY:harhar\n")
result = runner.invoke(cli, ["list", "--category", "work"])
assert not result.exception
assert "haha" in result.output
assert "hoho" not in result.output
assert "harhar" not in result.output
def test_grep(tmpdir, runner, create):
result = runner.invoke(cli, ["list"], catch_exceptions=False)
assert not result.exception
assert not result.output.strip()
create(
"one.ics",
"SUMMARY:fun\nDESCRIPTION: Have fun!\n",
)
create(
"two.ics",
"SUMMARY:work\nDESCRIPTION: The stuff for work\n",
)
create(
"three.ics",
"SUMMARY:buy sandwiches\nDESCRIPTION: This is for the Duke\n",
)
create(
"four.ics",
"SUMMARY:puppies\nDESCRIPTION: Feed the puppies\n",
)
create(
"five.ics",
"SUMMARY:research\nDESCRIPTION: Cure cancer\n",
)
create("six.ics", "SUMMARY:hoho\n")
result = runner.invoke(cli, ["list", "--grep", "fun"])
assert not result.exception
assert "fun" in result.output
assert "work" not in result.output
assert "sandwiches" not in result.output
assert "puppies" not in result.output
assert "research" not in result.output
assert "hoho" not in result.output
def test_filtering_lists(tmpdir, runner, create):
tmpdir.mkdir("list_one")
tmpdir.mkdir("list_two")
tmpdir.mkdir("list_three")
runner.invoke(cli, ["new", "-l", "list_one", "todo one"])
runner.invoke(cli, ["new", "-l", "list_two", "todo two"])
runner.invoke(cli, ["new", "-l", "list_three", "todo three"])
# No filter
result = runner.invoke(cli, ["list"])
assert not result.exception
assert len(result.output.splitlines()) == 3
assert "todo one" in result.output
assert "@list_one" in result.output
assert "todo two" in result.output
assert "@list_two" in result.output
assert "todo three" in result.output
assert "@list_three" in result.output
# One filter
result = runner.invoke(cli, ["list", "list_two"])
assert not result.exception
assert len(result.output.splitlines()) == 1
assert "todo two" in result.output
assert "@list_two" not in result.output
# Several filters
result = runner.invoke(cli, ["list", "list_one", "list_two"])
assert not result.exception
assert len(result.output.splitlines()) == 2
assert "todo one" in result.output
assert "todo two" in result.output
assert "@list_one" in result.output
assert "@list_two" in result.output
def test_due_aware(tmpdir, runner, create, now_for_tz):
db = Database([tmpdir.join("default")], tmpdir.join("cache.sqlite"))
list_ = next(db.lists())
for tz in ["CET", "HST"]:
for i in [1, 23, 25, 48]:
todo = Todo(new=True)
todo.due = now_for_tz(tz) + timedelta(hours=i)
todo.summary = "{}".format(i)
todo.list = list_
db.save(todo)
todos = list(db.todos(due=24))
assert len(todos) == 4
assert todos[0].summary == "23"
assert todos[1].summary == "23"
assert todos[2].summary == "1"
assert todos[3].summary == "1"
def test_due_naive(tmpdir, runner, create):
now = datetime.now()
for i in [1, 23, 25, 48]:
due = now + timedelta(hours=i)
create(
"test_{}.ics".format(i),
"SUMMARY:{}\nDUE;VALUE=DATE-TIME:{}\n".format(
i,
due.strftime("%Y%m%dT%H%M%S"),
),
)
db = Database([tmpdir.join("default")], tmpdir.join("cache.sqlite"))
todos = list(db.todos(due=24))
assert len(todos) == 2
assert todos[0].summary == "23"
assert todos[1].summary == "1"
def test_filtering_start(tmpdir, runner, todo_factory):
today = datetime.now()
now = today.strftime("%Y-%m-%d")
tomorrow = (today + timedelta(days=1)).strftime("%Y-%m-%d")
yesterday = (today + timedelta(days=-1)).strftime("%Y-%m-%d")
result = runner.invoke(cli, ["list", "--start", "before", now])
assert not result.exception
assert not result.output.strip()
result = runner.invoke(cli, ["list", "--start", "after", now])
assert not result.exception
assert not result.output.strip()
todo_factory(summary="haha", start=today)
todo_factory(summary="hoho", start=today)
todo_factory(summary="hihi", start=today - timedelta(days=2))
todo_factory(summary="huhu")
result = runner.invoke(cli, ["list", "--start", "after", yesterday])
assert not result.exception
assert "haha" in result.output
assert "hoho" in result.output
assert "hihi" not in result.output
assert "huhu" not in result.output
result = runner.invoke(cli, ["list", "--start", "before", yesterday])
assert not result.exception
assert "haha" not in result.output
assert "hoho" not in result.output
assert "hihi" in result.output
assert "huhu" not in result.output
result = runner.invoke(cli, ["list", "--start", "after", tomorrow])
assert not result.exception
assert "haha" not in result.output
assert "hoho" not in result.output
assert "hihi" not in result.output
assert "huhu" not in result.output
def test_statuses(todo_factory, todos):
cancelled = todo_factory(status="CANCELLED").uid
completed = todo_factory(status="COMPLETED").uid
in_process = todo_factory(status="IN-PROCESS").uid
needs_action = todo_factory(status="NEEDS-ACTION").uid
no_status = todo_factory(status="NEEDS-ACTION").uid
all_todos = set(todos(status=["ANY"]))
cancelled_todos = set(todos(status=["CANCELLED"]))
completed_todos = set(todos(status=["COMPLETED"]))
in_process_todos = set(todos(status=["IN-PROCESS"]))
needs_action_todos = set(todos(status=["NEEDS-ACTION"]))
assert {t.uid for t in all_todos} == {
cancelled,
completed,
in_process,
needs_action,
no_status,
}
assert {t.uid for t in cancelled_todos} == {cancelled}
assert {t.uid for t in completed_todos} == {completed}
assert {t.uid for t in in_process_todos} == {in_process}
assert {t.uid for t in needs_action_todos} == {needs_action, no_status}
|
isc
|
rimbalinux/MSISDNArea
|
django/contrib/gis/geos/prototypes/__init__.py
|
12
|
1349
|
"""
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import create_cs, get_cs, \
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz, \
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import from_hex, from_wkb, from_wkt, \
create_point, create_linestring, create_linearring, create_polygon, create_collection, \
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone, \
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid, \
get_dims, get_num_coords, get_num_geoms, \
to_hex, to_wkb, to_wkt
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import geos_hasz, geos_isempty, \
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses, \
    geos_disjoint, geos_equals, geos_equalsexact, geos_intersects, \
    geos_overlaps, geos_relatepattern, geos_touches, geos_within
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
|
bsd-3-clause
|
nyasara/azuremono-docker
|
IronPython-2.7.4/Lib/binhex.py
|
55
|
14984
|
"""Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
            raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
        # The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
def _test():
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
|
mit
|
mcanthony/nupic
|
src/nupic/regions/ImageSensor.py
|
17
|
128455
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines ImageSensor, an extensible sensor for images.
"""
import os
import re
import shutil
import inspect
import cPickle as pickle
import copy
from base64 import b64encode, b64decode
from unicodedata import normalize
from PyRegion import PyRegion
import numpy
from PIL import (Image,
ImageChops,
ImageDraw)
from nupic.bindings.math import GetNTAReal
RealNumpyDType = GetNTAReal()
from nupic.image import (serializeImage,
deserializeImage,
imageExtensions)
def containsConvolutionPostFilter(postFilters):
"""Determine if the post filters contain a convolution filter"""
for p in postFilters:
if p[0].endswith('Convolution'):
return True
return False
class ImageSensor(PyRegion):
"""
ImageSensor is an extensible sensor for grayscale and black and white images.
It uses 'filter' and 'explorer' plugins to do advanced image processing and
training.
It loads images either from files on disk or directly from the Numenta tools.
There are several commands for loading images:
- loadSingleImage, for loading a single image file from disk
- loadMultipleImages, for loading multiple image files from disk
- loadSerializedImage, for receiving a serialized image directly
The loadSingleImage and loadMultipleImage commands don't actually load images
into memory until the images are needed. Furthermore, the filters (see below)
are not run until needed. This keeps ImageSensor's memory usage low, making
it possible to use large datasets and run many filters.
There is also a 'memoryLimit' parameter, which caps the total amount of
memory to be used for storing images. ImageSensor will automatically unload
images and filter outputs as necessary to stay within the limit.
ImageSensor does not necessarily present each image to the bottom nodes of
the network once; rather, the explorer plugin dictates the movement of the
sensor across the image. Typically, the sensor will sweep over each image for
many iterations, in order to help the network generate invariance.
The filter plugins are located in regions/ImageSensorFilters. Bundled filters
include scaling, contrast normalization, and Gabor filters. To make a new
filter, subclass BaseFilter (using the other filters as examples), and drop
the new filter in the ImageSensorFilters directory.
The explorer plugins, located in regions/ImageSensorExplorers, control how
the sensor moves through the set of input images, their filtered versions,
and possible locations of the sensor window on the image. You may create new
explorers by subclassing BaseExplorer and putting them in the
ImageSensorExplorers directory. The documentation for the BaseExplorer class
contains detailed information on explorers.
Only the 'width', 'height', and 'depth' parameters need to be set when the
sensor is first constructed, though other parameters may be set at that time.
Some ImageSensor parameters may need to be changed for each level of
level-by-level training. For example, Pictures trains a single node at each
level and then clones the trained node to all other nodes at the same level.
Only some bottom-level nodes are enabled, except when training the top node.
Thus, the Pictures example changes ImageSensor's enabledWidth and
enabledHeight parameters with each level of training. In many cases, users
may wish to change the explorer for each level of training.
Some explorers can calculate ahead of time how many iterations will be
  necessary to explore all the images, though explorers that move randomly
cannot. If you are using one of the deterministic explorers, such as
ExhaustiveSweep, you can use ImageSensor's numIterations parameter to get the
total number of iterations for the loaded images. Changing the explorer or
images will change the number of iterations, so make sure to check the
parameter right before running the network.
The simplest explorer, 'Flash', presents each image once without any
sweeping. If you have trained a network with a different explorer and wish to
perform "flash inference" for testing, just set the explorer to 'Flash'.
All of ImageSensor's public commands and parameters are documented through
its NodeSpec. You can view the NodeSpec with the following Python commands:
from nupic.network import nodeHelp
nodeHelp("py.ImageSensor")
"""
def _init(self, width=1, height=1, depth=1, mode='gray',
blankWithReset=False, background=255, invertOutput=False,
filters=[], postFilters=[], explorer="Flash",
categoryOutputFile="", logText=False, logOutputImages=False,
logOriginalImages=False, logFilteredImages=False,
logLocationImages=False, logLocationOnOriginalImage=False,
logBoundingBox=False, logDir="imagesensor_log",
automaskingTolerance=0, automaskingPadding=0, memoryLimit=100,
minimalBoundingBox=False, dataOut=None, categoryOut=None,
partitionOut=None, resetOut=None, bboxOut=None, alphaOut=None,
useAux=False, auxDataOut=None, auxDataWidth=None, **keywds):
"""
width -- Width of the sensor's output to the network (pixels).
height -- Height of the sensor's output to the network (pixels).
depth -- Optional parameter used to send multiple versions of an image
out at the same time.
mode -- Current options are 'gray' (8-bit grayscale) and 'bw' (1-bit
black and white).
blankWithReset -- ** DEPRECATED ** Whether to send a blank output every
time the explorer generates a reset signal (such as when beginning
a new sweep). Turning on blanks increases the number of iterations.
background -- Pixel value of background, used for padding a cropped
image, and also for finding the bounding box in the absence of a mask.
invertOutput -- Inverts the output of the node (e.g. white pixels
become black).
filters -- List of filters to apply to each image. Each element in
the list should be either a string (just the filter name) or a list
containing both the filter name and a dictionary specifying its
arguments.
explorer -- Explorer (used to move the sensor through the input
space). Specify as a string (just the explorer name) or a list
containing both the explorer name and a dictionary specifying its
arguments.
categoryOutputFile -- Name of file to which to write category number
on each compute (useful for analyzing network accuracy after inference).
logText -- Toggle for verbose logging to imagesensor_log.txt.
logOutputImages -- Toggle for writing each output to disk (as an image)
on each iteration.
logOriginalImages -- Toggle for writing the original, unfiltered version
of the current image to disk on each iteration.
logFilteredImages -- Toggle for writing the intermediate versions of
images to disk as they pass through the filter chain.
logLocationImages -- Toggle for writing an image to disk on each
iteration which shows the location of the sensor window.
logLocationOnOriginalImage -- Whether to overlay the location rectangle
on the original image instead of the filtered image. Does not work if
the two images do not have the same size, and may be nonsensical
even if they do (for example, if a filter moved the object within the
image).
logBoundingBox -- Toggle for writing a log containing the bounding
box information for each output image.
automaskingTolerance -- Affects the process by which bounding box masks
are automatically generated from images based on similarity to the
specified 'background' pixel value. The bounding box will enclose all
pixels in the image that differ from 'background' by more than
the value specified in 'automaskingTolerance'. Default is 0, which
generates bounding boxes that enclose all pixels that differ at all
from the background. In general, increasing the value of
'automaskingTolerance' will produce tighter (smaller) bounding box masks.
automaskingPadding -- Affects the process by which bounding box masks
are automatically generated from images. After computing the
bounding box based on image similarity with respect to the background,
the box will be expanded by 'automaskingPadding' pixels in all four
directions (constrained by the original size of the image.)
memoryLimit -- Maximum amount of memory that ImageSensor should use
for storing images, in megabytes. ImageSensor will unload images and
filter outputs to stay beneath this ceiling. Set to -1 for no limit.
minimalBoundingBox -- Whether the bounding box found by looking at the
image background should be set even if it touches one of the sides of
the image. Set to False to avoid chopping edges off certain images, or
True if that is not an issue and you wish to use a sweeping explorer.
dataOut -- The output element count of the 'dataOut' output.
categoryOut -- The output element count of the 'categoryOut' output (NuPIC 1 only).
partitionOut -- The output element count of the 'partitionOut' output (NuPIC 1 only).
resetOut -- The output element count of the 'resetOut' output (NuPIC 1 only).
bboxOut -- The output element count of the 'bboxOut' output (NuPIC 1 only).
alphaOut -- The output element count of the 'alphaOut' output (NuPIC 1 only).
auxDataWidth -- The output element count of the 'auxData' output (NuPIC 2 only).
"""
PyRegion.__init__(self, **keywds)
# Validate the output element counts
if dataOut:
if hasattr(dataOut, "__iter__"):
if ([1] * (3 - len(dataOut)) + list(dataOut)) == [depth, height, width]:
pass
elif dataOut == (depth * height * width):
pass
else:
if not containsConvolutionPostFilter(postFilters):
raise RuntimeError("The 'dataOut' output element count must be equal"
" to depth * height * width.")
# In NuPIC 2, these are all None
if categoryOut and categoryOut != 1:
raise RuntimeError("The 'categoryOut' output element count must be 1.")
if partitionOut and partitionOut != 1:
raise RuntimeError("The 'partitionOut' output element count must be 1.")
if resetOut and resetOut != 1:
raise RuntimeError("The 'resetOut' output element count must be 1.")
if bboxOut and bboxOut != 4:
raise RuntimeError("The 'bboxOut' output element count must be 4.")
if alphaOut and alphaOut != width * height:
raise RuntimeError("The 'alphaOut' output element count must be equal "
"to width * height")
self.useAux = useAux
self.width = width
self.height = height
self.depth = depth
self.mode = mode
self.blankWithReset = blankWithReset
self.background = background
self.automaskingTolerance = automaskingTolerance
self.automaskingPadding = automaskingPadding
if self.mode == 'bw' and self.background != 0:
self.background = 255
self.invertOutput = invertOutput
self.categoryOutputFile = categoryOutputFile
self.logFile = None
self.bboxLogFile = None
self.logText = logText
self.logOutputImages = logOutputImages
self.logOriginalImages = logOriginalImages
self.logFilteredImages = logFilteredImages
self.logLocationImages = logLocationImages
self.logLocationOnOriginalImage = logLocationOnOriginalImage
self.logBoundingBox = logBoundingBox
self.logDir = logDir
self.memoryLimit = memoryLimit
self.minimalBoundingBox = minimalBoundingBox
self.enabledWidth = self.width
self.enabledHeight = self.height
# The imageList data structure contains all the information about all the
# images which have been loaded via any of the load* methods. Some images
# may not be in memory, but their metadata is always kept in imageList.
# imageList[imageIndex] returns all the information about the image with
# the specified index, in a dictionary. The keys in the dictionary are:
# 'image': The unfiltered image.
# 'imagePath': The path from which the image was loaded.
# 'maskPath': The path from which the mask was loaded.
# 'categoryName': The name of the image's category.
# 'categoryIndex': The index of the image's category.
# 'filtered': A dictionary of filtered images created from this image.
# In general, images are only loaded once they are needed. But if an image
# is loaded via loadSerializedImage, then its entry in imageList has an
# 'image' value but no 'imagePath' value. Thus, it will never be deleted
# from memory because it cannot be recovered. All other images are fair
# game.
# The 'filtered' dictionary requires more explanation. Each key in the
# dictionary is a tuple specifying the positions of the filters that
# generated the image. (Filters can generate multiple outputs, so an
# image that comes out of the filter pipeline must be referenced by its
# position in the outputs of each filter in the pipeline). The dictionary
# also contains images that have been run through only part of the filter
# pipeline, which are kept around for use as inputs for the remaining
# filters.
# Here is an example with 3 filters in the pipeline:
# 0: A Resize filter that generates 3 outputs (small, medium, large)
# 1: An EqualizeHistogram filter that generates 1 output
# 2: A Rotation2D filter that generates 5 outputs (5 rotation angles)
# A typical key for an image would be (0, 0, 2), specifying the smallest
# scale from the Resize filter (0), the only output from the
# EqualizeHistogram filter (0), and the middle rotation angle (2).
# Another valid key would be (1), specifying an image that has gone through
# the Resize filter (the middle scale), but which has not been through
# the other filters yet. This image would never be shown to the network,
# but it would be used by ImageSensor to compute other images.
# The _getFilteredImages method is the only method which directly accesses
# the filtered images in imageList. Filtering is only done on-demand.
# If _getFilteredImages is called and the requested images have not yet
# been created, _applyFilter is called to run each filter, and the
# resulting images are stored in imageList for later use. They may be
# deleted due to the memoryLimit parameter, in which case they will be
# recreated later if necessary.
self._imageList = []
self.categoryInfo = [] # (categoryName, canonicalImage) for each category
self._imageQueue = [] # Queue of image indices for managing memory
self._filterQueue = [] # Queue of filter outputs for managing memory
self._pixelCount = 0 # Count of total loaded pixels for managing memory
self.outputImage = None # Copy of the last image sent to the network
self.locationImage = None # Copy of the location image for the last output
self.prevPosition = None # Position used for the last compute iteration
self._categoryOutputFile = None # To write the category on each iteration
self._iteration = 0 # Internal iteration counter
self.explorer = None
self._setFilters(filters)
self._setPostFilters(postFilters)
self._setExplorer(explorer)
self._holdForOffset = 0
self._cubeOutputs = not containsConvolutionPostFilter(postFilters)
self._auxDataWidth = auxDataWidth
def __init__(self, *args, **kw):
self._init(*args, **kw)
def loadSingleImage(self, imagePath, maskPath=None, categoryName=None,
clearImageList=True, skipExplorerUpdate=False, auxPath=None, userAuxData=None,
sequenceIndex=None, frameIndex=None):
"""
Add the specified image to the list of images.
Images are not loaded into memory until they are needed.
imagePath -- Path to the image to load.
auxPath -- Path to the auxiliary data for the image.
maskPath -- Path to the mask to load with this image.
categoryName -- Name of the category of this image.
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
sequenceIndex -- Unique sequence index.
frameIndex -- The frame number within the sequence.
"""
if categoryName is not None and type(categoryName) is not str:
categoryName = str(categoryName)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
if userAuxData is not None:
manualAux = True
else:
manualAux = False
self._addImage(imagePath=imagePath, maskPath=maskPath,
categoryName=categoryName, auxPath=auxPath, manualAux = manualAux,
userAuxData=userAuxData, sequenceIndex=sequenceIndex, frameIndex=frameIndex)
if not skipExplorerUpdate:
self.explorer[2].update(numImages=len(self._imageList))
self._logCommand([('index', len(self._imageList)-1)])
if clearImageList:
self.explorer[2].first()
return self.getParameter('numImages'), self.getParameter('numMasks')
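# Illustrative call (hypothetical paths); 'sensor' is an ImageSensor instance:
#
#   numImages, numMasks = sensor.loadSingleImage('images/cat/cat01.png',
#                                                maskPath='masks/cat/cat01.png',
#                                                categoryName='cat')
#
# The image is registered immediately, but its pixels are only read from disk
# when the explorer or filters first need them.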
def loadSpecificImages(self, imagePaths, categoryNames=None,
clearImageList=True):
"""
Add multiple images to the list of images.
See loadMultipleImages to load images that have been organized by
category on disk.
This command is equivalent to calling loadSingleImage repeatedly, but it
is faster because it avoids updating the explorer between each image, and
because it only involves one call to the runtime engine.
imagePaths -- List with the path of each image.
categoryNames -- Category name for each image (or can be a single string
with the category name that should be applied to all images).
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
"""
if categoryNames is not None and isinstance(categoryNames, basestring):
categoryNames = [categoryNames] * len(imagePaths)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
for i in xrange(len(imagePaths)):
if categoryNames is not None:
categoryName = categoryNames[i]
else:
categoryName = None
self.loadSingleImage(imagePath=imagePaths[i],
categoryName=categoryName,
clearImageList=False,
skipExplorerUpdate=True)
self.explorer[2].update(numImages=len(self._imageList))
return self.getParameter('numImages'), self.getParameter('numMasks')
def _walk(self, top):
"""
Directory tree generator lifted from python 2.6 and then
stripped down. It improves on the 2.5 os.walk() by adding
the 'followlinks' capability.
"""
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except OSError, e:
raise RuntimeError("Unable to get a list of files due to an OS error.\nDirectory: "+top+"\nThis may be due to an issue with Snow Leopard.")
#raise
except:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
yield top, dirs, nondirs
for name in dirs:
path = os.path.join(top, name)
for x in self._walk(path):
yield x
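# _walk() yields (dirpath, dirnames, filenames) tuples like os.walk(), e.g.:
#
#   for dirpath, dirnames, filenames in self._walk(imagePath):
#       ...
#
# Unlike os.walk() on Python 2.5, it always descends into directories that
# are symbolic links.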
def loadMultipleImages(self, imagePath, extension=None, maskPath=None,
first=None, last=None, subsample=1, clearImageList=True,
strictMaskLocations=True, categoryNameFilter=None, pattern=None,
skipInterval=None, skipOffset=None, useCategories=True, auxPath=None,
auxType=None):
"""
Add images from multiple categories to the list of images.
Images are not loaded into memory until they are needed.
imagePath -- Path from which to load the images (see note below).
auxPath -- Path from which to load the auxiliary data for each image.
auxType -- Type of auxiliary data: the file extension.
extension -- Extension of images files to accept (or None to accept all
valid images).
maskPath -- Path from which to load masks that correspond to the loaded
images (see note below).
first -- Index of the first image in each category to load. If
first == 1, ImageSensor skips the first image and loads the rest.
last -- Index of the last image in each category to load. If
last == 1, ImageSensor loads the first two images.
subsample -- ImageSensor loads 1/subsample of the images in each
category. If subsample == 3, loads the first image, the fourth, the
seventh, and so on.
clearImageList -- If True, ImageSensor removes all loaded images when
loading these new images. If False, the images loaded by this
method will be appended to the existing list of images.
strictMaskLocations -- If True, ImageSensor will only load masks whose
path (in the mask directory) exactly parallels the path to the image
(in the image directory). If False, ImageSensor will attempt to find
the masks even if there aren't category subdirectories, or all the
mask files are in the root mask directory, etc.
categoryNameFilter -- String or list of strings that will be matched
against category names. Only categories that match one of the strings
will be processed. Each string can be a regular expression.
pattern -- Regular expression for filtering images. Only images which
match the regular expression (via re.search()) will be accepted.
The path provided to pattern is the absolute path to the image file.
skipInterval -- The inverse of 'subsample'; this parameter directs
ImageSensor to skip every Nth image that it would otherwise load.
For example, if 'skipInterval' is 2, then ImageSensor will load
only every other image. The default is None, which directs
ImageSensor to skip no images. Note that a 'skipInterval' of 1
would mean skipping every image, which is nonsensical; therefore,
non-None values of 'skipInterval' less than 2 cause a
RuntimeError to be raised.
skipOffset -- Operates in conjunction with 'skipInterval'. Specifies
an offset to use for the purpose of skipping. For example, if 'skipInterval'
was 10 (skip every 10th image) and 'skipOffset' was 0 (or None), then
the first 9 images would be loaded, the 10th would be skipped, etc.
But if 'skipOffset' were 2, then the first 7 images would be loaded,
the 8th skipped, the next 9 loaded, the 17th skipped, etc. Defaults
to None (equivalent to zero.) If both 'skipOffset' and 'skipInterval'
are non-None, then 'skipOffset' must be non-negative and less than
'skipInterval'.
useCategories -- True for normal behavior, or False to load any image found
in imagePath, without looking for nested directory folders.
Returns a tuple containing the number of images loaded and the number of
masks loaded.
This method expects a directory structure like the following:
imagePath/categoryName1/image01.ext
image02.ext
categoryName2/image01.ext
image02.ext
Optionally, images can be nested arbitrarily deep. For instance:
imagePath/categoryName1/objectName1/image01.ext
image02.ext
objectName2/image01.ext
image02.ext
categoryName2/objectName1/image01.ext
A depth-first search is performed to find images.
Directories and images are processed in sorted order.
The nested directory structure with category names is necessary, but the
names of the image files are unimportant.
"""
if first is not None and type(first) != int:
raise RuntimeError("'first' must be None or a nonnegative integer")
if last is not None and type(last) != int:
raise RuntimeError("'last' must be None or a nonnegative integer")
if subsample is not None and type(subsample) != int:
raise RuntimeError("'subsample' must be None or a positive integer")
if skipInterval is not None and (type(skipInterval) != int
or skipInterval < 2):
raise RuntimeError("'skipInterval' must be None or an integer >= 2")
if skipOffset is not None and skipInterval is not None and \
(type(skipOffset) != int or skipOffset < 0
or skipOffset >= skipInterval):
raise RuntimeError("'skipOffset' must be None or a non-negative integer "
"< 'skipInterval'")
self._logCommand()
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
if self.logFilteredImages:
if clearImageList and os.path.exists(filterLogDir):
shutil.rmtree(filterLogDir)
if self.filters and not os.path.exists(filterLogDir):
os.makedirs(filterLogDir)
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
if extension:
# Only look for the extension specified by the user
if not extension.startswith('.'):
extension = '.' + extension
extensions = [extension]
else:
extensions = imageExtensions
# NTA_DATA_DIR may be set in the autotest environment
if "NTA_DATA_DIR" in os.environ and not os.path.abspath(imagePath):
imagePath = os.path.join(os.environ["NTA_DATA_DIR"], imagePath)
print "ImageSensor: looking for data in NTA_DATA_DIR=%s" % os.environ["NTA_DATA_DIR"]
imagePath = os.path.abspath(imagePath)
if auxPath is not None:
if type(auxPath) is not list:
auxPath = [auxPath]
for k in range(0, len(auxPath)):
auxPath[k] = os.path.abspath(auxPath[k])
if maskPath:
maskPath = os.path.abspath(maskPath)
if not os.path.exists(maskPath):
maskPath = None
# Convert 'first', 'last', and 'subsample' to proper Python names/format
start = first
stop = None
if last is not None:
stop = last + 1
step = subsample
# Handle skipping images that would otherwise be loaded
if skipOffset is None:
skipOffset = 0
images = []
categoryList = [None]
if useCategories:
# Assume each directory in imagePath is its own category
categoryList = [c for c in sorted(os.listdir(imagePath))
if c[0] != '.' and
os.path.isdir(os.path.join(imagePath, c))]
if categoryList:
# Filter categories if specified
if categoryNameFilter:
# Need to convert to NFC and re-encode to UTF-8 or else paths may not
# match the category filter
categoryList = [normalize('NFC', unicode(c, 'utf8')).encode('utf8')
for c in categoryList]
if isinstance(categoryNameFilter, basestring):
categoryNameFilter = [categoryNameFilter]
else:
categoryNameFilter = list(categoryNameFilter)
# With a large number of categories, the regular expression
# match can be very expensive. Determine whether there are
# any regular expressions in the filter list. If there are,
# then do the full regex match, otherwise just compare strings
# directly.
hasRegex = False
# use a regex to see if it has any regexes
isTextRegex = re.compile("[a-zA-Z_]+")
hasRegex = False in [isTextRegex.match(r) is not None for r in categoryNameFilter]
if not hasRegex:
categoryList = [c for c in categoryList if c in categoryNameFilter]
else:
for i, r in enumerate(categoryNameFilter):
if r[-1] != '$':
categoryNameFilter[i] += '$'
matchers = [re.compile(r) for r in categoryNameFilter]
categoryList = [c for c in categoryList if True in
[r.match(c) is not None for r in matchers]]
for category in categoryList:
skipCounter = skipOffset
# Call loadSingleImage on every image with the correct extension at any
# depth, using a depth first search
categoryFilenames = []
if category:
walkPath = os.path.join(imagePath, category)
else:
walkPath = imagePath
category = os.path.split(imagePath)[1]
#if float(".".join([str(x) for x in sys.version_info[:2]])) >= 2.6:
# w = os.walk(walkPath, followlinks=True)
#else:
# w = os.walk(walkPath)
w = self._walk(walkPath)
while True:
try:
dirpath, dirnames, filenames = w.next()
except StopIteration:
break
# Don't enter directories that begin with '.'
for d in dirnames[:]:
if d.startswith('.'):
dirnames.remove(d)
dirnames.sort()
# Ignore files that begin with '.'
filenames = [f for f in filenames if not f.startswith('.')]
# Only load images with the right extension
filenames = [f for f in filenames
if os.path.splitext(f)[1].lower() in extensions]
if pattern:
# Filter images with regular expression
filenames = [f for f in filenames
if re.search(pattern, os.path.join(dirpath, f))]
filenames.sort()
imageFilenames = [os.path.join(dirpath, f) for f in filenames]
# Get the corresponding path to the masks
if maskPath:
maskdirpath = os.path.join(maskPath, dirpath[len(imagePath)+1:])
maskFilenames = [os.path.join(maskdirpath, f) for f in filenames]
if strictMaskLocations:
# Only allow masks with parallel filenames
for i, filename in enumerate(maskFilenames):
if not os.path.exists(filename):
maskFilenames[i] = None
else:
# Find the masks even if the path does not match exactly
for i, filename in enumerate(maskFilenames):
while True:
if os.path.exists(filename):
maskFilenames[i] = filename
break
if os.path.split(filename)[0] == maskPath:
# Failed to find the mask
maskFilenames[i] = None
break
# Try to find the mask by eliminating subdirectories
body, tail = os.path.split(filename)
head, body = os.path.split(body)
while not os.path.exists(head):
tail = os.path.join(body, tail)
head, body = os.path.split(head)
filename = os.path.join(head, tail)
else:
maskFilenames = [None for f in filenames]
# Add our new images and masks to the list for this category
categoryFilenames.extend(zip(imageFilenames, maskFilenames))
# We have the full list of filenames for this category
for f in categoryFilenames[start:stop:step]:
skipCounter += 1
if not skipInterval or skipCounter % skipInterval:
images.append((f[0], f[1], category))
# Load all images and masks
if not hasattr(auxType,'__iter__'):
auxType = [auxType]
if not hasattr(auxPath,'__iter__'):
auxPath = [auxPath]
sequenceInfo = self._computeSequenceInfo(images)
for i in xrange(len(images)):
# Generate the auxiliary data path
imageName = images[i][0].split(imagePath)
if auxPath[0] is not None and len(auxPath)>=1:
currentAuxPath = []
for k in range(0, len(auxPath)):
currentAuxPath.append("".join([auxPath[k],imageName[1]+auxType[k]]))
else:
currentAuxPath = None
self.loadSingleImage(imagePath=images[i][0], maskPath=images[i][1],
categoryName=images[i][2], clearImageList=False,
skipExplorerUpdate=True, auxPath=currentAuxPath,
sequenceIndex=sequenceInfo[i][0], frameIndex=sequenceInfo[i][1])
self.explorer[2].update(numImages=len(self._imageList), sequenceCount=sequenceInfo[-1][0], frameCount=len(self._imageList))
return self.getParameter('numImages'), self.getParameter('numMasks')
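# Note on the skip arithmetic above: within each category the counter starts
# at 'skipOffset' and is incremented once per candidate image; an image is
# kept only when the counter is not a multiple of 'skipInterval'. With
# skipInterval=10 and skipOffset=0, counter values 1-9 keep the first nine
# images and value 10 drops the tenth, matching the docstring example.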
def _computeSequenceInfo(self, images):
"""
Generates the set of sequence indices and frame indices
for the images in the dataset.
"""
sequenceInfo = []
seqAlias = None
seqID = -1
frameIndex = -1
for image in images:
parentDir = os.path.split(os.path.split(image[0])[0])[1]
frameIndex += 1
if parentDir == image[2]:
seqID += 1
frameIndex = 0
seqAlias = None
elif parentDir != seqAlias:
seqID += 1
frameIndex = 0
seqAlias = parentDir
sequenceInfo.append((seqID, frameIndex))
return sequenceInfo
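# Worked example for _computeSequenceInfo (hypothetical paths): for images
#   cat/seqA/01.png, cat/seqA/02.png, cat/seqB/01.png, dog/01.png
# with categories 'cat', 'cat', 'cat', 'dog', the parent directories are
# seqA, seqA, seqB and dog, so the returned (sequenceIndex, frameIndex) pairs
# are (0, 0), (0, 1), (1, 0) and (2, 0). A new sequence starts whenever the
# parent directory changes or is the category directory itself.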
def loadSerializedImage(self, s, categoryName=None, clearImageList=True,
info=None, erode=None,
userAuxData=None, auxPath=None, manualAux=False):
"""
Receive a serialized image (as a string) and add to the list of images.
s -- Serialized version of the image.
categoryName -- Name of the category of the image.
clearImageList -- If True, all loaded images are removed before this
image is loaded. If False, this image is appended to the list of
images.
info -- an optional dict of attribute-value pairs to insert into the
image's .info field, after deserialization
erode -- Use this value for the erode flag (True or False) rather than
calculating it.
To serialize an image before passing it to this command, do the following:
from nupic.image import serializeImage
s = serializeImage(image)
"""
if clearImageList:
self.clearImageList(skipExplorerUpdate=True)
self._addImage(image=deserializeImage(s, info), categoryName=categoryName,
erode=erode, userAuxData=userAuxData, auxPath=auxPath,
manualAux=manualAux)
self.explorer[2].update(numImages=len(self._imageList))
if clearImageList:
self.explorer[2].first()
self._meetMemoryLimit()
return self.getParameter('numImages'), self.getParameter('numMasks')
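# Illustrative round trip using the helpers imported from nupic.image at the
# top of this module ('image' is assumed to be an existing PIL image):
#
#   from nupic.image import serializeImage
#   s = serializeImage(image)
#   numImages, numMasks = sensor.loadSerializedImage(s, categoryName='cat')
#
# Images loaded this way are never unloaded, since there is no path from
# which to reload them.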
def clearImageList(self, skipExplorerUpdate=False):
"""
Clear the list of images.
"""
self._imageList = []
self._imageQueue = []
self._filterQueue = []
self._pixelCount = 0
self.prevPosition = None
if not skipExplorerUpdate:
self.explorer[2].update(numImages=0)
def seek(self, iteration=None, image=None, filters=None, offset=None,
reset=None, sequenceIndex=None, frameIndex=None):
"""
Seek to the specified iteration, image, filter position, or offset.
iteration -- Iteration number.
image -- Image number.
filters -- Tuple specifying a position for each filter.
offset -- Tuple of integers specifying the offset as (x,y).
sequenceIndex -- The sequence to seek to.
frameIndex -- The frame within a sequence to seek to.
Examples:
seek(0) -- Reset to the first iteration.
seek(image=100, filters=(0,0,..,0)) -- Seek to image 100 and position
0 for each filter.
seek(offset=(0,0)) -- Seek to the original position of the current image.
The 'iteration' parameter cannot be combined with the other parameters.
"""
self._logCommand()
# Combine image, filters, and offset into position
position = None
if image is None and sequenceIndex is not None:
image = self.getIterationFromSequence(sequenceIndex, frameIndex)
if image is not None or filters is not None or offset is not None \
or reset is not None:
position = {'image': image, 'filters': filters, 'offset': offset,
'reset': reset}
# Validate inputs
if iteration is not None and position is not None:
raise RuntimeError("Cannot combine 'iteration' with other arguments")
if iteration is None and position is None:
raise RuntimeError("Must specify at least one argument")
if position is not None:
if position['offset'] and type(position['offset']) is tuple:
position['offset'] = list(position['offset'])
if position['image'] is not None:
if position['image'] < 0:
raise RuntimeError("'image' must be nonnegative")
if position['image'] >= len(self._imageList):
raise RuntimeError("'image' exceeds number of loaded images")
if position['filters'] is not None:
if type(position['filters']) != list:
raise RuntimeError("'filters' must be a list of nonnegative values")
if len(position['filters']) != len(self.filters):
raise RuntimeError("Length of 'filters' does not match numFilters")
# Account for holdFor as best we can. This won't be exact because it doesn't take into
# account the current position within the holdFor
if iteration is not None:
self._holdForOffset = iteration % self.explorer[2].holdFor
iteration //= self.explorer[2].holdFor
self.explorer[2].seek(iteration=iteration, position=position)
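# Example of the holdFor adjustment above: if the explorer holds each
# position for 3 iterations and seek(iteration=7) is called, _holdForOffset
# becomes 7 % 3 == 1 and the explorer is asked to seek to iteration
# 7 // 3 == 2, i.e. the third held position.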
def getNumIterations(self, image=None):
"""
Calculate how many samples the explorer will provide.
image -- If None, returns the sum of the iterations for all the loaded
images. Otherwise, image should be an integer specifying the image for
which to calculate iterations.
"""
if image is not None and type(image) != int:
raise RuntimeError("'image' must be None or a nonnegative integer")
return self.explorer[2].getNumIterations(image) * self.explorer[2].holdFor
def getSequenceCount(self):
"""
Calculates how many sequences the sensor will provide.
"""
if self._imageList is None:
return -1
else:
return self._imageList[-1]['sequenceIndex']+1
def getFrameCount(self, sequenceIndex):
"""
Calculates the number of frames in a sequence.
"""
if sequenceIndex<0:
raise RuntimeError("'sequenceIndex' must be a non-negative integer.")
if sequenceIndex>self._imageList[-1]['sequenceIndex']:
raise RuntimeError("'sequenceIndex' out of range.")
if self._imageList is None:
return -1
elif sequenceIndex==self._imageList[-1]['sequenceIndex']:
return self._imageList[-1]['frameIndex']+1
else:
ID = 0
while sequenceIndex>=self._imageList[ID]['sequenceIndex']:
ID+=1
return self._imageList[ID-1]['frameIndex']+1
def getIterationRange(self, sequenceIndex=None):
"""
Returns the range of the iteration numbers for
the given sequence ID. If sequenceIndex is None, then
the total range of iterations is returned.
"""
if sequenceIndex is None:
return 0, len(self._imageList)
else:
startIteration = self.getIterationFromSequence(sequenceIndex)
stopIteration = self.getIterationFromSequence(sequenceIndex, self.getFrameCount(sequenceIndex)-1)
return startIteration, stopIteration
def getIterationFromSequence(self, sequenceIndex, frameIndex=0):
"""
Returns the iteration number for the given
frame in the sequence.
"""
if sequenceIndex<0:
raise RuntimeError("'sequenceIndex' must be a non-negative integer.")
if sequenceIndex>self._imageList[-1]['sequenceIndex']:
raise RuntimeError("'sequenceIndex' out of range.")
if self._imageList is None:
return -1
else:
ID = 0
while sequenceIndex>self._imageList[ID]['sequenceIndex']:
ID+=1
while frameIndex>self._imageList[ID]['frameIndex']:
ID+=1
if self._imageList[ID]['sequenceIndex'] != sequenceIndex:
raise RuntimeError("'frameIndex' out of range.")
return ID
def getSequenceFromIteration(self, iteration):
"""
Returns the sequence information given the
iteration number.
"""
if iteration < 0:
raise RuntimeError("'iteration' must be a non-negative integer.")
if iteration>len(self._imageList):
raise RuntimeError("'iteration' out of range.")
else:
return self._imageList[iteration]['sequenceIndex'], self._imageList[iteration]['frameIndex']
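# Example of the mapping helpers above, continuing the _computeSequenceInfo
# example: with (sequenceIndex, frameIndex) pairs (0,0), (0,1), (1,0), (2,0)
# stored in _imageList, getIterationFromSequence(1) returns 2,
# getSequenceFromIteration(2) returns (1, 0), and getFrameCount(0) returns 2.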
def saveImagesToFile(self, filename):
"""
Save imageList, categoryInfo, and filters to the specified file.
Loads all images and runs all filters first.
This method can be used to speed up image loading when expensive filters
are run. After loading images once and passing them through the filters,
use saveImagesToFile to dump the filtered versions to disk. On subsequent
runs with the same images and filters, call loadImagesFromFile to load
the filtered images and avoid rerunning the filters.
"""
# Load all images and run all filters
for i in xrange(len(self._imageList)):
self._applyAllFilters(i)
# Create serializable versions for pickling
sImageList = _serializeImageList(self._imageList)
filters = self.getParameter('filters')
sCategoryInfo = self.getParameter('categoryInfo')
# Pickle serializable objects to file
f = open(filename, 'wb')
pickle.dump((sImageList, filters, sCategoryInfo), f,
protocol=pickle.HIGHEST_PROTOCOL)
f.close()
def loadImagesFromFile(self, filename):
"""
Load from a file created with saveImagesToFile.
Loads imageList and categoryInfo. Also loads the filters used to create
the saved images, and overwrites ImageSensor.filters.
"""
f = open(filename, 'rb')
sImageList, filters, sCategoryInfo = pickle.load(f)
f.close()
self.setParameter('filters', -1, filters)
self._imageList = _deserializeImageList(sImageList)
self.explorer[2].update(numImages=len(self._imageList))
self.setParameter('categoryInfo', -1, sCategoryInfo)
return self.getParameter('numImages'), self.getParameter('numMasks')
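# Illustrative round trip (hypothetical filename): after loading images and
# letting the expensive filters run once,
#
#   sensor.saveImagesToFile('filtered_images.pkl')
#
# and on a later run, instead of re-running the filters,
#
#   numImages, numMasks = sensor.loadImagesFromFile('filtered_images.pkl')
#
# which also restores the filters that produced the saved images.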
def _addImage(self, image=None, imagePath=None, maskPath=None,
categoryName=None, erode=None, userAuxData=None, auxPath=None,
manualAux=False, sequenceIndex=None, frameIndex=None):
"""
Create a dictionary for an image and metadata and add to the imageList.
"""
item = {'image': image,
'imagePath': imagePath,
'auxData': userAuxData,
'auxPath': auxPath,
'manualAux': manualAux,
'maskPath': maskPath,
'erode': True,
'categoryName': categoryName,
'categoryIndex': None,
'partitionID': None,
'filtered': {},
'sequenceIndex': sequenceIndex,
'frameIndex': frameIndex}
self._imageList.append(item)
if erode is not None:
item['erode'] = erode
setErodeFlag = False
else:
setErodeFlag = True
# Look up category index from name
if item['categoryName'] is None:
# Unspecified category
item['categoryName'] = ""
item['categoryIndex'] = -1
else:
# Look up the category in categoryInfo
for i in xrange(len(self.categoryInfo)):
if self.categoryInfo[i][0] == item['categoryName']:
item['categoryIndex'] = i
break
if item['categoryIndex'] is None:
# This is the first image of this category (blank categories ignored)
item['categoryIndex'] = len(self.categoryInfo)
# Load the image in order to use it for categoryInfo
original = self._loadImage(len(self._imageList) - 1, returnOriginal=True,
setErodeFlag=setErodeFlag)
if not image:
self._imageQueue.insert(0, len(self._imageList) - 1)
# Append this category to categoryInfo
self.categoryInfo.append((item['categoryName'], original))
elif image:
# Image is already present, just prepare it
# Not necessary if it was already loaded for categoryInfo
self._loadImage(len(self._imageList) - 1, setErodeFlag=setErodeFlag)
def _loadImage(self, index, returnOriginal=False, setErodeFlag=True, userAuxData=None):
"""
Load an image that exists in the imageList but is not loaded into memory.
index -- Index of the image to load.
returnOriginal -- Whether to return an unmodified version of the image
for categoryInfo.
"""
item = self._imageList[index]
if not item['image']:
# Load the image from disk
f = open(item['imagePath'], 'rb')
item['image'] = Image.open(f)
item['image'].load()
f.close()
# Update the pixel count
self._pixelCount += item['image'].size[0] * item['image'].size[1]
# Extract auxiliary data
if item['manualAux'] is False:
if item['auxPath'] is not None:
if item['auxData'] is None:
# Load the auxiliary data from disk
auxPath = item['auxPath']
numAuxInput = len(auxPath)
for k in range(0,numAuxInput):
if item['auxData'] is None:
item['auxData'] = numpy.fromfile(item['auxPath'][k])
else:
item['auxData'] = numpy.concatenate([item['auxData'],numpy.fromfile(item['auxPath'][k])])
# Extract partition ID if it exists
partitionID = item['image'].info.get('partitionID')
if partitionID is None:
partitionID = -1
item['partitionID'] = int(partitionID)
# Convert to grayscale
if item['image'].mode not in ('L', 'LA'):
if 'A' in item['image'].getbands():
# Convert to grayscale but preserve alpha channel
item['image'] = item['image'].convert('LA')
else:
item['image'] = item['image'].convert('L')
if returnOriginal:
# Keep copy of original image
original = item['image'].copy()
bbox = None
if item['maskPath'] is not None:
# Load the mask image and add it to the image as the alpha channel
# If the image already has an alpha channel, it will be overwritten
f = open(item['maskPath'], 'rb')
mask = Image.open(f)
mask.load()
if mask.mode != 'L':
mask = mask.convert('L')
f.close()
item['image'].putalpha(mask)
elif item['image'].mode != 'LA':
diffImage = ImageChops.difference(item['image'],
ImageChops.constant(item['image'], self.background))
if self.automaskingTolerance:
diffImage = ImageChops.subtract(diffImage,
ImageChops.constant(item['image'],
self.automaskingTolerance))
bbox = diffImage.getbbox()
if not bbox:
bbox = (0, 0, item['image'].size[0], item['image'].size[1])
elif self.automaskingPadding:
bbox = ( max(0, bbox[0] - self.automaskingPadding),
max(0, bbox[1] - self.automaskingPadding),
min(item['image'].size[0], bbox[2] + self.automaskingPadding),
min(item['image'].size[1], bbox[3] + self.automaskingPadding),
)
if not self.minimalBoundingBox:
# Do not use the bounding box found from the background color unless
# it does not touch any of the sides of the image
if not (bbox[0] > 0
and bbox[1] > 0
and bbox[2] < item['image'].size[0]
and bbox[3] < item['image'].size[1]):
# Bounding box was not brought in on all four sides
# Set it back to the full image
bbox = (0, 0, item['image'].size[0], item['image'].size[1])
mask = ImageChops.constant(item['image'], 0)
mask.paste(255, bbox)
item['image'].putalpha(mask)
if setErodeFlag:
# Check if the image has a nonuniform alpha channel
# If so, set the 'erode' option to False, indicating that the alpha
# channel is meaningful and it does not need to be eroded by GaborNode
# to avoid "phantom edges"
# If a bounding box was used to generated the alpha channel, use the box
# directly to avoid the expense of scanning the pixels
if bbox:
# Bounding box was used
# Set to dilate mode if the bounding box doesn't touch any of the edges
if bbox[0] != 0 \
and bbox[1] != 0 \
and bbox[2] != item['image'].size[0] \
and bbox[3] != item['image'].size[1]:
# Nonuniform alpha channel (from bounding box)
item['erode'] = False
else:
extrema = item['image'].split()[1].getextrema()
if extrema[0] != extrema[1]:
# Nonuniform alpha channel
item['erode'] = False
if returnOriginal:
return original
def _applyFilter(self, image, imageIndex, filterIndex):
"""Apply the specified filter to the image."""
filtered = self.filters[filterIndex][2].process(image)
if type(filtered) is not list:
filtered = [filtered]
for i, item in enumerate(filtered):
if type(item) is not list:
filtered[i] = [item]
# Verify that the filter produced the correct number of outputs
outputCount = self.filters[filterIndex][2].getOutputCount()
if type(outputCount) not in (tuple, list):
outputCount = (outputCount, 1)
if len(filtered) != outputCount[0] or \
False in [len(outputs) == outputCount[1] for outputs in filtered]:
raise RuntimeError("The %s filter " % self.filters[filterIndex][0] +
"did not return the correct number of outputs. The number of images " +
"that it returned does not match the return value of the filter's " +
"getOutputCount() method.")
for item in filtered:
for image in item:
# Verify that the image has the correct mode
if image.mode != 'LA':
s = """Filtered image returned by the "%s" filter (index %d) has
illegal mode '%s'. Images must be mode 'LA' (grayscale with alpha
channel containing the mask).""" % (self.filters[filterIndex][0],
filterIndex, image.mode)
if image.mode == 'L':
s += " The filter may have removed the alpha channel."
raise RuntimeError(s)
# Update the pixel count
self._pixelCount += image.size[0] * image.size[1]
if self.logFilteredImages:
# Save filter output to disk
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
path = os.path.join(filterLogDir, '%02d_' % filterIndex +
self.filters[filterIndex][0], '%09d' % imageIndex)
# Create the output directory if it does not exist
if not os.path.exists(path):
os.makedirs(path)
index = 0
pathContents = [x for x in sorted(os.listdir(path)) if re.match('\d', x)]
if pathContents:
index = int(re.match('(\d*)', pathContents[-1]).groups()[0]) + 1
for f in filtered:
if len(f) > 1:
# Simultaneous outputs
for i, image in enumerate(f):
filename = os.path.join(path, '%02d_%02d.png' % (index, i))
image.split()[0].save(filename)
else:
# Single output
filename = os.path.join(path, '%02d.png' % index)
f[0].split()[0].save(filename)
index += 1
return filtered
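# Shape of the value returned by _applyFilter: a list with one entry per
# filter output, each entry itself a list of simultaneous responses. A filter
# whose getOutputCount() returns (3, 1) therefore yields something like
# [[imgA], [imgB], [imgC]]; a plain integer count of 3 is treated the same
# way, since it is normalized to (3, 1) above.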
def _applyPostFilters(self, image, filterIndex=0):
"""
Recursively apply the postFilters to the image and return a list of images.
"""
# Filter the image
raw_output = None
filtered = self.postFilters[filterIndex][2].process(image)
# Handle special case where the post filter wants to control the output
# of the image sensor (e.g. convolution post filters)
if type(filtered) is tuple:
assert len(filtered) == 2
raw_output = filtered[1]
assert type(raw_output) == numpy.ndarray
filtered = filtered[0][0]
# Flatten all responses into a single list
if type(filtered) is not list:
# One response
filtered = [filtered]
else:
if type(filtered[0]) is list:
# Simultaneous responses
filtered2 = []
for responses in filtered:
filtered2.extend(responses)
filtered = filtered2
# Verify that the filter produced the correct number of outputs
outputCount = self.postFilters[filterIndex][2].getOutputCount()
if type(outputCount) in (tuple, list):
if len(outputCount) == 1:
outputCount = outputCount[0]
else:
outputCount = outputCount[0] * outputCount[1]
if len(filtered) != outputCount:
raise RuntimeError("%s postFilter " % self.postFilters[filterIndex][0] +
"did not return the correct number of outputs")
for image in filtered:
# Verify that the image has the correct mode
if image.mode != 'LA':
s = """Filtered image returned by the "%s" postFilter (index %d) has
illegal mode '%s'. Images must be mode 'LA' (grayscale with alpha
channel containing the mask).""" % (self.postFilters[filterIndex][0],
filterIndex, image.mode)
if image.mode == 'L':
s += " The filter may have removed the alpha channel."
raise RuntimeError(s)
if self.logFilteredImages:
# Save intermediate outputs to disk
filterLogDir = os.path.join(self.logDir, 'output_from_post_filters')
path = os.path.join(filterLogDir, '%02d_' % filterIndex +
self.postFilters[filterIndex][0])
# Create the output directory if it does not exist
if not os.path.exists(path):
os.makedirs(path)
# Save the images
if len(filtered) > 1:
for i, image in enumerate(filtered):
name = os.path.join(path, "%09d_%02d.png" % (self._iteration, i))
image.save(name)
else:
name = os.path.join(path, "%09d.png" % self._iteration)
filtered[0].save(name)
if filterIndex == len(self.postFilters) - 1:
return filtered, raw_output
# Concatenate all responses into one flat list of simultaneous responses
responses = []
for image in filtered:
response = self._applyPostFilters(image, filterIndex+1)
if raw_output is not None:
assert (response[1] is None) # Only one post-filter can determine raw_output
responses.extend(response[0])
return responses, raw_output
def _applyAllFilters(self, image=None):
"""
Run all filters on the specified image or all images.
"""
numFilterOutputs = self._getNumFilterOutputs(self.filters)
if image is None:
images = xrange(len(self._imageList))
else:
images = [image]
for image in images:
filterPosition = [0] * len(self.filters)
while True:
self._getFilteredImages({'image': image, 'filters': filterPosition})
for i in xrange(len(self.filters)-1, -1, -1):
filterPosition[i] += 1
if filterPosition[i] == numFilterOutputs[i]:
filterPosition[i] = 0
else:
break
if filterPosition == [0] * len(self.filters):
break
def _getOriginalImage(self, index=None):
"""
Get the specified image, loading it if necessary.
index -- Index of the image to retrieve. Retrieves the current image if
not specified.
"""
if index is None:
index = self.explorer[2].position['image']
if not self._imageList[index]['image']:
# Image needs to be loaded
self._loadImage(index)
return self._imageList[index]['image']
def _getFilteredImages(self, position=None):
"""
Get the filtered images specified by the position.
position -- Position to use. Uses current position if not specified.
"""
if not position:
position = self.explorer[2].position
if not self._imageList[position['image']]['image']:
# Image needs to be loaded
self._loadImage(position['image'])
if not self.filters:
# No filters - return original version
return [self._imageList[position['image']]['image']]
# Iterate through the specified list of filter positions
# Run filters as necessary
allFilteredImages = self._imageList[position['image']]['filtered']
filterPosition = tuple()
for filterIndex, pos in enumerate(position['filters']):
filterPosition += (pos,)
if not filterPosition in allFilteredImages:
# Run the filter
if len(filterPosition) > 1:
# Use the first of the simultaneous responses
imageToFilter = allFilteredImages[filterPosition[:-1]][0]
else:
imageToFilter = self._imageList[position['image']]['image']
# Inject the original image path to the Image's info
# dict in case the filter wants to use it.
imageToFilter.info['path'] = self._imageList[position['image']]['imagePath']
newFilteredImages = self._applyFilter(imageToFilter, position['image'],
filterIndex)
for j, image in enumerate(newFilteredImages):
# Store in the dictionary of filtered images
thisFilterPosition = filterPosition[:-1] + (j,)
allFilteredImages[thisFilterPosition] = image
# Update the filter queue
thisFilterTuple = (position['image'], thisFilterPosition)
if thisFilterTuple in self._filterQueue:
self._filterQueue.remove(thisFilterTuple)
self._filterQueue.insert(0, thisFilterTuple)
# Update the queues to mark this image as recently accessed
# Only mark the original image if it could be loaded from disk again
if self._imageList[position['image']]['imagePath']:
if position['image'] in self._imageQueue:
self._imageQueue.remove(position['image'])
self._imageQueue.insert(0, position['image'])
# Mark all precursors to the current filter
for i in xrange(1, len(position['filters']) + 1):
partialFilterTuple = (position['image'], tuple(position['filters'][:i]))
if partialFilterTuple in self._filterQueue:
self._filterQueue.remove(partialFilterTuple)
self._filterQueue.insert(0, partialFilterTuple)
self._meetMemoryLimit()
return allFilteredImages[filterPosition]
def _getImageInfo(self, imageIndex=None):
"""
Get the dictionary of info for the image, excluding actual PIL images.
image -- Image index to use. Uses current position if not specified.
"""
if imageIndex is None:
imageIndex = self.explorer[2].position['image']
item = self._imageList[imageIndex].copy()
item.pop('image')
item.pop('filtered')
return item
def _getOutputImages(self):
"""Get the current image(s) to send out, based on the current position.
A post filter may want to provide the final output of the node. In
this case it will return a non-None final output that the ImageSensor will
use as the output of the node regardless of the output images.
"""
if self.prevPosition['reset'] and self.blankWithReset:
# Blank
return ([Image.new('LA', (self.enabledWidth, self.enabledHeight))] \
* self.depth, None)
else:
# Get the image(s) to send out
allImages = self._getFilteredImages()
# Calculate a scale factor in each dimension for adjusting the offset
scaleX = [image.size[0] / float(allImages[0].size[0])
for image in allImages]
scaleY = [image.size[1] / float(allImages[0].size[1])
for image in allImages]
offset = self.explorer[2].position['offset']
# Normally, the enabledSize is smaller than the sensor size. But, there are some
# configurations where the user might want to explore in a larger size, then run
# it through a post-filter to get the end sensor size (for example, when using a
# fish-eye post filter). If we detect that the enabledSize is greater than the
# sensor size, then change our crop bounds
dstImgWidth = max(self.width, self.enabledWidth)
dstImgHeight = max(self.height, self.enabledHeight)
# Cut out the relevant part of each image
newImages = []
for i, image in enumerate(allImages):
x = int(offset[0] * scaleX[i])
y = int(offset[1] * scaleY[i])
cropBounds = (max(0, x),
max(0, y),
min(x + dstImgWidth, image.size[0]),
min(y + dstImgHeight, image.size[1]))
croppedImage = image.crop(cropBounds)
newImage = Image.new(croppedImage.split()[0].mode,
(dstImgWidth, dstImgHeight),
self.background)
if newImage.mode == 'L':
newImage.putalpha(Image.new('L', newImage.size))
newImage.paste(croppedImage, (max(0, -x), max(0, -y)))
newImages.append(newImage)
# Crop the shifted images back to the enabled size
croppedImages = [image.crop((0, 0,
int(round(self.enabledWidth * scaleX[i])),
int(round(self.enabledHeight * scaleY[i]))))
for i, image in enumerate(newImages)]
# Filter through the post filters
final_output = None
if self.postFilters:
newCroppedImages = []
for i in xrange(len(croppedImages)):
(responses, raw_output) = self._applyPostFilters(croppedImages[i])
if raw_output is not None:
assert final_output is None
final_output = raw_output
while type(responses[0]) == list:
responses = responses[0]
newCroppedImages.extend(responses)
croppedImages = newCroppedImages
# Check that the number of images matches the depth
if len(croppedImages) != self.depth:
raise RuntimeError("The filters and postFilters created %d images to"
" send out simultaneously, which does not match ImageSensor's"
" depth parameter, set to %d." % (len(croppedImages), self.depth))
# Invert output if necessary
if self.invertOutput:
if croppedImages[0].mode == '1':
croppedImages = [ImageChops.invert(image) for image in croppedImages]
else:
for i, croppedImage in enumerate(croppedImages):
grayscale = croppedImage.split()[0]
alpha = croppedImage.split()[1]
inverted = ImageChops.invert(grayscale)
inverted.putalpha(alpha)
croppedImages[i] = inverted
return (croppedImages, final_output)
def _logCommand(self, reportList=None, argList='auto'):
"""
Print information about the calling command to the ImageSensor log file.
Without arguments, prints the calling command's name and arguments. Add
extra information to print with reportList. If necessary, override the
list of arguments with argList.
reportList -- Extra data to print, as a list of tuples (like an
ordered dictionary).
argList -- Arguments to print, as a list of tuples. Default value
'auto' specifies that they should be obtained automatically.
ImageSensor has a very specific log file format that is machine-readable.
A typical section looks like this:
('seek', {'iteration': 0, 'image': None, 'position': None}, {})
('compute', {}, {'iteration': 0, 'position': {'image': 0,
'filters': [0,0,0], 'offset': [0,0]}, 'isBlank': False})
The log snippet above indicates that the 'seek' command was issued, with
the argument iteration=0. The command executed and has nothing extra to
report. Then the runtime engine called 'compute'. The compute command
reports back that this was iteration 0, and the explorer chose image 0,
filter position [0,0,0], and offset [0,0].
Since each call generates one line of properly-formatted Python code, it
is easy to read in the report file and parse it with a Python script.
Does not print if self.logText is False. Opens the file if necessary.
"""
if not self.logText:
return
# Set up the log directory and log file if necessary
if not os.path.exists(self.logDir):
os.makedirs(self.logDir)
if self.logFile is None:
self.logFile = open(os.path.join(self.logDir, 'imagesensor_log.txt'), 'w')
# Get the caller's name
callerInfo = inspect.stack()[1]
callerName = callerInfo[3]
# Automatically get the caller's arguments, unless they were specified
if argList == 'auto':
callerFrame = callerInfo[0]
callerArgs, a, k, callerLocals = inspect.getargvalues(callerFrame)
argList = [(name, callerLocals[name]) for name in callerArgs
if name != 'self']
# Create strings to print
# argList and reportList are lists of tuples
# Convert each into a string form of a dictionary, but preserve the order
argStr = reportStr = {}
if argList:
argStr = '{'
for key, value in argList:
argStr += "'%s': %s, " % (key, repr(value))
argStr = argStr[:-2] + '}'
if reportList:
reportStr = '{'
for key, value in reportList:
reportStr += "'%s': %s, " % (key, repr(value))
reportStr = reportStr[:-2] + '}'
# Print to the file
print >>self.logFile, '(%s, %s, %s)' \
% (repr(callerName), argStr, reportStr) + os.linesep
self.logFile.flush()
def _logOutputImages(self):
"""
Save the output images to disk.
"""
# Create output directory if necessary
outputLogDir = os.path.join(self.logDir, 'output_to_network')
if not os.path.exists(outputLogDir):
os.makedirs(outputLogDir)
# Save the sensor's output images
if self.depth > 1:
for i in xrange(self.depth):
outputImageName = "%09d_%02d.png" % (self._iteration, i)
name = os.path.join(outputLogDir, outputImageName)
self.outputImage[i].split()[0].save(name)
else:
outputImageName = "%09d.png" % self._iteration
name = os.path.join(outputLogDir, outputImageName)
self.outputImage.split()[0].save(name)
def _logBoundingBox(self, bbox):
"""
Log the current bounding box
"""
# Create the log directory and log file if necessary
if not os.path.exists(self.logDir):
os.makedirs(self.logDir)
if self.bboxLogFile is None:
self.bboxLogFile = open(os.path.join(self.logDir, 'imagesensor_bbox_log.txt'), 'w')
# Log the bounding box
print >>self.bboxLogFile, '%d %d %d %d' % (bbox[0], bbox[1], bbox[2], bbox[3])
self.bboxLogFile.flush()
def _logOriginalImage(self):
"""
Save the original, unfiltered image to disk.
"""
# Create output directory if necessary
originalLogDir = os.path.join(self.logDir, 'original_images')
if not os.path.exists(originalLogDir):
os.makedirs(originalLogDir)
# Save the original image
originalImageName = "%09d.png" % self._iteration
image = self._getOriginalImage().split()[0]
image.save(os.path.join(originalLogDir, originalImageName))
def _logLocationImage(self):
"""
Save the location of the sensor window to disk (as an image).
"""
# Create output directory if necessary
locationLogDir = os.path.join(self.logDir, 'output_locations')
if not os.path.exists(locationLogDir):
os.makedirs(locationLogDir)
# Save the location image
if not self.locationImage:
self.locationImage = self._createLocationImage()
locationImageName = "%09d.png" % self._iteration
self.locationImage.save(os.path.join(locationLogDir, locationImageName))
def _createLocationImage(self):
"""
Create the 'location' image, with a rectangle around the sensor window.
"""
if self.prevPosition['reset'] and self.blankWithReset:
# Create a blank image
locationImage = Image.new('1', (self.width, self.height))
if self.invertOutput:
locationImage = ImageChops.invert(locationImage)
else:
# Get the filtered image
firstImage = self._getFilteredImages(self.prevPosition)[0]
# Select backdrop upon which sensor position will be overlaid
if self.logLocationOnOriginalImage:
filteredImage = firstImage
firstImage = self._getOriginalImage(self.prevPosition['image'])
if firstImage.size != filteredImage.size:
raise RuntimeError("logLocationOnOriginalImage is True, but the"
" filtered image does not match the size of the original"
" image, so the location image would be invalid")
locationImage = Image.new('RGB', firstImage.size)
locationImage.paste(firstImage, (0,0))
locationImageDraw = ImageDraw.Draw(locationImage)
x, y = self.prevPosition['offset']
x2, y2 = x + self.enabledWidth - 1, y + self.enabledHeight - 1
locationImageDraw.rectangle((x-1, y-1, x2+1, y2+1), outline='red')
if locationImage.size[0] > 32 or locationImage.size[1] > 32:
# Draw again to create a thicker border
locationImageDraw.rectangle((x-2, y-2, x2+2, y2+2), outline='red')
return locationImage
def _writeCategoryToFile(self, category):
"""
Write the specified category index to the file at self.categoryOutputFile.
category -- Category index (integer).
"""
if self.categoryOutputFile: # Only write if we have a valid filename
if not self._categoryOutputFile:
self._categoryOutputFile = open(self.categoryOutputFile, 'w')
# Write a 1 to the first line to specify one entry per line
self._categoryOutputFile.write('1' + os.linesep)
self._categoryOutputFile.write(str(category) + os.linesep)
self._categoryOutputFile.flush()
def _setFilters(self, filters):
"""
Change one or more filters, and recompute the ones that changed.
filters -- List of filters, where each filter is a list [classname,
parameters] (or just a string with the class name, if the filter needs
no parameters).
Filters should be located in the regions/ImageSensorFilters directory.
"""
if filters:
if not isinstance(filters, list):
raise TypeError("'filters' must be a list of one or more filters")
if isinstance(filters, list) and len(filters) == 2 \
and isinstance(filters[1], dict):
raise TypeError("'filters' must be a _list_ of filters. If you "
"are specifying a filter with the [name, {args}] "
"syntax, nest it within a list: [[name, {args}]]")
filters = copy.deepcopy(filters)
if self.logFilteredImages:
# Remove the filter log directory if it exists
filterLogDir = os.path.join(self.logDir, 'output_from_filters')
if os.path.exists(filterLogDir):
shutil.rmtree(filterLogDir)
if filters is None:
filters = []
elif type(filters) is tuple:
filters = list(filters)
for i, filter in enumerate(filters):
if type(filter) is str:
filters[i] = [filter, {}]
elif type(filter) is tuple:
filters[i] = list(filters[i])
if len(filters[i]) == 1:
filters[i].append({})
self.filters = filters
self._importFilters(self.filters)
# Validate no filter except the last returns simultaneous responses
for i in xrange(len(self.filters)-1):
outputCount = self.filters[i][2].getOutputCount()
if type(outputCount) in (tuple, list) and len(outputCount) > 1 \
and outputCount[1] > 1:
raise RuntimeError("Only the last filter can return a nested list of "
"images (multiple simultaneous responses). "
"The %s filter, " % self.filters[i][0] +
"index %d of %d, " % (i, len(self.filters)-1) +
"creates %d simultaneous responses." % outputCount[1])
# Invalidate the filtered versions of all images
for item in self._imageList:
if item['filtered']:
item['filtered'] = {}
self._filterQueue = []
# Update the pixel count to only count the original images
self._pixelCount = 0
for i in self._imageQueue:
image = self._imageList[i]['image']
self._pixelCount += image.size[0] * image.size[1]
# Tell the explorer about these new filters
if type(self.explorer) == list and len(self.explorer) > 2:
self.explorer[2].update(numFilters=len(filters),
numFilterOutputs=self._getNumFilterOutputs(self.filters))
def _setPostFilters(self, postFilters):
"""
Change one or more postFilters, and recompute the ones that changed.
postFilters -- List of filters, where each filter is a list
[classname, parameters] (or just a string with the class name, if the
filter needs no parameters).
Filters should be located in the regions/ImageSensorFilters directory.
"""
if postFilters:
if not isinstance(postFilters, list):
raise TypeError("'postFilters' must be a list of one or more filters")
if isinstance(postFilters, list) and len(postFilters) == 2 \
and isinstance(postFilters[1], dict):
raise TypeError("'postFilters' must be a _list_ of filters. If you "
"are specifying a filter with the [name, {args}] "
"syntax, nest it within a list: [[name, {args}]]")
postFilters = copy.deepcopy(postFilters)
if postFilters is None:
postFilters = []
elif type(postFilters) is tuple:
postFilters = list(postFilters)
for i, filter in enumerate(postFilters):
if type(filter) is str:
postFilters[i] = [filter, {}]
elif type(filter) is tuple:
postFilters[i] = list(postFilters[i])
if len(postFilters[i]) == 1:
postFilters[i].append({})
self.postFilters = postFilters
self._importFilters(self.postFilters)
def _getNumFilterOutputs(self, filters):
"""
Return the number of outputs for each filter.
Ignores simultaneous outputs.
"""
numFilterOutputs = []
for f in filters:
n = f[2].getOutputCount()
if type(n) in (tuple, list):
numFilterOutputs.append(n[0])
elif type(n) is int:
numFilterOutputs.append(n)
else:
raise RuntimeError("%s filter must return an int or a " % f[0]
+ "list/tuple of two ints from getOutputCount()")
return numFilterOutputs
def _importFilters(self, filters):
"""
Import and instantiate all the specified filters.
This method lives on its own so that it can be used by both _setFilters
and _setPostFilters.
"""
for i in xrange(len(filters)):
# Import the filter
# If name is just the class name, such as 'PadToFit', we assume the same
# name for the module: names = ['PadToFit', 'PadToFit']
# If name is of the form 'ModuleName.ClassName' (useful to try multiple
# versions of the same filter): names = ['ModuleName', 'ClassName']
# By default, ImageSensor searches for filters in
# nupic.regions.ImageSensorFilters. If the import fails, it tries the
# import unmodified - so you may use filters that are located anywhere
# that Python knows about.
if '.' not in filters[i][0]:
moduleName = className = filters[i][0]
else:
components = filters[i][0].split('.')
moduleName = '.'.join(components[:-1])
className = components[-1]
try:
# Search in ImageSensorFilters first
filterModule = __import__('nupic.regions.ImageSensorFilters.%s'
% moduleName, {}, {}, className)
except ImportError:
try:
filterModule = __import__(moduleName, {}, {}, className)
except ImportError:
raise RuntimeError('Could not find filter "%s"' % filters[i][0])
filterClass = getattr(filterModule, className)
# Instantiate the filter
filters[i].append(filterClass(**copy.deepcopy(filters[i][1])))
filters[i][2].update(mode=self.mode, background=self.background)
def _setExplorer(self, explorer):
"""
Set the explorer (algorithm used to explore the input space).
explorer -- List containing the explorer name and parameters.
"""
if explorer is None:
raise RuntimeError("Must specify explorer (try 'Flash' for no sweeping)")
explorer = copy.deepcopy(explorer)
if type(explorer) is str:
explorer = [explorer, {}]
elif type(explorer) is tuple:
explorer = list(explorer)
if len(explorer) == 1:
explorer.append({})
# Import the explorer
# If name is just the class name, such as 'Flash', we assume the same
# name for the module: names = ['Flash', 'Flash']
# If name is of the form 'ModuleName.ClassName' (useful to try multiple
# versions of the same explorer): names = ['ModuleName', 'ClassName']
# By default, ImageSensor searches for explorers in
# nupic.regions.ImageSensorExplorers. If the import fails, it tries the
# import unmodified - so you may use explorers that are located anywhere
# that Python knows about.
if '.' not in explorer[0]:
moduleName = className = explorer[0]
else:
components = explorer[0].split('.')
moduleName = '.'.join(components[:-1])
className = components[-1]
try:
# Search in ImageSensorExplorers first
explorerModule = __import__('nupic.regions.ImageSensorExplorers.%s'
% moduleName, {}, {}, className)
except ImportError:
try:
explorerModule = __import__(moduleName, {}, {}, className)
except ImportError:
raise RuntimeError('Could not find explorer "%s"' % explorer[0])
explorerClass = getattr(explorerModule, className)
explorerArgs = copy.deepcopy(explorer[1])
# Append the image accessor methods to the arguments
explorerArgs.update({
'getOriginalImage': self._getOriginalImage,
'getFilteredImages': self._getFilteredImages,
'getImageInfo': self._getImageInfo
})
# Instantiate the explorer
self.explorer = explorer
self.explorer.append(explorerClass(**explorerArgs))
self.explorer[2].update(numImages=len(self._imageList),
numFilters=len(self.filters),
numFilterOutputs=self._getNumFilterOutputs(self.filters),
enabledWidth=self.enabledWidth, enabledHeight=self.enabledHeight,
blankWithReset=self.blankWithReset)
def _meetMemoryLimit(self):
"""
Unload images as necessary to stay within the memory limit.
"""
if self.memoryLimit < 0:
return
while self._pixelCount * 4 / 1000000.0 > self.memoryLimit:
if len(self._filterQueue) > 1:
# Unload the filtered image used least recently
imageIndex, filterPosition = self._filterQueue.pop()
filtered = self._imageList[imageIndex]['filtered'][filterPosition]
for i in xrange(len(filtered)):
self._pixelCount -= filtered[i].size[0] * filtered[i].size[1]
self._imageList[imageIndex]['filtered'].pop(filterPosition)
elif self._imageQueue:
if len(self._imageQueue) == 1 and not self.filters:
# No filters and this is the current image - don't unload it
break
# Unload the original image used least recently
imageIndex = self._imageQueue.pop()
size = self._imageList[imageIndex]['image'].size
self._pixelCount -= size[0] * size[1]
self._imageList[imageIndex]['image'] = None
else:
break
def _updatePrevPosition(self):
"""
Deep copy position to self.prevPosition.
"""
position = self.explorer[2].position
self.prevPosition = {
'image': position['image'],
'filters': copy.copy(position['filters']),
'offset': copy.copy(position['offset']),
'reset': position['reset']
}
def compute(self, inputs=None, outputs=None):
"""
Generate the next sensor output and send it out.
This method is called by the runtime engine.
"""
#from dbgp.client import brk; brk(port=9019)
if len(self._imageList) == 0:
raise RuntimeError("ImageSensor can't run compute: no images loaded")
# Check whether the new image belongs to a new sequence; if so, force a reset
prevPosition = self.prevPosition
if prevPosition is not None:
prevSequenceID = self._imageList[prevPosition['image']]['sequenceIndex']
else:
prevSequenceID = None
self._updatePrevPosition()
newPosition = self.prevPosition
if newPosition is not None:
newSequenceID = self._imageList[newPosition['image']]['sequenceIndex']
else:
newSequenceID = None
if newSequenceID != prevSequenceID:
self.prevPosition['reset'] = True
# Get the image(s) to send out
outputImages, final_output = self._getOutputImages()
# Compile information about this iteration and log it
imageInfo = self._getImageInfo()
if imageInfo['imagePath'] is None:
filename = ""
else:
filename = os.path.split(imageInfo['imagePath'])[1]
category = imageInfo['categoryIndex']
if category == -1:
categoryName = ""
else:
categoryName = self.categoryInfo[category][0]
self._logCommand([
('iteration', self._iteration),
('position', self.explorer[2].position),
('filename', filename),
('categoryIndex', category),
('categoryName', categoryName),
('erode', imageInfo['erode']),
('blank', bool(self.prevPosition['reset'] and self.blankWithReset))
], None)
# If we don't have a partition ID at this point (e.g., because
# of memory limits), then we need to try and pull from the
# just-loaded image
if imageInfo['partitionID'] is None:
imgPosn = self.explorer[2].position['image']
imageInfo['partitionID'] = self._imageList[imgPosn].get('partitionID')
if self.depth == 1:
self.outputImage = outputImages[0]
else:
self.outputImage = outputImages
# Invalidate the old location image
self.locationImage = None
# Log the images and locations if specified
if self.logOutputImages:
self._logOutputImages()
if self.logOriginalImages:
self._logOriginalImage()
if self.logLocationImages:
self._logLocationImage()
holdFor = self.explorer[2].holdFor
self._holdForOffset += 1
if self._holdForOffset >= holdFor:
self._holdForOffset = 0
self.explorer[2].next()
self._iteration += 1
# Save category to file
self._writeCategoryToFile(category)
if outputs:
# Convert the output images to a numpy vector
croppedArrays = [numpy.asarray(image.split()[0], RealNumpyDType)
for image in outputImages]
# Pad the images to fit the full output size if necessary, generating
# a stack of images, each of size self.width x self.height
pad = self._cubeOutputs and \
(self.depth > 1 or
croppedArrays[0].shape != (self.height, self.width))
if pad:
fullArrays = [numpy.zeros((self.height, self.width), RealNumpyDType)
for i in xrange(self.depth)]
for i in xrange(self.depth):
fullArrays[i][:croppedArrays[i].shape[0],:croppedArrays[i].shape[1]] \
= croppedArrays[i]
else:
fullArrays = croppedArrays
# Flatten and concatenate the arrays
outputArray = numpy.concatenate([a.flat for a in fullArrays])
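# When the arrays are padded to full size (or already match), the flattened
# vector has width * height * depth elements, the same value reported by the
# 'activeOutputCount' parameter and by getOutputElementCount('dataOut').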
# Send black and white images as binary (0, 1) instead of (0..255)
if self.mode == 'bw':
outputArray /= 255
outputArray = outputArray.round()
# dataOut - main output
if final_output is None:
outputs['dataOut'][:] = outputArray
else:
outputs['dataOut'][:] = final_output
# categoryOut - category index
outputs['categoryOut'][:] = \
numpy.array([float(category)], RealNumpyDType)
# auxDataOut - auxiliary data
auxDataOut = imageInfo['auxData']
if auxDataOut is not None:
outputs['auxDataOut'][:] = auxDataOut
# resetOut - reset flag
if 'resetOut' in outputs:
outputs['resetOut'][:] = \
numpy.array([float(self.prevPosition['reset'])],RealNumpyDType)
# bboxOut - bounding box
if 'bboxOut' in outputs and len(outputs['bboxOut']) == 4:
bbox = outputImages[0].split()[1].getbbox()
if bbox is None:
bbox = (0, 0, 0, 0)
outputs['bboxOut'][:] = numpy.array(bbox, RealNumpyDType)
# Optionally log the bounding box information
if self.logBoundingBox:
self._logBoundingBox(bbox)
# alphaOut - alpha channel
if 'alphaOut' in outputs and len(outputs['alphaOut']) > 1:
alphaOut = \
numpy.asarray(outputImages[0].split()[1], RealNumpyDType).flatten()
if not imageInfo['erode']:
# Change the 0th element of the output to signal that the alpha
# channel should be dilated, not eroded
alphaOut[0] = -alphaOut[0] - 1
outputs['alphaOut'][:alphaOut.shape[0]] = alphaOut
# partitionOut - partition ID (defaults to zero)
if 'partitionOut' in outputs:
partition = imageInfo.get('partitionID')
if partition is None:
partition = 0
outputs['partitionOut'][:] = \
numpy.array([float(partition)], RealNumpyDType)
def getParameter(self, parameterName, index=-1):
"""Get the value of an ImageSensor parameter."""
if parameterName == 'filters':
# Remove filter objects
return [filter[:2] for filter in self.filters]
elif parameterName == 'postFilters':
# Remove filter objects
return [filter[:2] for filter in self.postFilters]
elif parameterName == 'explorer':
# Remove explorer object
return self.explorer[:2]
elif parameterName == 'numImages':
return len(self._imageList)
elif parameterName == 'numMasks':
return len([True for x in self._imageList if x['maskPath']])
elif parameterName in ('numIterations', 'maxOutputVectorCount'):
return self.getNumIterations()
elif parameterName == 'activeOutputCount':
return self.width * self.height * self.depth
elif parameterName == 'position':
return self.explorer[2].position
elif parameterName == 'imageInfo':
return [self._getImageInfo(i) for i in xrange(len(self._imageList))]
elif parameterName == 'prevImageInfo':
if self.prevPosition and self._imageList:
return self._getImageInfo(self.prevPosition['image'])
else:
return None
elif parameterName == 'nextImageInfo':
if self.explorer[2].position and self._imageList:
return self._getImageInfo()
else:
return None
elif parameterName == 'categoryInfo':
return serializeCategoryInfo(self.categoryInfo)
elif parameterName == 'outputImage':
if self._iteration == 0:
return
if self.depth == 1:
return serializeImage(self.outputImage.split()[0])
else:
return [serializeImage(image.split()[0]) for image in self.outputImage]
elif parameterName == 'outputImageWithAlpha':
if self._iteration == 0:
return
if self.depth == 1:
return serializeImage(self.outputImage)
else:
return [serializeImage(image) for image in self.outputImage]
elif parameterName == 'originalImage':
if not self._imageList or self._iteration == 0:
return
return serializeImage(
self._getOriginalImage(self.prevPosition['image']).split()[0])
elif parameterName == 'locationImage':
if not self._imageList or self._iteration == 0 or not self.prevPosition:
return
if not self.locationImage:
self.locationImage = self._createLocationImage()
return serializeImage(self.locationImage)
elif parameterName == 'background':
if self.mode == 'bw':
return self.background / 255
else:
return self.background
elif parameterName == 'auxData':
auxData = [numpy.array(imageList['auxData']) for imageList in self._imageList]
return auxData
elif parameterName == 'sequenceCount':
return self.getSequenceCount()
elif parameterName == 'metadata':
metadata = dict()
# Compute the position relative to center
imageIdx = self.prevPosition['image']
image = self._getOriginalImage(imageIdx)
centerX = (image.size[0] - self.enabledWidth) / 2
centerY = (image.size[1] - self.enabledHeight) / 2
(posX, posY) = self.prevPosition['offset']
metadata['posnY'] = centerY - posY
metadata['posnX'] = centerX - posX
metadata['catIndex'] = self._getImageInfo(imageIdx)['categoryIndex']
metadata['catName'] = self.categoryInfo[metadata['catIndex']][0]
return str(metadata)
else:
return PyRegion.getParameter(self, parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""Set the value of an ImageSensor parameter."""
if parameterName == 'filters':
self._setFilters(parameterValue)
elif parameterName == 'postFilters':
self._setPostFilters(parameterValue)
elif parameterName == 'explorer':
self._setExplorer(parameterValue)
elif parameterName == 'enabledWidth':
self.enabledWidth = parameterValue
self.explorer[2].update(enabledWidth=parameterValue)
elif parameterName == 'enabledHeight':
self.enabledHeight = parameterValue
self.explorer[2].update(enabledHeight=parameterValue)
elif parameterName == 'width':
self.width = parameterValue
elif parameterName == 'height':
self.height = parameterValue
elif parameterName == 'blankWithReset':
self.blankWithReset = parameterValue
self.explorer[2].update(blankWithReset=parameterValue)
elif parameterName == 'categoryOutputFile':
if self._categoryOutputFile:
self._categoryOutputFile.close()
self._categoryOutputFile = None
self.categoryOutputFile = parameterValue
elif parameterName == 'categoryInfo':
self.categoryInfo = deserializeCategoryInfo(parameterValue)
# TODO change the names and indices of the loaded image?
elif parameterName == 'background':
self.background = parameterValue
if self.mode == 'bw':
self.background *= 255
for filter in self.filters + self.postFilters:
filter[2].update(background=self.background)
elif parameterName == 'logDir':
if self.logFile is not None and self.logDir != parameterValue:
self.logFile.close()
self.logFile = None
if self.bboxLogFile is not None and self.logDir != parameterValue:
self.bboxLogFile.close()
self.bboxLogFile = None
self.logDir = parameterValue
elif parameterName == 'logText':
self.logText = parameterValue
if self.logFile is not None and not self.logText:
self.logFile.close()
self.logFile = None
elif parameterName == 'memoryLimit':
self.memoryLimit = parameterValue
self._meetMemoryLimit()
else:
if not hasattr(self, parameterName):
raise Exception("%s is not a valid parameter of the ImageSensor" \
% parameterName)
setattr(self, parameterName, parameterValue)
def __getstate__(self):
"""Get serializable state."""
# Serialize images stored in categoryInfo
serializedCategoryInfo = [(name, b64encode(imageStr)) for name, imageStr
in self.getParameter('categoryInfo')]
# Get the object-less filters and explorer
resetFilters = self.getParameter('filters')
resetPostFilters = self.getParameter('postFilters')
resetExplorer = self.getParameter('explorer')
# Compile a dictionary of attributes to save
state = dict()
for name in ['width', 'height', 'depth', 'mode',
'blankWithReset', 'enabledWidth', 'enabledHeight', 'invertOutput',
'background', 'automaskingTolerance', 'automaskingPadding',
'memoryLimit', 'minimalBoundingBox', '_cubeOutputs', '_auxDataWidth']:
state[name] = getattr(self, name)
# Add attributes that have been manipulated
state.update({'serializedCategoryInfo': serializedCategoryInfo,
'resetExplorer': resetExplorer, 'resetFilters': resetFilters,
'resetPostFilters': resetPostFilters})
# Save a version number
state['version'] = 1.7
return state
def __setstate__(self, state):
"""Set state from serialized state."""
# Register a global variable for scanning or other tomfoolery
#PyNodeModule.nodes = getattr(PyNodeModule, 'nodes', []) + [self]
if type(state) is tuple:
raise RuntimeError("Cannot convert legacy ImageSensor state")
# Get the version number
version = state.pop('version')
# Get attributes that need to be manipulated
serializedCategoryInfo = state.pop('serializedCategoryInfo')
resetFilters = state.pop('resetFilters')
resetPostFilters = state.pop('resetPostFilters')
resetExplorer = state.pop('resetExplorer')
for name in state:
setattr(self, name, state[name])
# Deserialize images stored in categoryInfo (base64-encoded since version 1.64)
if version >= 1.64:
# Undo base64 encoding
serializedCategoryInfo = [(name, b64decode(imageStr)) for name, imageStr
in serializedCategoryInfo]
self.setParameter('categoryInfo', -1, serializedCategoryInfo)
# Set variables that weren't saved
self._imageList = []
self._imageQueue = []
self._filterQueue = []
self._pixelCount = 0
self._iteration = 0
self.logFile = None
self.bboxLogFile = None
self.logText = False
self.logOutputImages = False
self.logOriginalImages = False
self.logFilteredImages = False
self.logLocationImages = False
self.logLocationOnOriginalImage = False
self.logBoundingBox = False
self.logDir = "imagesensor_log"
self.categoryOutputFile = None
self._categoryOutputFile = None
self.outputImage = None
self.locationImage = None
self.prevPosition = None
# Set up the filters and explorer
self.explorer = None
self._setFilters(resetFilters)
self._setPostFilters(resetPostFilters)
self._setExplorer(resetExplorer)
self._cubeOutputs = not containsConvolutionPostFilter(resetPostFilters)
# Backward compatibility
if version < 1.63:
if not hasattr(self, 'automaskingTolerance'):
self.automaskingTolerance = 0
if not hasattr(self, 'automaskingPadding'):
self.automaskingPadding = 0
if not hasattr(self, '_holdForOffset'):
self._holdForOffset = 0
if not hasattr(self, '_auxDataWidth'):
self._auxDataWidth = 0
if version < 1.65:
# Set to True, the old behavior, though it is set to False by default
# in new networks
self.minimalBoundingBox = True
@classmethod
def getSpec(cls):
"""Return the Spec for this Region."""
ns = dict(
description=ImageSensor.__doc__,
singleNodeOnly=False,
inputs = {},
outputs = dict(
dataOut=dict(
description="""Pixels of the image.""",
dataType='Real32',
count=0,
regionLevel=False,
isDefaultOutput=True),
categoryOut=dict(
description="""Index of the current image's category.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
resetOut=dict(
description="""Boolean reset output.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
bboxOut=dict(
description="""Bounding box output (4-tuple).""",
dataType='Real32',
count=4,
regionLevel=True,
isDefaultOutput=False),
alphaOut=dict(
description="""Alpha channel output.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
partitionOut=dict(
description="""Index of the leave-one-out partition associated with the current image.""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
auxDataOut=dict(
description="""Auxiliary data sent directly to the classifier.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
),
parameters = dict(
outputImageWithAlpha=dict(
description="""Serialized version of the current output image(s) with the alpha channel.
If depth > 1, multiple serialized images will be returned in a list. To deserialize:
from nupic.image import deserializeImage
outputImage = deserializeImage(sensor.getParameter('outputImageWithAlpha'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
originalImage=dict(
description="""Serialized version of the original, unfiltered version of the
current image. To deserialize:
from nupic.image import deserializeImage
originalImage = deserializeImage(sensor.getParameter('originalImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
locationImage=dict(
description="""Serialized version of the current 'location image', which shows the
position of the sensor overlaid on the filtered image (optionally, the
original image). To deserialize:
from nupic.image import deserializeImage
locationImage = deserializeImage(sensor.getParameter('locationImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
height=dict(
description="""Height of the image, in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
automaskingPadding=dict(
description="""Affects the process by which bounding box masks
are automatically generated from images. After computing the
bounding box based on image similarity with respect to the background,
the box will be expanded by 'automaskPadding' pixels in all four
directions (constrained by the original size of the image.)""",
dataType='UInt32',
count=1,
constraints='interval: [0, ...]',
accessMode='ReadWrite'
),
numMasks=dict(
description="""Number of masks that the sensor has loaded.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
filters=dict(
description="""List of filters to apply to each image. Each element in the
list should be either a string (just the filter name) or a list containing
both the filter name and a dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
logOutputImages=dict(
description="""Toggle for writing each output to disk (as an image)
on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
prevPosition=dict(
description="""The position of the sensor from the *previous* compute, as a
dictionary. Because "outputImage" and "locationImage" match the output of the
previous compute (not the upcoming one), they do not correlate with the
"position" parameter; use this parameter instead.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
minimalBoundingBox=dict(
description="""Whether the bounding box found by looking at the
image background should be set even if it touches one of the sides of
the image. Set to False to avoid chopping edges off certain images, or
True if that is not an issue and you wish to use a sweeping explorer.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
numImages=dict(
description="""Number of images that the sensor has loaded.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
logLocationOnOriginalImage=dict(
description="""Whether to overlay the location rectangle on the original image instead
of the filtered image. Does not work if the two images do not have the
same size, and may be nonsensical even if they do (for example, if a filter
moved the object within the image).""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
outputImage=dict(
description="""Serialized version of the current output image(s). If depth > 1,
multiple serialized images will be returned in a list. To deserialize:
from nupic.image import deserializeImage
outputImage = deserializeImage(sensor.getParameter('outputImage'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
logFilteredImages=dict(
description="""Toggle for writing the intermediate versions of images to disk
as they pass through the filter chain.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
width=dict(
description="""Width of the image, in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
auxDataWidth=dict(
description="""The number of elements in in the auxiliary data vector.""",
dataType='int',
count=1,
constraints='',
accessMode='ReadWrite'
),
categoryOutputFile=dict(
description="""Name of file to which to write category number on each compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
logLocationImages=dict(
description="""Toggle for writing an image to disk on each iteration which shows
the location of the sensor window.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
nextImageInfo=dict(
description="""Dictionary of information for the image which will be used for the next
compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
enabledWidth=dict(
description="""Width of the enabled 'window', in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
numIterations=dict(
description="""Number of iterations necessary to fully explore all loaded images. Only
some explorers support this. Use the getNumIterations command if you wish to
get the number of iterations for a particular image.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
logText=dict(
description="""Toggle for verbose logging to imagesensor_log.txt.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
explorer=dict(
description="""Explorer (used to move the sensor through the input space).
Specify as a string (just the explorer name) or a list containing both the
explorer name and a dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
imageInfo=dict(
description="""A list with a dictionary of information for each image that has
been loaded.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
useAux=dict(
description="""Use auxiliary input data at the classifier level""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
automaskingTolerance=dict(
description="""Controls the process by which bounding box masks
are automatically generated from images based on similarity to the
specified 'background' pixel value. The bounding box will enclose all
pixels in the image that differ from 'background' by more than
the value specified in 'automaskingTolerance'. Default is 0, which
generates bounding boxes that enclose all pixels that differ at all
from the background. In general, increasing the value of
'automaskingTolerance' will produce tighter (smaller) bounding box masks.""",
dataType='UInt32',
count=1,
constraints='interval: [0, 255]',
accessMode='ReadWrite'
),
activeOutputCount=dict(
description="""The number of active elements in the dataOut output.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
),
memoryLimit=dict(
description="""Maximum amount of memory that ImageSensor should use for storing images,
in megabytes. ImageSensor will unload images and filter outputs to stay beneath
this ceiling. Set to -1 for no limit.""",
dataType='int',
count=1,
constraints='interval: [-1, ...]',
accessMode='ReadWrite'
),
logDir=dict(
description="""Name of the imagesensor log directory, which is created in the session
bundle if any logging options are enabled. Default is imagesensor_log.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
background=dict(
description="""Value of "background" pixels. May be used to pad images during sweeping,
as well as to find the bounds of an object if no mask is available.""",
dataType='UInt32',
count=1,
constraints='interval: [0, 255]',
accessMode='ReadWrite'
),
position=dict(
description="""The position of the sensor that will be used for the *next* compute,
as a dictionary.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
auxData=dict(
description="""List of Auxiliary Data for every image in the image list""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
invertOutput=dict(
description="""Whether to invert the pixel values before sending an image to the
network. If invertOutput is enabled, a white object on a black background
becomes a black object on a white background.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
categoryInfo=dict(
description="""A list with a tuple for each category that the sensor has learned. The
tuple contains the category name (i.e. 'dog') and a serialized version of
an example image for the category. To deserialize:
from nupic.regions.ImageSensor import deserializeCategoryInfo
categoryInfo = deserializeCategoryInfo(sensor.getParameter('categoryInfo'))""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
prevImageInfo=dict(
description="""Dictionary of information for the image used during the previous compute.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
logOriginalImages=dict(
description="""Toggle for writing the original, unfiltered version of the current
image to disk on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
enabledHeight=dict(
description="""Height of the enabled 'window', in pixels.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='ReadWrite'
),
depth=dict(
description="""Number of images to send out simultaneously.""",
dataType='UInt32',
count=1,
constraints='interval: [1, ...]',
accessMode='Read'
),
mode=dict(
description="""'gray' (8-bit grayscale) or 'bw' (1-bit black and white).""",
dataType='Byte',
count=0,
constraints='enum: gray, bw',
accessMode='Read'
),
logBoundingBox=dict(
description="""Toggle for logging the bounding box information on each iteration.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
blankWithReset=dict(
description="""** DEPRECATED ** Whether to send a blank output every time the explorer
generates a reset signal (such as when beginning a new sweep). Turning
on blanks increases the number of iterations.""",
dataType='bool',
count=1,
constraints='bool',
accessMode='ReadWrite'
),
metadata=dict(
description="""Parameter that contains a dict of metadata for the most
recently generated output image.""",
dataType='Byte',
count=0,
constraints='',
accessMode='Read'
),
postFilters=dict(
description="""List of filters to apply to each image just before the image
is sent to the network. Each element in the list should either be a string
(just the filter name) or a list containing both the filter name and a
dictionary specifying its arguments.""",
dataType='Byte',
count=0,
constraints='',
accessMode='ReadWrite'
),
maxOutputVectorCount=dict(
description="""(alias for numIterations) Number of iterations necessary to fully explore
all loaded images. Only some explorers support this. Use the getNumIterations command
if you wish to get the number of iterations for a particular image.""",
dataType='UInt32',
count=1,
constraints='',
accessMode='Read'
)
),
commands=dict(
loadSingleImage=dict(description='load a single image'),
loadMultipleImages=dict(description='load multiple images'),
)
)
return ns
#def getSpec(self):
# """Return the NodeSpec for this PyNode."""
#
# parent = PyNode.getSpec(self)
# out = NodeSpec(
# description=ImageSensor.__doc__,
# singleNodeOnly=False,
# inputs = [],
# outputs = [
# NodeSpecItem(name="dataOut", type=RealTypeName, elementCount=0,
# isDefaultOutput2=True,
# description="""Pixels of the image."""),
# NodeSpecItem(name="categoryOut", type=RealTypeName, regionLevel2=True,
# description="""Index of the current image's category."""),
# NodeSpecItem(name="resetOut", type=RealTypeName, regionLevel2=True,
# description="""Boolean reset output."""),
# NodeSpecItem(name="bboxOut", type=RealTypeName, regionLevel2=True,
# elementCount=4,
# description="""Bounding box output (4-tuple)."""),
# NodeSpecItem(name="alphaOut", type=RealTypeName,
# elementCount=0,
# description="""Alpha channel output."""),
# NodeSpecItem(name="partitionOut", type=RealTypeName, regionLevel2=True,
# description="""Index of the leave-one-out partition associated with the current image."""),
# NodeSpecItem(name="auxDataOut", type=RealTypeName, elementCount=0,
# regionLevel2=True,
# description="""Auxiliary data sent directly to the classifier.""")
# ],
# parameters = [
# NodeSpecItem(name="useAux", type="bool", constraints="bool", access="cgs",
# value=False,
# description="Use auxiliary input data at the classifier level"),
# NodeSpecItem(name="width", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Width of the image, in pixels."""),
# NodeSpecItem(name="height", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Height of the image, in pixels."""),
# NodeSpecItem(name="depth", type="uint", access="cg",
# constraints="interval: [1, ...]", value=1,
# description="""Number of images to send out simultaneously."""),
# NodeSpecItem(name="mode", type="string", access="cg",
# constraints="enum: gray, bw", value='gray',
# description="""'gray' (8-bit grayscale) or 'bw' (1-bit black and white)."""),
# NodeSpecItem(name="enabledWidth", type="uint", access="gs",
# constraints="interval: [1, ...]",
# description="""Width of the enabled 'window', in pixels."""),
# NodeSpecItem(name="enabledHeight", type="uint", access="gs",
# constraints="interval: [1, ...]",
# description="""Height of the enabled 'window', in pixels."""),
# NodeSpecItem(name="activeOutputCount", type="uint", access="g",
# description="""The number of active elements in the dataOut output."""),
# NodeSpecItem(name="background", type="uint", access="cgs",
# constraints="interval: [0, 255]", value=0,
# description="""Value of "background" pixels. May be used to pad images during sweeping,
# as well as to find the bounds of an object if no mask is available."""),
# NodeSpecItem(name="automaskingTolerance", type="uint", access="cgs",
# constraints="interval: [0, 255]", value=0,
# description="""Controls the process by which bounding box masks
# are automatically generated from images based on similarity to the
# specified 'background' pixel value. The bounding box will enclose all
# pixels in the image that differ from 'background' by more than
# the value specified in 'automaskingTolerance'. Default is 0, which
# generates bounding boxes that enclose all pixels that differ at all
# from the background. In general, increasing the value of
# 'automaskingTolerance' will produce tighter (smaller) bounding box masks."""),
# NodeSpecItem(name="automaskingPadding", type="uint", access="cgs",
# constraints="interval: [0, ...]", value=0,
# description="""Affects the process by which bounding box masks
# are automatically generated from images. After computing the
# bounding box based on image similarity with respect to the background,
#                    the box will be expanded by 'automaskingPadding' pixels in all four
# directions (constrained by the original size of the image.)"""),
# NodeSpecItem(name="invertOutput", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Whether to invert the pixel values before sending an image to the
# network. If invertOutput is enabled, a white object on a black background
# becomes a black object on a white background."""),
# NodeSpecItem(name="filters", type="PyObject", access="cgs",
# value=[],
# description="""List of filters to apply to each image. Each element in the
# list should be either a string (just the filter name) or a list containing
# both the filter name and a dictionary specifying its arguments."""),
# NodeSpecItem(name="postFilters", type="PyObject", access="cgs",
# value=[],
# description="""List of filters to apply to each image just before the image
# is sent to the network. Each element in the list should either be a string
# (just the filter name) or a list containing both the filter name and a
# dictionary specifying its arguments."""),
# NodeSpecItem(name="explorer", type="PyObject", access="cgs",
# value="Flash",
# description="""Explorer (used to move the sensor through the input space).
# Specify as a string (just the explorer name) or a list containing both the
# explorer name and a dictionary specifying its arguments."""),
# NodeSpecItem(name="categoryOutputFile", type="string", access="cgs",
# value="",
# description="""Name of file to which to write category number on each compute."""),
# NodeSpecItem(name="logText", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for verbose logging to imagesensor_log.txt."""),
# NodeSpecItem(name="logOutputImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing each output to disk (as an image)
# on each iteration."""),
# NodeSpecItem(name="logOriginalImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing the original, unfiltered version of the current
# image to disk on each iteration."""),
# NodeSpecItem(name="logFilteredImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing the intermediate versions of images to disk
# as they pass through the filter chain."""),
# NodeSpecItem(name="logLocationImages", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for writing an image to disk on each iteration which shows
# the location of the sensor window."""),
# NodeSpecItem(name="logLocationOnOriginalImage", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Whether to overlay the location rectangle on the original image instead
# of the filtered image. Does not work if the two images do not have the
# same size, and may be nonsensical even if they do (for example, if a filter
# moved the object within the image)."""),
# NodeSpecItem(name="logBoundingBox", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""Toggle for logging the bounding box information on each iteration."""),
# NodeSpecItem(name="logDir", type="string", access="cgs",
# value="imagesensor_log",
# description="""Name of the imagesensor log directory, which is created in the session
# bundle if any logging options are enabled. Default is imagesensor_log."""),
# NodeSpecItem(name="memoryLimit", type="int", access="cgs",
# constraints="interval: [-1, ...]", value=100,
# description="""Maximum amount of memory that ImageSensor should use for storing images,
# in megabytes. ImageSensor will unload images and filter outputs to stay beneath
# this ceiling. Set to -1 for no limit."""),
# NodeSpecItem(name="numImages", type="uint", access="g",
# description="""Number of images that the sensor has loaded."""),
# NodeSpecItem(name="numMasks", type="uint", access="g",
# description="""Number of masks that the sensor has loaded."""),
# NodeSpecItem(name="numIterations", type="uint", access="g",
# description="""Number of iterations necessary to fully explore all loaded images. Only
# some explorers support this. Use the getNumIterations command if you wish to
# get the number of iterations for a particular image."""),
# NodeSpecItem(name="maxOutputVectorCount", type="uint", access="g",
# description="""(alias for numIterations) Number of iterations necessary to fully explore
# all loaded images. Only some explorers support this. Use the getNumIterations command
# if you wish to get the number of iterations for a particular image."""),
# NodeSpecItem(name="blankWithReset", type="bool", constraints="bool", access="cgs",
# value=False,
# description="""** DEPRECATED ** Whether to send a blank output every time the explorer
# generates a reset signal (such as when beginning a new sweep). Turning
# on blanks increases the number of iterations."""),
# NodeSpecItem(name="position", type="PyObject", access="g",
# description="""The position of the sensor that will be used for the *next* compute,
# as a dictionary."""),
# NodeSpecItem(name="prevPosition", type="PyObject", access="g",
# description="""The position of the sensor from the *previous* compute, as a
# dictionary. Because "outputImage" and "locationImage" match the output of the
# previous compute (not the upcoming one), they do not correlate with the
# "position" parameter; use this parameter instead."""),
# NodeSpecItem(name="imageInfo", type="PyObject", access="g",
# description="""A list with a dictionary of information for each image that has
# been loaded."""),
# NodeSpecItem(name="prevImageInfo", type="PyObject", access="g",
# description="""Dictionary of information for the image used during the previous compute."""),
# NodeSpecItem(name="nextImageInfo", type="PyObject", access="g",
# description="""Dictionary of information for the image which will be used for the next
# compute."""),
# NodeSpecItem(name="categoryInfo", type="PyObject", access="gs",
# description="""A list with a tuple for each category that the sensor has learned. The
# tuple contains the category name (i.e. 'dog') and a serialized version of
# an example image for the category. To deserialize:
# from nupic.regions.ImageSensor import deserializeCategoryInfo
# categoryInfo = deserializeCategoryInfo(sensor.getParameter('categoryInfo'))"""),
# NodeSpecItem(name="outputImage", type="PyObject", access="g",
# description="""Serialized version of the current output image(s). If depth > 1,
# multiple serialized images will be returned in a list. To deserialize:
# from nupic.image import deserializeImage
# outputImage = deserializeImage(sensor.getParameter('outputImage'))"""),
# NodeSpecItem(name="outputImageWithAlpha", type="PyObject", access="g",
# description="""Serialized version of the current output image(s) with the alpha channel.
# If depth > 1, multiple serialized images will be returned in a list. To deserialize:
# from nupic.image import deserializeImage
# outputImage = deserializeImage(sensor.getParameter('outputImageWithAlpha'))"""),
# NodeSpecItem(name="originalImage", type="string", access="g",
# description="""Serialized version of the original, unfiltered version of the
# current image. To deserialize:
# from nupic.image import deserializeImage
# originalImage = deserializeImage(sensor.getParameter('originalImage'))"""),
# NodeSpecItem(name="locationImage", type="string", access="g",
# description="""Serialized version of the current 'location image', which shows the
# position of the sensor overlaid on the filtered image (optionally, the
# original image). To deserialize:
# from nupic.image import deserializeImage
# locationImage = deserializeImage(sensor.getParameter('locationImage'))"""),
# NodeSpecItem(name="minimalBoundingBox", type="bool", constraints="bool", access="cgs",
# description="""Whether the bounding box found by looking at the
# image background should be set even if it touches one of the sides of
# the image. Set to False to avoid chopping edges off certain images, or
# True if that is not an issue and you wish to use a sweeping explorer."""),
# NodeSpecItem(name="auxDataWidth", type="int", access="cgs",
# description="""The number of elements in in the auxiliary data vector."""),
# NodeSpecItem(name="auxData", type="PyObject", access="g",
# description="""List of Auxiliary Data for every image in the image list"""),
# NodeSpecItem(name="metadata", type="string", access="g",
# description="""Parameter that contains a dict of metadata for the most
# recently generated output image."""),
# ]
# )
# return out + parent
def initialize(self, dims, splitterMaps):
pass
def getOutputElementCount(self, name):
if name == 'auxDataOut':
return self._auxDataWidth if self._auxDataWidth else 0
elif name == 'dataOut':
return self.width * self.height * self.depth
elif name == 'alphaOut':
return 1
else:
raise Exception('Unknown output: ' + name)
#def interpret2(self, command):
# """NuPIC 2 replacement for interpret in NuPIC 1 nodes"""
# # This process effectively strips out one level of quotes; manifests
# # as a problem with pathnames on windows
# exec(command.replace("\\", "\\\\"))
def serializeCategoryInfo(categoryInfo):
return [[name, serializeImage(image)] for name, image in categoryInfo]
def deserializeCategoryInfo(sCategoryInfo):
if sCategoryInfo is None: return []
return [[name, (deserializeImage(sImage) if sImage is not None else None)]
for name, sImage in sCategoryInfo]
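# Sketch of the categoryInfo structure these helpers operate on (the category
# name is illustrative, taken from the 'dog' example in the parameter spec):
#   categoryInfo = [['dog', <PIL image or serialized string>], ...]
# serializeCategoryInfo replaces each image with its serialized string;
# deserializeCategoryInfo reverses this and tolerates None in place of an image.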
def _serializeImageList(imageList):
sImageList = []
for i in xrange(len(imageList)):
sImageList.append(imageList[i].copy())
if sImageList[i]['image']:
sImageList[i]['image'] = serializeImage(sImageList[i]['image'])
if sImageList[i]['filtered']:
sImageList[i]['filtered'] = _serializeAllImages(sImageList[i]['filtered'])
return sImageList
def _deserializeImageList(sImageList):
imageList = sImageList
for i in xrange(len(imageList)):
if imageList[i]['image']:
imageList[i]['image'] = deserializeImage(imageList[i]['image'])
if imageList[i]['filtered']:
imageList[i]['filtered'] = _deserializeAllImages(imageList[i]['filtered'])
return imageList
def _serializeAllImages(old):
new = {}
for key in old:
new[key] = [serializeImage(image) for image in old[key]]
return new
def _deserializeAllImages(old):
new = {}
for key in old:
new[key] = [deserializeImage(sImage) for sImage in old[key]]
return new
|
agpl-3.0
|
fidomason/kbengine
|
kbe/res/scripts/common/Lib/site-packages/pip/locations.py
|
390
|
6202
|
"""Locations where we look for configs, install stuff, etc"""
import sys
import site
import os
import tempfile
from distutils.command.install import install, SCHEME_KEYS
import getpass
from pip.backwardcompat import get_python_lib, get_path_uid, user_site
import pip.exceptions
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
marker_fp = open(filepath, 'w')
marker_fp.write(DELETE_MARKER_MESSAGE)
marker_fp.close()
def running_under_virtualenv():
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
"""
Return True if in a venv and no system site packages.
"""
# This mirrors the logic in virtualenv.py for locating the no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
def __get_username():
""" Returns the effective username of the current process. """
if sys.platform == 'win32':
return getpass.getuser()
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
def _get_build_prefix():
""" Returns a safe build_prefix """
path = os.path.join(tempfile.gettempdir(), 'pip_build_%s' %
__get_username())
if sys.platform == 'win32':
""" on windows(tested on 7) temp dirs are isolated """
return path
try:
os.mkdir(path)
write_delete_marker_file(path)
except OSError:
file_uid = None
try:
# raises OSError for symlinks
# https://github.com/pypa/pip/pull/935#discussion_r5307003
file_uid = get_path_uid(path)
except OSError:
file_uid = None
if file_uid != os.geteuid():
msg = "The temporary folder for building (%s) is either not owned by you, or is a symlink." \
% path
print (msg)
print("pip will not work until the temporary folder is " + \
"either deleted or is a real directory owned by your user account.")
raise pip.exceptions.InstallationError(msg)
return path
if running_under_virtualenv():
build_prefix = os.path.join(sys.prefix, 'build')
src_prefix = os.path.join(sys.prefix, 'src')
else:
# Note: intentionally NOT using mkdtemp
# See https://github.com/pypa/pip/issues/906 for plan to move to mkdtemp
build_prefix = _get_build_prefix()
## FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit("The folder you are executing pip from can no longer be found.")
# under Mac OS X + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
build_prefix = os.path.abspath(os.path.realpath(build_prefix))
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = get_python_lib()
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts') if user_site else None
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin') if user_site else None
default_storage_dir = os.path.join(user_dir, 'pip')
default_config_file = os.path.join(default_storage_dir, 'pip.ini')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin') if user_site else None
default_storage_dir = os.path.join(user_dir, '.pip')
default_config_file = os.path.join(default_storage_dir, 'pip.conf')
default_log_file = os.path.join(default_storage_dir, 'pip.log')
# Forcing to use /usr/local/bin for standard Mac OS X framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
default_log_file = os.path.join(user_dir, 'Library/Logs/pip.log')
def distutils_scheme(dist_name, user=False, home=None, root=None):
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
d = Distribution({'name': dist_name})
d.parse_config_files()
i = d.get_command_obj('install', create=True)
# NOTE: setting user or home has the side-effect of creating the home dir or
# user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
i.user = user or i.user
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_'+key)
if running_under_virtualenv():
scheme['headers'] = os.path.join(sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name)
if root is not None:
scheme["headers"] = os.path.join(
root,
os.path.abspath(scheme["headers"])[1:],
)
return scheme
|
lgpl-3.0
|
EPDCenter/android_kernel_bq_dc_v2
|
arch/ia64/scripts/unwcheck.py
|
13143
|
1714
|
#!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
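# These patterns are assumed to match lines of `readelf -u` output such as
# (illustrative values):
#   <my_func>: [0x4000-0x4040]
#   ... rlen=12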
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
|
gpl-2.0
|
kthordarson/youtube-dl-ruv
|
youtube_dl/extractor/radiofrance.py
|
163
|
2089
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class RadioFranceIE(InfoExtractor):
_VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
IE_NAME = 'radiofrance'
_TEST = {
'url': 'http://maison.radiofrance.fr/radiovisions/one-one',
'md5': 'bdbb28ace95ed0e04faab32ba3160daf',
'info_dict': {
'id': 'one-one',
'ext': 'ogg',
"title": "One to one",
"description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
"uploader": "Thomas Hercouët",
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
webpage, 'description', fatal=False)
uploader = self._html_search_regex(
r'<div class="credit"> © (.*?)</div>',
webpage, 'uploader', fatal=False)
formats_str = self._html_search_regex(
r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
webpage, 'audio URLs')
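# data-source is assumed to hold entries like "mp3: 'http://…/a.mp3', ogg: '…'"
# (illustrative); each (codec, url) pair captured below becomes one audio-only format.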
formats = [
{
'format_id': fm[0],
'url': fm[1],
'vcodec': 'none',
'preference': i,
}
for i, fm in
enumerate(re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str))
]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'uploader': uploader,
}
|
unlicense
|
parksandwildlife/borgcollector
|
tablemanager/migrations/0011_auto_20151020_0912.py
|
4
|
1746
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tablemanager.models
class Migration(migrations.Migration):
dependencies = [
('tablemanager', '0010_auto_20150916_1107'),
]
operations = [
migrations.AlterField(
model_name='foreigntable',
name='sql',
field=tablemanager.models.SQLField(default='CREATE FOREIGN TABLE "{{schema}}"."{{self.name}}" () SERVER {{self.server.name}} OPTIONS (schema \'<schema>\', table \'<table>\');'),
preserve_default=True,
),
migrations.AlterField(
model_name='normalise',
name='sql',
field=tablemanager.models.SQLField(default='CREATE FUNCTION "{{trans_schema}}"."{{self.func_name}}"() RETURNS SETOF "{{normal_schema}}"."{{self.output_table.name}}" as $$\nBEGIN\n RETURN QUERY SELECT * FROM "{{input_schema}}"."{{self.input_table.name}}";\nEND;\n$$ LANGUAGE plpgsql;'),
preserve_default=True,
),
migrations.AlterField(
model_name='normaltable',
name='create_sql',
field=tablemanager.models.SQLField(default='CREATE TABLE "{{self.name}}" (name varchar(32) unique);'),
preserve_default=True,
),
migrations.AlterField(
model_name='publish',
name='sql',
field=tablemanager.models.SQLField(default='CREATE FUNCTION "{{trans_schema}}"."{{self.func_name}}"() RETURNS SETOF "{{input_table_schema}}"."{{input_table_name}}" as $$\nBEGIN\n RETURN QUERY SELECT * FROM "{{input_table_schema}}"."{{input_table_name}}";\nEND;\n$$ LANGUAGE plpgsql;'),
preserve_default=True,
),
]
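# Illustrative note (the schema, table and server names here are hypothetical):
# with schema "public", a foreign table named "parcels" and a server named "gis",
# the first default above is expected to render to
#   CREATE FOREIGN TABLE "public"."parcels" () SERVER gis OPTIONS (schema '<schema>', table '<table>');
# leaving the '<schema>' and '<table>' placeholders for the operator to fill in.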
|
bsd-3-clause
|
jcftang/ansible
|
lib/ansible/modules/monitoring/rollbar_deployment.py
|
48
|
4077
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rollbar_deployment
version_added: 1.6
author: "Max Riveiro (@kavu)"
short_description: Notify Rollbar about app deployments
description:
- Notify Rollbar about app deployments
(see https://rollbar.com/docs/deploys_other/)
options:
token:
description:
- Your project access token.
required: true
environment:
description:
- Name of the environment being deployed, e.g. 'production'.
required: true
revision:
description:
- Revision number/sha being deployed.
required: true
user:
description:
- User who deployed.
required: false
rollbar_user:
description:
- Rollbar username of the user who deployed.
required: false
comment:
description:
- Deploy comment (e.g. what is being deployed).
required: false
url:
description:
- Optional URL to submit the notification to.
required: false
default: 'https://api.rollbar.com/api/1/deploy/'
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated.
This should only be used on personally controlled sites using
self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
- rollbar_deployment:
token: AAAAAA
environment: staging
user: ansible
revision: '4.2'
rollbar_user: admin
comment: Test Deploy
'''
import urllib
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import fetch_url
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
environment=dict(required=True),
revision=dict(required=True),
user=dict(required=False),
rollbar_user=dict(required=False),
comment=dict(required=False),
url=dict(
required=False,
default='https://api.rollbar.com/api/1/deploy/'
),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
if module.check_mode:
module.exit_json(changed=True)
params = dict(
access_token=module.params['token'],
environment=module.params['environment'],
revision=module.params['revision']
)
if module.params['user']:
params['local_username'] = module.params['user']
if module.params['rollbar_user']:
params['rollbar_username'] = module.params['rollbar_user']
if module.params['comment']:
params['comment'] = module.params['comment']
url = module.params.get('url')
try:
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
except Exception:
e = get_exception()
module.fail_json(msg='Unable to notify Rollbar: %s' % e)
else:
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
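# Illustrative only: with the values from the EXAMPLES block above, the urlencoded
# body POSTed to https://api.rollbar.com/api/1/deploy/ would look roughly like
#   access_token=AAAAAA&environment=staging&revision=4.2&local_username=ansible&rollbar_username=admin&comment=Test+Deploy
# (parameter order may vary, since params is a plain dict).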
if __name__ == '__main__':
main()
|
gpl-3.0
|
NeoRazorX/ubuntufaq
|
public.py
|
1
|
15820
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of ubuntufaq
# Copyright (C) 2011 Carlos Garcia Gomez neorazorx@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, logging
# load django 1.2
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext.webapp import template
from google.appengine.ext import db, webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users, memcache
from recaptcha.client import captcha
from base import *
from preguntas import *
from enlaces import *
class Portada(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_portada( users.get_current_user() )
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Ubuntu FAQ',
'descripcion': APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'urespuestas': self.sc.get_ultimas_respuestas(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'stats': self.sc.get_stats()
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write( template.render(path, template_values) )
class Populares(Pagina):
def get(self):
Pagina.get(self)
mixto = self.sc.get_populares()
tags = self.get_tags_from_mixto( mixto )
template_values = {
'titulo': 'Populares - Ubuntu FAQ',
'descripcion': 'Listado de preguntas y noticias populares de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': tags,
'mixto': mixto,
'stats': self.sc.get_stats(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio
}
path = os.path.join(os.path.dirname(__file__), 'templates/populares.html')
self.response.out.write( template.render(path, template_values) )
class Ayuda(Pagina):
def get(self):
Pagina.get(self)
template_values = {
'titulo': 'Ayuda de Ubuntu FAQ',
'descripcion': u'Sección de ayuda de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'karmalist': memcache.get('pending-users'),
'foco': 'ayuda'
}
path = os.path.join(os.path.dirname(__file__), 'templates/ayuda.html')
self.response.out.write(template.render(path, template_values))
class Nueva_publicacion(Pagina):
def get(self):
Pagina.get(self)
# the captcha
if users.get_current_user():
chtml = ''
else:
chtml = captcha.displayhtml(
public_key = RECAPTCHA_PUBLIC_KEY,
use_ssl = False,
error = None)
if self.request.get('tipo') == 'pregunta':
foco = 'pregunta'
elif self.request.get('tipo') == 'enlace':
foco = 'enlace'
else:
foco = 'pensamiento'
template_values = {
'titulo': 'Publicar...',
'descripcion': u'Formulario de publicación de Ubuntu FAQ. ' + APP_DESCRIPTION,
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'formulario': self.formulario,
'error_dominio': self.error_dominio,
'captcha': chtml,
'tipo': self.request.get('tipo'),
'contenido': self.request.get('contenido'),
'url2': self.request.get('url'),
'foco': foco
}
path = os.path.join(os.path.dirname(__file__), 'templates/nueva.html')
self.response.out.write(template.render(path, template_values))
class Pagina_buscar(Pagina):
def get(self, tag=None):
Pagina.get(self)
# fix encoding problems in the tag
if isinstance(tag, str):
tag = unicode( urllib.unquote(tag), 'utf-8')
else:
tag = unicode( urllib.unquote(tag) )
template_values = {
'titulo': 'Ubuntu FAQ: ' + tag,
'descripcion': u'Páginas relacionadas con ' + tag,
'tag': tag,
'tags': 'problema, duda, ayuda, ' + tag,
'relacionadas': self.sc.paginas_relacionadas(tag, True),
'alltags': self.sc.get_alltags(),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
def post(self, ntag=None):
Pagina.get(self)
query = urllib.unquote( self.request.get('query') )
template_values = {
'titulo': 'Ubuntu FAQ: ' + query,
'descripcion': u'Resultados de: ' + query,
'tag': query,
'buscando': True,
'tags': 'problema, duda, ayuda, ' + query,
'relacionadas': self.sc.buscar( query ),
'searches': self.sc.get_allsearches(),
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario' : self.formulario,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/search.html')
self.response.out.write(template.render(path, template_values))
class Guardar_voto(Pagina):
def get(self, tipo='x', keye=None, voto='-1'):
try:
if self.request.environ['HTTP_USER_AGENT'].lower().find('googlebot') != -1:
logging.info('Googlebot!')
self.redirect('/')
else:
if tipo == 'r':
elemento = Respuesta.get( keye )
elif tipo == 'c':
elemento = Comentario.get( keye )
else:
elemento = False
if not elemento: # there is no element to vote on
logging.warning('Elemento no encontrado!')
self.redirect('/error/404')
elif self.request.remote_addr in elemento.ips and self.request.remote_addr != '127.0.0.1': # this IP has already voted
logging.info('Voto ya realizado')
self.redirect( elemento.get_link() )
else: # valid vote
ips = elemento.ips
ips.append( self.request.remote_addr )
elemento.ips = ips
if voto == '0':
elemento.valoracion -= 1
logging.info('Voto negativo')
elif voto == '1':
elemento.valoracion += 1
logging.info('Voto positivo')
else:
logging.info('Voto no válido: ' + str(voto))
elemento.put()
elemento.borrar_cache()
# update the statistics
stats = self.sc.get_stats()
if voto in ['0', '1']:
try:
stats['votos'] += 1
except:
stats['votos'] = 1
memcache.replace('stats', stats)
self.redirect( elemento.get_link() )
except:
self.redirect('/error/503')
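# Summary comment added for clarity (behaviour unchanged): a vote only counts when
# the element exists and the client IP is not already in elemento.ips (localhost is
# exempt from that check); '1' increments valoracion, '0' decrements it, any other
# value is just logged, and the 'votos' counter in the cached stats is bumped for
# counted votes before redirecting back to the element.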
class Rss(Pagina):
def get(self):
template_values = {
'portada': self.sc.get_portada(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss.html')
self.response.out.write(template.render(path, template_values))
class Rssr(Pagina):
def get(self):
template_values = {
'respuestas': self.sc.get_ultimas_respuestas(),
'comentarios': self.sc.get_ultimos_comentarios(),
'domain': APP_DOMAIN,
'title': APP_NAME,
'descripcion': APP_DESCRIPTION
}
path = os.path.join(os.path.dirname(__file__), 'templates/rss-respuestas.html')
self.response.out.write(template.render(path, template_values))
class Sitemap(Pagina):
def get(self):
portada = self.sc.get_portada()
print 'Content-Type: text/xml'
print ''
print '<?xml version="1.0" encoding="UTF-8"?>'
print '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
for p in portada:
print '<url><loc>' + p['link'] + '</loc><lastmod>' + str(p['fecha']).split(' ')[0] + '</lastmod><changefreq>always</changefreq><priority>0.9</priority></url>'
print '</urlset>'
class Perror(Pagina):
def get(self, cerror='404'):
Pagina.get(self)
derror = {
'403': 'Permiso denegado',
'403c': 'Permiso denegado - error en el captcha',
'404': u'Página no encontrada en Ubuntu FAQ',
'503': 'Error en Ubuntu FAQ',
'606': 'Idiota detectado'
}
merror = {
'403': '403 - Permiso denegado',
'403c': u'<img src="/img/fuuu_face.png" alt="fuuu"/><br/><br/>403 - Permiso denegado: debes repetir el captcha.<br/>Evita los captchas iniciando sesión.',
'404': u'404 - Página no encontrada en Ubuntu FAQ',
'503': '<img src="/img/fuuu_face.png" alt="explosión"/><br/><br/>503 - Error en Ubuntu FAQ,<br/>consulta el estado en: http://code.google.com/status/appengine',
'606': u'<img src="/img/troll_face.png" alt="troll"/><br/><br/>606 - ¿Por qué no pruebas a escribir algo diferente?'
}
if cerror == '503':
logging.error( '503' )
else:
logging.warning( cerror )
template_values = {
'titulo': str(cerror) + ' - Ubuntu FAQ',
'descripcion': derror.get(cerror, 'Error desconocido'),
'tags': 'ubuntu, kubuntu, xubuntu, lubuntu, problema, ayuda, linux, karmic, lucid, maverick, natty, ocelot',
'url': self.url,
'url_linktext': self.url_linktext,
'mi_perfil': self.mi_perfil,
'formulario': self.formulario,
'error': merror.get(cerror, 'Error desconocido'),
'cerror': cerror,
'usuario': users.get_current_user(),
'notis': self.get_notificaciones(),
'error_dominio': self.error_dominio,
'foco': 'buscar'
}
path = os.path.join(os.path.dirname(__file__), 'templates/portada.html')
self.response.out.write(template.render(path, template_values))
def main():
application = webapp.WSGIApplication([('/', Portada),
('/inicio', Todas_preguntas),
('/preguntas', Todas_preguntas),
(r'/preguntas/(.*)', Todas_preguntas),
('/populares', Populares),
('/sin-solucionar', Sin_solucionar),
('/actualidad', Actualidad),
(r'/actualidad/(.*)', Actualidad),
(r'/p/(.*)', Redir_pregunta),
(r'/question/(.*)', Detalle_pregunta),
('/nueva', Nueva_publicacion),
('/add_p', Nueva_pregunta),
('/mod_p', Detalle_pregunta),
('/del_p', Borrar_pregunta),
('/add_r', Responder),
('/mod_r', Modificar_respuesta),
('/del_r', Borrar_respuesta),
(r'/e/(.*)', Acceder_enlace),
(r'/de/(.*)', Redir_enlace),
(r'/story/(.*)', Detalle_enlace),
('/add_e', Actualidad),
('/mod_e', Detalle_enlace),
('/hun_e', Hundir_enlace),
('/del_e', Borrar_enlace),
('/add_c', Comentar),
('/mod_c', Modificar_comentario),
('/del_c', Borrar_comentario),
('/ayuda', Ayuda),
(r'/search/(.*)', Pagina_buscar),
(r'/votar/(.*)/(.*)/(.*)', Guardar_voto),
('/rss', Rss),
('/rss-respuestas', Rssr),
('/sitemap', Sitemap),
('/sitemap.xml', Sitemap),
(r'/error/(.*)', Perror),
('/.*', Perror),
],
debug=DEBUG_FLAG)
webapp.template.register_template_library('filters.filtros_django')
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
agpl-3.0
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-60/modules/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py
|
3133
|
34872
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, still much higher than the RDR
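# (i.e. roughly 0.25 * 2.98 ~= 0.75, which is the EUCTW_TYPICAL_DISTRIBUTION_RATIO constant below)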
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table,
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
# Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
gpl-3.0
|
kreatorkodi/repository.torrentbr
|
plugin.video.yatp/site-packages/hachoir_core/dict.py
|
95
|
5376
|
"""
Dictionary classes which preserve the order of values.
"""
from hachoir_core.error import HachoirError
from hachoir_core.i18n import _
class UniqKeyError(HachoirError):
"""
Error raised when a value is set while the key already exists in the
dictionary.
"""
pass
class Dict(object):
"""
This class works like a classic Python dict() but has an important method:
__iter__(), which iterates over the dictionary _values_ (and not the
keys, as Python's dict does).
"""
def __init__(self, values=None):
self._index = {} # key => index
self._key_list = [] # index => key
self._value_list = [] # index => value
if values:
for key, value in values:
self.append(key,value)
def _getValues(self):
return self._value_list
values = property(_getValues)
def index(self, key):
"""
Search a value by its key and return its index.
Returns None if the key doesn't exist.
>>> d=Dict( (("two", "deux"), ("one", "un")) )
>>> d.index("two")
0
>>> d.index("one")
1
>>> d.index("three") is None
True
"""
return self._index.get(key)
def __getitem__(self, key):
"""
Get item with specified key.
To get a value by its index, use mydict.values[index]
>>> d=Dict( (("two", "deux"), ("one", "un")) )
>>> d["one"]
'un'
"""
return self._value_list[self._index[key]]
def __setitem__(self, key, value):
self._value_list[self._index[key]] = value
def append(self, key, value):
"""
Append new value
"""
if key in self._index:
raise UniqKeyError(_("Key '%s' already exists") % key)
self._index[key] = len(self._value_list)
self._key_list.append(key)
self._value_list.append(value)
def __len__(self):
return len(self._value_list)
def __contains__(self, key):
return key in self._index
def __iter__(self):
return iter(self._value_list)
def iteritems(self):
"""
Create a generator to iterate on: (key, value).
>>> d=Dict( (("two", "deux"), ("one", "un")) )
>>> for key, value in d.iteritems():
... print "%r: %r" % (key, value)
...
'two': 'deux'
'one': 'un'
"""
for index in xrange(len(self)):
yield (self._key_list[index], self._value_list[index])
def itervalues(self):
"""
Create an iterator on values
"""
return iter(self._value_list)
def iterkeys(self):
"""
Create an iterator on keys
"""
return iter(self._key_list)
def replace(self, oldkey, newkey, new_value):
"""
Replace an existing value with another one
>>> d=Dict( (("two", "deux"), ("one", "un")) )
>>> d.replace("one", "three", 3)
>>> d
{'two': 'deux', 'three': 3}
You can also use the classic form:
>>> d['three'] = 4
>>> d
{'two': 'deux', 'three': 4}
"""
index = self._index[oldkey]
self._value_list[index] = new_value
if oldkey != newkey:
del self._index[oldkey]
self._index[newkey] = index
self._key_list[index] = newkey
def __delitem__(self, index):
"""
Delete item at position index. May raise IndexError.
>>> d=Dict( ((6, 'six'), (9, 'neuf'), (4, 'quatre')) )
>>> del d[1]
>>> d
{6: 'six', 4: 'quatre'}
"""
if index < 0:
index += len(self._value_list)
if not (0 <= index < len(self._value_list)):
raise IndexError(_("list assignment index out of range (%s/%s)")
% (index, len(self._value_list)))
del self._value_list[index]
del self._key_list[index]
# First loop which may alter self._index
for key, item_index in self._index.iteritems():
if item_index == index:
del self._index[key]
break
# Second loop: update the remaining indexes
for key, item_index in self._index.iteritems():
if index < item_index:
self._index[key] -= 1
def insert(self, index, key, value):
"""
Insert an item at specified position index.
>>> d=Dict( ((6, 'six'), (9, 'neuf'), (4, 'quatre')) )
>>> d.insert(1, '40', 'quarante')
>>> d
{6: 'six', '40': 'quarante', 9: 'neuf', 4: 'quatre'}
"""
if key in self:
raise UniqKeyError(_("Insert error: key '%s' already exists") % key)
_index = index
if index < 0:
index += len(self._value_list)
if not(0 <= index <= len(self._value_list)):
raise IndexError(_("Insert error: index '%s' is invalid") % _index)
for item_key, item_index in self._index.iteritems():
if item_index >= index:
self._index[item_key] += 1
self._index[key] = index
self._key_list.insert(index, key)
self._value_list.insert(index, value)
def __repr__(self):
items = ( "%r: %r" % (key, value) for key, value in self.iteritems() )
return "{%s}" % ", ".join(items)
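# Quick usage sketch (mirrors the doctests above; not part of the original module):
# >>> d = Dict((("two", "deux"), ("one", "un")))
# >>> list(d) # __iter__ walks the values, not the keys
# ['deux', 'un']
# >>> list(d.iterkeys())
# ['two', 'one']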
|
gpl-2.0
|
themiken/mtasa-blue
|
vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-generate-manifest.py
|
238
|
4708
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure we generate a manifest file when linking binaries, including
handling AdditionalManifestFiles.
"""
import TestGyp
import sys
if sys.platform == 'win32':
import pywintypes
import win32api
import winerror
RT_MANIFEST = 24
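# 24 is the standard RT_MANIFEST resource type id from the Windows SDK headers.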
class LoadLibrary(object):
"""Context manager for loading and releasing binaries in Windows.
Yields the handle of the binary loaded."""
def __init__(self, path):
self._path = path
self._handle = None
def __enter__(self):
self._handle = win32api.LoadLibrary(self._path)
return self._handle
def __exit__(self, type, value, traceback):
win32api.FreeLibrary(self._handle)
def extract_manifest(path, resource_name):
"""Reads manifest from |path| and returns it as a string.
Returns None if there is no such manifest."""
with LoadLibrary(path) as handle:
try:
return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
except pywintypes.error as error:
if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
return None
else:
raise
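# extract_manifest(path, 1) below pulls the default application manifest
# (resource id 1) and returns its raw XML as a string, or None when the binary
# has no embedded RT_MANIFEST resource.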
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.build('generate-manifest.gyp', test.ALL, chdir=CHDIR)
# Make sure that generation of .generated.manifest does not cause a relink.
test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
test.up_to_date('generate-manifest.gyp', test.ALL, chdir=CHDIR)
def test_manifest(filename, generate_manifest, embedded_manifest,
extra_manifest):
exe_file = test.built_file_path(filename, chdir=CHDIR)
if not generate_manifest:
test.must_not_exist(exe_file + '.manifest')
manifest = extract_manifest(exe_file, 1)
test.fail_test(manifest)
return
if embedded_manifest:
manifest = extract_manifest(exe_file, 1)
test.fail_test(not manifest)
else:
test.must_exist(exe_file + '.manifest')
manifest = test.read(exe_file + '.manifest')
test.fail_test(not manifest)
test.fail_test(extract_manifest(exe_file, 1))
if generate_manifest:
test.must_contain_any_line(manifest, 'requestedExecutionLevel')
if extra_manifest:
test.must_contain_any_line(manifest,
'35138b9a-5d96-4fbd-8e2d-a2440225f93a')
test.must_contain_any_line(manifest,
'e2011457-1546-43c5-a5fe-008deee3d3f0')
test_manifest('test_generate_manifest_true.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_false.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_default.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=False)
test_manifest('test_generate_manifest_true_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_false_as_embedded.exe',
generate_manifest=False,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_default_as_embedded.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test_manifest('test_generate_manifest_true_with_extra_manifest.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_true_with_extra_manifest_list.exe',
generate_manifest=True,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_false_with_extra_manifest_list.exe',
generate_manifest=False,
embedded_manifest=False,
extra_manifest=True)
test_manifest('test_generate_manifest_default_embed_default.exe',
generate_manifest=True,
embedded_manifest=True,
extra_manifest=False)
test.pass_test()
|
gpl-3.0
|
Krossom/python-for-android
|
python-modules/twisted/twisted/protocols/mice/mouseman.py
|
81
|
2882
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Logictech MouseMan serial protocol.
http://www.softnco.demon.co.uk/SerialMouse.txt
"""
from twisted.internet import protocol
class MouseMan(protocol.Protocol):
"""
Parser for Logitech MouseMan serial mouse protocol (compatible
with Microsoft Serial Mouse).
"""
state = 'initial'
leftbutton=None
rightbutton=None
middlebutton=None
leftold=None
rightold=None
middleold=None
horiz=None
vert=None
horizold=None
vertold=None
def down_left(self):
pass
def up_left(self):
pass
def down_middle(self):
pass
def up_middle(self):
pass
def down_right(self):
pass
def up_right(self):
pass
def move(self, x, y):
pass
horiz=None
vert=None
def state_initial(self, byte):
if byte & 1<<6:
self.word1=byte
self.leftbutton = byte & 1<<5
self.rightbutton = byte & 1<<4
return 'horiz'
else:
return 'initial'
def state_horiz(self, byte):
if byte & 1<<6:
return self.state_initial(byte)
else:
x=(self.word1 & 0x03)<<6 | (byte & 0x3f)
if x>=128:
x=-256+x
self.horiz = x
return 'vert'
def state_vert(self, byte):
if byte & 1<<6:
# short packet
return self.state_initial(byte)
else:
x = (self.word1 & 0x0c)<<4 | (byte & 0x3f)
if x>=128:
x=-256+x
self.vert = x
self.snapshot()
return 'maybemiddle'
def state_maybemiddle(self, byte):
if byte & 1<<6:
self.snapshot()
return self.state_initial(byte)
else:
self.middlebutton=byte & 1<<5
self.snapshot()
return 'initial'
def snapshot(self):
if self.leftbutton and not self.leftold:
self.down_left()
self.leftold=1
if not self.leftbutton and self.leftold:
self.up_left()
self.leftold=0
if self.middlebutton and not self.middleold:
self.down_middle()
self.middleold=1
if not self.middlebutton and self.middleold:
self.up_middle()
self.middleold=0
if self.rightbutton and not self.rightold:
self.down_right()
self.rightold=1
if not self.rightbutton and self.rightold:
self.up_right()
self.rightold=0
if self.horiz or self.vert:
self.move(self.horiz, self.vert)
def dataReceived(self, data):
for c in data:
byte = ord(c)
self.state = getattr(self, 'state_'+self.state)(byte)
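# Illustrative sketch (not part of the original module): the parser consumes
# MouseMan/Microsoft serial packets one byte at a time via dataReceived().
# For example, feeding the three bytes 0x40, 0x02, 0x00 (bit 6 set marks the
# packet start, no buttons pressed) yields X/Y deltas of 2 and 0, so the
# parser would call move(2, 0); an optional fourth byte carries the middle
# button state in bit 5.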
|
apache-2.0
|
Semi-global/edx-platform
|
common/djangoapps/terrain/steps.py
|
104
|
7473
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Disable the "wildcard import" warning so we can bring in all methods from
# course helpers and ui helpers
# pylint: disable=wildcard-import
# Disable the "Unused import %s from wildcard import" warning
# pylint: disable=unused-wildcard-import
# Disable the "unused argument" warning because lettuce uses "step"
# pylint: disable=unused-argument
# django_url is assigned late in the process of loading lettuce,
# so we import this as a module, and then read django_url from
# it to get the correct value
import lettuce.django
from lettuce import world, step
from .course_helpers import *
from .ui_helpers import *
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from logging import getLogger
logger = getLogger(__name__)
@step(r'I wait (?:for )?"(\d+\.?\d*)" seconds?$')
def wait_for_seconds(step, seconds):
world.wait(seconds)
@step('I reload the page$')
def reload_the_page(step):
world.wait_for_ajax_complete()
world.browser.reload()
world.wait_for_js_to_load()
@step('I press the browser back button$')
def browser_back(step):
world.browser.driver.back()
@step('I (?:visit|access|open) the homepage$')
def i_visit_the_homepage(step):
world.visit('/')
assert world.is_css_present('header.global')
@step(u'I (?:visit|access|open) the dashboard$')
def i_visit_the_dashboard(step):
world.visit('/dashboard')
assert world.is_css_present('section.container.dashboard')
@step('I should be on the dashboard page$')
def i_should_be_on_the_dashboard(step):
assert world.is_css_present('section.container.dashboard')
assert 'Dashboard' in world.browser.title
@step(u'I (?:visit|access|open) the courses page$')
def i_am_on_the_courses_page(step):
world.visit('/courses')
assert world.is_css_present('div.courses')
@step(u'I press the "([^"]*)" button$')
def and_i_press_the_button(step, value):
button_css = 'input[value="%s"]' % value
world.css_click(button_css)
@step(u'I click the link with the text "([^"]*)"$')
def click_the_link_with_the_text_group1(step, linktext):
world.click_link(linktext)
@step('I should see that the path is "([^"]*)"$')
def i_should_see_that_the_path_is(step, path):
if 'COURSE' in world.scenario_dict:
path = path.format(world.scenario_dict['COURSE'].id)
assert world.url_equals(path), (
"path should be {!r} but is {!r}".format(path, world.browser.url)
)
@step(u'the page title should be "([^"]*)"$')
def the_page_title_should_be(step, title):
assert_equals(world.browser.title, title)
@step(u'the page title should contain "([^"]*)"$')
def the_page_title_should_contain(step, title):
assert title in world.browser.title
@step('I log in$')
def i_log_in(step):
world.log_in(username='robot', password='test')
@step('I am a logged in user$')
def i_am_logged_in_user(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
@step('I am not logged in$')
def i_am_not_logged_in(step):
world.visit('logout')
@step('I am staff for course "([^"]*)"$')
def i_am_staff_for_course_by_id(step, course_id):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
world.register_by_course_key(course_key, True)
@step(r'click (?:the|a) link (?:called|with the text) "([^"]*)"$')
def click_the_link_called(step, text):
world.click_link(text)
@step(r'should see that the url is "([^"]*)"$')
def should_have_the_url(step, url):
assert_equals(world.browser.url, url)
@step(r'should see (?:the|a) link (?:called|with the text) "([^"]*)"$')
def should_see_a_link_called(step, text):
assert len(world.browser.find_link_by_text(text)) > 0
@step(r'should see (?:the|a) link with the id "([^"]*)" called "([^"]*)"$')
def should_have_link_with_id_and_text(step, link_id, text):
link = world.browser.find_by_id(link_id)
assert len(link) > 0
assert_equals(link.text, text)
@step(r'should see a link to "([^"]*)" with the text "([^"]*)"$')
def should_have_link_with_path_and_text(step, path, text):
link = world.browser.find_link_by_text(text)
assert len(link) > 0
assert_equals(link.first["href"], lettuce.django.django_url(path))
@step(r'should( not)? see "(.*)" (?:somewhere|anywhere) (?:in|on) (?:the|this) page')
def should_see_in_the_page(step, doesnt_appear, text):
if world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
multiplier = 2
else:
multiplier = 1
if doesnt_appear:
assert world.browser.is_text_not_present(text, wait_time=5 * multiplier)
else:
assert world.browser.is_text_present(text, wait_time=5 * multiplier)
@step('I am logged in$')
def i_am_logged_in(step):
world.create_user('robot', 'test')
world.log_in(username='robot', password='test')
world.browser.visit(lettuce.django.django_url('/'))
dash_css = 'section.container.dashboard'
assert world.is_css_present(dash_css)
@step(u'I am an edX user$')
def i_am_an_edx_user(step):
world.create_user('robot', 'test')
@step(u'User "([^"]*)" is an edX user$')
def registered_edx_user(step, uname):
world.create_user(uname, 'test')
@step(u'All dialogs should be closed$')
def dialogs_are_closed(step):
assert world.dialogs_closed()
@step(u'visit the url "([^"]*)"')
def visit_url(step, url):
if 'COURSE' in world.scenario_dict:
url = url.format(world.scenario_dict['COURSE'].id)
world.browser.visit(lettuce.django.django_url(url))
@step(u'wait for AJAX to (?:finish|complete)')
def wait_ajax(_step):
wait_for_ajax_complete()
@step('I will confirm all alerts')
def i_confirm_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return true;} ; window.alert = function(){return;}')
@step('I will cancel all alerts')
def i_cancel_all_alerts(step):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
world.browser.execute_script('window.confirm = function(){return false;} ; window.alert = function(){return;}')
@step('I will answer all prompts with "([^"]*)"')
def i_answer_prompts_with(step, prompt):
"""
Please note: This method must be called RIGHT BEFORE an expected alert
Window variables are page local and thus all changes are removed upon navigating to a new page
In addition, this method changes the functionality of ONLY future alerts
"""
    world.browser.execute_script('window.prompt = function(){return "%s";}' % prompt)
@step('I run ipdb')
def run_ipdb(_step):
"""Run ipdb as step for easy debugging"""
import ipdb
ipdb.set_trace()
assert True
@step(u'(I am viewing|s?he views) the course team settings$')
def view_course_team_settings(_step, whom):
""" navigates to course team settings page """
world.click_course_settings()
link_css = 'li.nav-course-settings-team a'
world.css_click(link_css)
|
agpl-3.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/win32comext/shell/demos/browse_for_folder.py
|
47
|
1661
|
# A couple of samples using SHBrowseForFolder
import sys, os
from win32com.shell import shell, shellcon
import win32gui
# A callback procedure - called by SHBrowseForFolder
def BrowseCallbackProc(hwnd, msg, lp, data):
if msg== shellcon.BFFM_INITIALIZED:
win32gui.SendMessage(hwnd, shellcon.BFFM_SETSELECTION, 1, data)
elif msg == shellcon.BFFM_SELCHANGED:
        # Set the status text of the dialog to the path of the selected folder.
# For this message, 'lp' is the address of the PIDL.
pidl = shell.AddressAsPIDL(lp)
try:
path = shell.SHGetPathFromIDList(pidl)
win32gui.SendMessage(hwnd, shellcon.BFFM_SETSTATUSTEXT, 0, path)
except shell.error:
# No path for this PIDL
pass
if __name__=='__main__':
# Demonstrate a dialog with the cwd selected as the default - this
# must be done via a callback function.
flags = shellcon.BIF_STATUSTEXT
shell.SHBrowseForFolder(0, # parent HWND
None, # root PIDL.
"Default of %s" % os.getcwd(), # title
flags, # flags
BrowseCallbackProc, # callback function
os.getcwd() # 'data' param for the callback
)
# Browse from this directory down only.
# Get the PIDL for the cwd.
desktop = shell.SHGetDesktopFolder()
cb, pidl, extra = desktop.ParseDisplayName(0, None, os.getcwd())
shell.SHBrowseForFolder(0, # parent HWND
pidl, # root PIDL.
"From %s down only" % os.getcwd(), # title
)
|
gpl-3.0
|
shsingh/ansible
|
test/support/integration/plugins/module_utils/net_tools/nios/api.py
|
23
|
28784
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
from functools import partial
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
try:
from infoblox_client.connector import Connector
from infoblox_client.exceptions import InfobloxException
HAS_INFOBLOX_CLIENT = True
except ImportError:
HAS_INFOBLOX_CLIENT = False
# defining nios constants
NIOS_DNS_VIEW = 'view'
NIOS_NETWORK_VIEW = 'networkview'
NIOS_HOST_RECORD = 'record:host'
NIOS_IPV4_NETWORK = 'network'
NIOS_IPV6_NETWORK = 'ipv6network'
NIOS_ZONE = 'zone_auth'
NIOS_PTR_RECORD = 'record:ptr'
NIOS_A_RECORD = 'record:a'
NIOS_AAAA_RECORD = 'record:aaaa'
NIOS_CNAME_RECORD = 'record:cname'
NIOS_MX_RECORD = 'record:mx'
NIOS_SRV_RECORD = 'record:srv'
NIOS_NAPTR_RECORD = 'record:naptr'
NIOS_TXT_RECORD = 'record:txt'
NIOS_NSGROUP = 'nsgroup'
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
NIOS_MEMBER = 'member'
NIOS_PROVIDER_SPEC = {
'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
'silent_ssl_warnings': dict(type='bool', default=True),
'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
'http_pool_connections': dict(type='int', default=10),
'http_pool_maxsize': dict(type='int', default=10),
'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES']))
}
def get_connector(*args, **kwargs):
''' Returns an instance of infoblox_client.connector.Connector
:params args: positional arguments are silently ignored
:params kwargs: dict that is passed to Connector init
:returns: Connector
'''
if not HAS_INFOBLOX_CLIENT:
raise Exception('infoblox-client is required but does not appear '
'to be installed. It can be installed using the '
'command `pip install infoblox-client`')
if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
raise Exception('invalid or unsupported keyword argument for connector')
for key, value in iteritems(NIOS_PROVIDER_SPEC):
if key not in kwargs:
# apply default values from NIOS_PROVIDER_SPEC since we cannot just
# assume the provider values are coming from AnsibleModule
if 'default' in value:
kwargs[key] = value['default']
# override any values with env variables unless they were
# explicitly set
env = ('INFOBLOX_%s' % key).upper()
if env in os.environ:
kwargs[key] = os.environ.get(env)
if 'validate_certs' in kwargs.keys():
kwargs['ssl_verify'] = kwargs['validate_certs']
kwargs.pop('validate_certs', None)
return Connector(kwargs)
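# Illustrative sketch (not part of the original module): get_connector() accepts
# the same keys as NIOS_PROVIDER_SPEC; anything left unset is filled from the
# spec defaults or the corresponding INFOBLOX_* environment variable, e.g.
#   conn = get_connector(host='nios.example.com', username='admin',
#                        password='secret', validate_certs=False)
# (the hostname and credentials above are placeholders).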
def normalize_extattrs(value):
''' Normalize extattrs field to expected format
The module accepts extattrs as key/value pairs. This method will
transform the key/value pairs into a structure suitable for
sending across WAPI in the format of:
extattrs: {
key: {
value: <value>
}
}
'''
return dict([(k, {'value': v}) for k, v in iteritems(value)])
def flatten_extattrs(value):
''' Flatten the key/value struct for extattrs
WAPI returns extattrs field as a dict in form of:
extattrs: {
key: {
value: <value>
}
}
This method will flatten the structure to:
extattrs: {
key: value
}
'''
return dict([(k, v['value']) for k, v in iteritems(value)])
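# Illustrative sketch (not part of the original module): normalize_extattrs and
# flatten_extattrs are inverses for plain key/value extensible attributes, e.g.
#   normalize_extattrs({'Site': 'HQ'})          -> {'Site': {'value': 'HQ'}}
#   flatten_extattrs({'Site': {'value': 'HQ'}}) -> {'Site': 'HQ'}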
def member_normalize(member_spec):
''' Transforms the member module arguments into a valid WAPI struct
This function will transform the arguments into a structure that
is a valid WAPI structure in the format of:
{
key: <value>,
}
It will remove any arguments that are set to None since WAPI will error on
that condition.
The remainder of the value validation is performed by WAPI
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
In this function, they are converted to dictionary.
'''
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
'pre_provisioning', 'network_setting', 'v6_network_setting',
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
for key in member_spec.keys():
if key in member_elements and member_spec[key] is not None:
member_spec[key] = member_spec[key][0]
if isinstance(member_spec[key], dict):
member_spec[key] = member_normalize(member_spec[key])
elif isinstance(member_spec[key], list):
for x in member_spec[key]:
if isinstance(x, dict):
x = member_normalize(x)
elif member_spec[key] is None:
del member_spec[key]
return member_spec
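# Illustrative sketch (not part of the original module): member_normalize
# unwraps the single-element lists used only for argument validation, e.g.
#   {'vip_setting': [{'address': '192.168.1.2'}], 'comment': None}
# comes back as {'vip_setting': {'address': '192.168.1.2'}}, with the
# None-valued 'comment' key dropped (the address is a placeholder value).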
class WapiBase(object):
''' Base class for implementing Infoblox WAPI API '''
provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
def __init__(self, provider):
self.connector = get_connector(**provider)
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if name.startswith('_'):
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
return partial(self._invoke_method, name)
def _invoke_method(self, name, *args, **kwargs):
try:
method = getattr(self.connector, name)
return method(*args, **kwargs)
except InfobloxException as exc:
if hasattr(self, 'handle_exception'):
self.handle_exception(name, exc)
else:
raise
class WapiLookup(WapiBase):
''' Implements WapiBase for lookup plugins '''
def handle_exception(self, method_name, exc):
if ('text' in exc.response):
raise Exception(exc.response['text'])
else:
raise Exception(exc)
class WapiInventory(WapiBase):
''' Implements WapiBase for dynamic inventory script '''
pass
class WapiModule(WapiBase):
''' Implements WapiBase for executing a NIOS module '''
def __init__(self, module):
self.module = module
provider = module.params['provider']
try:
super(WapiModule, self).__init__(provider)
except Exception as exc:
self.module.fail_json(msg=to_text(exc))
def handle_exception(self, method_name, exc):
''' Handles any exceptions raised
This method will be called if an InfobloxException is raised for
any call to the instance of Connector and also, in case of generic
exception. This method will then gracefully fail the module.
:args exc: instance of InfobloxException
'''
if ('text' in exc.response):
self.module.fail_json(
msg=exc.response['text'],
type=exc.response['Error'].split(':')[0],
code=exc.response.get('code'),
operation=method_name
)
else:
self.module.fail_json(msg=to_native(exc))
def run(self, ib_obj_type, ib_spec):
        ''' Runs the module and performs configuration tasks
:args ib_obj_type: the WAPI object type to operate against
:args ib_spec: the specification for the WAPI object as a dict
:returns: a results dict
'''
update = new_name = None
state = self.module.params['state']
if state not in ('present', 'absent'):
self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
result = {'changed': False}
obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
# get object reference
ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
proposed_object = {}
for key, value in iteritems(ib_spec):
if self.module.params[key] is not None:
if 'transform' in value:
proposed_object[key] = value['transform'](self.module)
else:
proposed_object[key] = self.module.params[key]
        # If configure_for_dns is set to False, delete the default dns view from the params; fail if a non-default view was given
if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
and ib_obj_type == NIOS_HOST_RECORD:
del proposed_object['view']
elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
and ib_obj_type == NIOS_HOST_RECORD:
self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
if ib_obj_ref:
if len(ib_obj_ref) > 1:
for each in ib_obj_ref:
# To check for existing A_record with same name with input A_record by IP
if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
current_object = each
# To check for existing Host_record with same name with input Host_record by IP
elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
== proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
current_object = each
# Else set the current_object with input value
else:
current_object = obj_filter
ref = None
else:
current_object = ib_obj_ref[0]
if 'extattrs' in current_object:
current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
if current_object.get('_ref'):
ref = current_object.pop('_ref')
else:
current_object = obj_filter
ref = None
# checks if the object type is member to normalize the attributes being passed
if (ib_obj_type == NIOS_MEMBER):
proposed_object = member_normalize(proposed_object)
# checks if the name's field has been updated
if update and new_name:
proposed_object['name'] = new_name
check_remove = []
if (ib_obj_type == NIOS_HOST_RECORD):
            # this check is for idempotency: if the same ip address is passed,
            # the add param is removed, and the same holds true for the remove case.
if 'ipv4addrs' in [current_object and proposed_object]:
for each in current_object['ipv4addrs']:
if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
if 'add' in proposed_object['ipv4addrs'][0]:
del proposed_object['ipv4addrs'][0]['add']
break
check_remove += each.values()
if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
if 'remove' in proposed_object['ipv4addrs'][0]:
del proposed_object['ipv4addrs'][0]['remove']
res = None
modified = not self.compare_objects(current_object, proposed_object)
if 'extattrs' in proposed_object:
proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
# Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
if state == 'present':
if ref is None:
if not self.module.check_mode:
self.create_object(ib_obj_type, proposed_object)
result['changed'] = True
# Check if NIOS_MEMBER and the flag to call function create_token is set
elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
proposed_object = None
# the function creates a token that can be used by a pre-provisioned member to join the grid
result['api_results'] = self.call_func('create_token', ref, proposed_object)
result['changed'] = True
elif modified:
if 'ipv4addrs' in proposed_object:
if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
run_update = True
proposed_object = self.on_update(proposed_object, ib_spec)
if 'ipv4addrs' in proposed_object:
                        if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
if run_update:
res = self.update_object(ref, proposed_object)
result['changed'] = True
else:
res = ref
if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
# popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
proposed_object = self.on_update(proposed_object, ib_spec)
del proposed_object['view']
if not self.module.check_mode:
res = self.update_object(ref, proposed_object)
result['changed'] = True
elif 'network_view' in proposed_object:
proposed_object.pop('network_view')
result['changed'] = True
if not self.module.check_mode and res is None:
proposed_object = self.on_update(proposed_object, ib_spec)
self.update_object(ref, proposed_object)
result['changed'] = True
elif state == 'absent':
if ref is not None:
if 'ipv4addrs' in proposed_object:
if 'remove' in proposed_object['ipv4addrs'][0]:
self.check_if_add_remove_ip_arg_exists(proposed_object)
self.update_object(ref, proposed_object)
result['changed'] = True
elif not self.module.check_mode:
self.delete_object(ref)
result['changed'] = True
return result
def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
        ''' Send a POST request if the host record input name and the retrieved
            ref name are the same, but the input IP and the retrieved IP differ'''
if 'name' in (obj_filter and ib_obj_ref[0]) and ib_obj_type == NIOS_HOST_RECORD:
obj_host_name = obj_filter['name']
ref_host_name = ib_obj_ref[0]['name']
if 'ipv4addrs' in (current_object and proposed_object):
current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
elif 'ipv6addrs' in (current_object and proposed_object):
current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
self.create_object(ib_obj_type, proposed_object)
def check_if_nios_next_ip_exists(self, proposed_object):
        ''' Check if the nios_next_ip argument is passed in the ip address while
        creating a host record. If so, format the proposed object's ipv4addrs to
        pass func:nextavailableip together with the address range, so the host
        record is created with the next available ip in a single call and any
        race condition is avoided '''
if 'ipv4addrs' in proposed_object:
if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
elif 'ipv4addr' in proposed_object:
if 'nios_next_ip' in proposed_object['ipv4addr']:
ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
return proposed_object
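    # Illustrative sketch (not part of the original module): with a proposed
    # object whose ipv4addr is "{'nios_next_ip': '192.168.10.0/24'}" the method
    # above rewrites the address to 'func:nextavailableip:192.168.10.0/24', so
    # WAPI allocates the next free address in that (placeholder) range.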
def check_if_add_remove_ip_arg_exists(self, proposed_object):
        '''
        Check whether the add/remove param is passed in the args and set to
        true. If so, update the proposed dictionary so that the IP is added
        to (or removed from) the existing host_record; if the user passes
        false for the param, nothing is done.
        :returns: True if the param was changed based on add/remove, along
            with the changed proposed_object.
        '''
update = False
if 'add' in proposed_object['ipv4addrs'][0]:
if proposed_object['ipv4addrs'][0]['add']:
proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
del proposed_object['ipv4addrs']
del proposed_object['ipv4addrs+'][0]['add']
update = True
else:
del proposed_object['ipv4addrs'][0]['add']
elif 'remove' in proposed_object['ipv4addrs'][0]:
if proposed_object['ipv4addrs'][0]['remove']:
proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
del proposed_object['ipv4addrs']
del proposed_object['ipv4addrs-'][0]['remove']
update = True
else:
del proposed_object['ipv4addrs'][0]['remove']
return update, proposed_object
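    # Illustrative sketch (not part of the original module): a proposed object of
    #   {'ipv4addrs': [{'ipv4addr': '10.0.0.5', 'add': True}]}
    # is rewritten by the method above to {'ipv4addrs+': [{'ipv4addr': '10.0.0.5'}]}
    # and (True, ...) is returned; the '+' suffix asks WAPI to append just the
    # listed address to the existing host record (the address is a placeholder).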
def issubset(self, item, objects):
''' Checks if item is a subset of objects
:args item: the subset item to validate
:args objects: superset list of objects to validate against
:returns: True if item is a subset of one entry in objects otherwise
this method will return None
'''
for obj in objects:
if isinstance(item, dict):
if all(entry in obj.items() for entry in item.items()):
return True
else:
if item in obj:
return True
def compare_objects(self, current_object, proposed_object):
for key, proposed_item in iteritems(proposed_object):
current_item = current_object.get(key)
# if proposed has a key that current doesn't then the objects are
# not equal and False will be immediately returned
if current_item is None:
return False
elif isinstance(proposed_item, list):
for subitem in proposed_item:
if not self.issubset(subitem, current_item):
return False
elif isinstance(proposed_item, dict):
return self.compare_objects(current_item, proposed_item)
else:
if current_item != proposed_item:
return False
return True
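    # Illustrative sketch (not part of the original module): compare_objects
    # treats the proposed object as a subset check against the current one, e.g.
    #   self.compare_objects({'name': 'a', 'comment': 'x', 'ttl': 30}, {'name': 'a'})
    # returns True (nothing to update), while a differing or missing key in the
    # current object makes it return False and triggers an update in run().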
def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
''' this function gets the reference object of pre-existing nios objects '''
update = False
old_name = new_name = None
if ('name' in obj_filter):
# gets and returns the current object based on name/old_name passed
try:
name_obj = self.module._check_type_dict(obj_filter['name'])
old_name = name_obj['old_name']
new_name = name_obj['new_name']
except TypeError:
name = obj_filter['name']
if old_name and new_name:
if (ib_obj_type == NIOS_HOST_RECORD):
test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
test_obj_filter = obj_filter
else:
test_obj_filter = dict([('name', old_name)])
# get the object reference
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
if ib_obj:
obj_filter['name'] = new_name
else:
test_obj_filter['name'] = new_name
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
update = True
return ib_obj, update, new_name
if (ib_obj_type == NIOS_HOST_RECORD):
# to check only by name if dns bypassing is set
if not obj_filter['configure_for_dns']:
test_obj_filter = dict([('name', name)])
else:
test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
test_obj_filter = dict([['mac', obj_filter['mac']]])
elif (ib_obj_type == NIOS_A_RECORD):
# resolves issue where a_record with uppercase name was returning null and was failing
test_obj_filter = obj_filter
test_obj_filter['name'] = test_obj_filter['name'].lower()
# resolves issue where multiple a_records with same name and different IP address
try:
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
ipaddr = ipaddr_obj['old_ipv4addr']
except TypeError:
ipaddr = obj_filter['ipv4addr']
test_obj_filter['ipv4addr'] = ipaddr
elif (ib_obj_type == NIOS_TXT_RECORD):
# resolves issue where multiple txt_records with same name and different text
test_obj_filter = obj_filter
try:
text_obj = self.module._check_type_dict(obj_filter['text'])
txt = text_obj['old_text']
except TypeError:
txt = obj_filter['text']
test_obj_filter['text'] = txt
            # if none of the special cases above applied, use the passed obj_filter as the test filter
else:
test_obj_filter = obj_filter
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
elif (ib_obj_type == NIOS_A_RECORD):
# resolves issue where multiple a_records with same name and different IP address
test_obj_filter = obj_filter
try:
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
ipaddr = ipaddr_obj['old_ipv4addr']
except TypeError:
ipaddr = obj_filter['ipv4addr']
test_obj_filter['ipv4addr'] = ipaddr
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
elif (ib_obj_type == NIOS_TXT_RECORD):
# resolves issue where multiple txt_records with same name and different text
test_obj_filter = obj_filter
try:
text_obj = self.module._check_type_dict(obj_filter['text'])
txt = text_obj['old_text']
except TypeError:
txt = obj_filter['text']
test_obj_filter['text'] = txt
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
elif (ib_obj_type == NIOS_ZONE):
# del key 'restart_if_needed' as nios_zone get_object fails with the key present
temp = ib_spec['restart_if_needed']
del ib_spec['restart_if_needed']
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
# reinstate restart_if_needed if ib_obj is none, meaning there's no existing nios_zone ref
if not ib_obj:
ib_spec['restart_if_needed'] = temp
elif (ib_obj_type == NIOS_MEMBER):
# del key 'create_token' as nios_member get_object fails with the key present
temp = ib_spec['create_token']
del ib_spec['create_token']
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
if temp:
# reinstate 'create_token' key
ib_spec['create_token'] = temp
else:
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
return ib_obj, update, new_name
def on_update(self, proposed_object, ib_spec):
        ''' Event called before the update is sent to the API endpoint
This method will allow the final proposed object to be changed
and/or keys filtered before it is sent to the API endpoint to
be processed.
:args proposed_object: A dict item that will be encoded and sent
the API endpoint with the updated data structure
:returns: updated object to be sent to API endpoint
'''
keys = set()
for key, value in iteritems(proposed_object):
update = ib_spec[key].get('update', True)
if not update:
keys.add(key)
return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
|
gpl-3.0
|
hclivess/Stallion
|
nuitka/Cryptodome/SelfTest/Util/test_Padding.py
|
3
|
5642
|
#
# SelfTest/Util/test_Padding.py: Self-test for padding functions
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from binascii import unhexlify as uh
from Cryptodome.Util.py3compat import *
from Cryptodome.SelfTest.st_common import list_test_cases
from Cryptodome.Util.Padding import pad, unpad
class PKCS7_Tests(unittest.TestCase):
def test1(self):
padded = pad(b(""), 4)
self.assertTrue(padded == uh(b("04040404")))
padded = pad(b(""), 4, 'pkcs7')
self.assertTrue(padded == uh(b("04040404")))
back = unpad(padded, 4)
self.assertTrue(back == b(""))
def test2(self):
padded = pad(uh(b("12345678")), 4)
self.assertTrue(padded == uh(b("1234567804040404")))
back = unpad(padded, 4)
self.assertTrue(back == uh(b("12345678")))
def test3(self):
padded = pad(uh(b("123456")), 4)
self.assertTrue(padded == uh(b("12345601")))
back = unpad(padded, 4)
self.assertTrue(back == uh(b("123456")))
def test4(self):
padded = pad(uh(b("1234567890")), 4)
self.assertTrue(padded == uh(b("1234567890030303")))
back = unpad(padded, 4)
self.assertTrue(back == uh(b("1234567890")))
def testn1(self):
self.assertRaises(ValueError, pad, uh(b("12")), 4, 'pkcs8')
def testn2(self):
self.assertRaises(ValueError, unpad, b("\0\0\0"), 4)
def testn3(self):
self.assertRaises(ValueError, unpad, b("123456\x02"), 4)
self.assertRaises(ValueError, unpad, b("123456\x00"), 4)
self.assertRaises(ValueError, unpad, b("123456\x05\x05\x05\x05\x05"), 4)
class X923_Tests(unittest.TestCase):
def test1(self):
padded = pad(b(""), 4, 'x923')
self.assertTrue(padded == uh(b("00000004")))
back = unpad(padded, 4, 'x923')
self.assertTrue(back == b(""))
def test2(self):
padded = pad(uh(b("12345678")), 4, 'x923')
self.assertTrue(padded == uh(b("1234567800000004")))
back = unpad(padded, 4, 'x923')
self.assertTrue(back == uh(b("12345678")))
def test3(self):
padded = pad(uh(b("123456")), 4, 'x923')
self.assertTrue(padded == uh(b("12345601")))
back = unpad(padded, 4, 'x923')
self.assertTrue(back == uh(b("123456")))
def test4(self):
padded = pad(uh(b("1234567890")), 4, 'x923')
self.assertTrue(padded == uh(b("1234567890000003")))
back = unpad(padded, 4, 'x923')
self.assertTrue(back == uh(b("1234567890")))
def testn1(self):
self.assertRaises(ValueError, unpad, b("123456\x02"), 4, 'x923')
self.assertRaises(ValueError, unpad, b("123456\x00"), 4, 'x923')
self.assertRaises(ValueError, unpad, b("123456\x00\x00\x00\x00\x05"), 4, 'x923')
class ISO7816_Tests(unittest.TestCase):
def test1(self):
padded = pad(b(""), 4, 'iso7816')
self.assertTrue(padded == uh(b("80000000")))
back = unpad(padded, 4, 'iso7816')
self.assertTrue(back == b(""))
def test2(self):
padded = pad(uh(b("12345678")), 4, 'iso7816')
self.assertTrue(padded == uh(b("1234567880000000")))
back = unpad(padded, 4, 'iso7816')
self.assertTrue(back == uh(b("12345678")))
def test3(self):
padded = pad(uh(b("123456")), 4, 'iso7816')
self.assertTrue(padded == uh(b("12345680")))
#import pdb; pdb.set_trace()
back = unpad(padded, 4, 'iso7816')
self.assertTrue(back == uh(b("123456")))
def test4(self):
padded = pad(uh(b("1234567890")), 4, 'iso7816')
self.assertTrue(padded == uh(b("1234567890800000")))
back = unpad(padded, 4, 'iso7816')
self.assertTrue(back == uh(b("1234567890")))
def testn1(self):
self.assertRaises(ValueError, unpad, b("123456\x81"), 4, 'iso7816')
def get_tests(config={}):
tests = []
tests += list_test_cases(PKCS7_Tests)
tests += list_test_cases(X923_Tests)
tests += list_test_cases(ISO7816_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
|
gpl-3.0
|
ProfessionalIT/maxigenios-website
|
sdk/google_appengine/lib/django-1.3/django/contrib/localflavor/id/id_choices.py
|
439
|
3217
|
import warnings
from django.utils.translation import ugettext_lazy as _
# Reference: http://id.wikipedia.org/wiki/Daftar_provinsi_Indonesia
# Indonesia does not have an official Province code standard.
# I decided to use unambiguous and consistent (some are common) 3-letter codes.
warnings.warn(
'There have been recent changes to the ID localflavor. See the release notes for details',
RuntimeWarning
)
PROVINCE_CHOICES = (
('ACE', _('Aceh')),
('BLI', _('Bali')),
('BTN', _('Banten')),
('BKL', _('Bengkulu')),
('DIY', _('Yogyakarta')),
('JKT', _('Jakarta')),
('GOR', _('Gorontalo')),
('JMB', _('Jambi')),
('JBR', _('Jawa Barat')),
('JTG', _('Jawa Tengah')),
('JTM', _('Jawa Timur')),
('KBR', _('Kalimantan Barat')),
('KSL', _('Kalimantan Selatan')),
('KTG', _('Kalimantan Tengah')),
('KTM', _('Kalimantan Timur')),
('BBL', _('Kepulauan Bangka-Belitung')),
('KRI', _('Kepulauan Riau')),
('LPG', _('Lampung')),
('MLK', _('Maluku')),
('MUT', _('Maluku Utara')),
('NTB', _('Nusa Tenggara Barat')),
('NTT', _('Nusa Tenggara Timur')),
('PPA', _('Papua')),
('PPB', _('Papua Barat')),
('RIU', _('Riau')),
('SLB', _('Sulawesi Barat')),
('SLS', _('Sulawesi Selatan')),
('SLT', _('Sulawesi Tengah')),
('SLR', _('Sulawesi Tenggara')),
('SLU', _('Sulawesi Utara')),
('SMB', _('Sumatera Barat')),
('SMS', _('Sumatera Selatan')),
('SMU', _('Sumatera Utara')),
)
LICENSE_PLATE_PREFIX_CHOICES = (
('A', _('Banten')),
('AA', _('Magelang')),
('AB', _('Yogyakarta')),
('AD', _('Surakarta - Solo')),
('AE', _('Madiun')),
('AG', _('Kediri')),
('B', _('Jakarta')),
('BA', _('Sumatera Barat')),
('BB', _('Tapanuli')),
('BD', _('Bengkulu')),
('BE', _('Lampung')),
('BG', _('Sumatera Selatan')),
('BH', _('Jambi')),
('BK', _('Sumatera Utara')),
('BL', _('Nanggroe Aceh Darussalam')),
('BM', _('Riau')),
('BN', _('Kepulauan Bangka Belitung')),
('BP', _('Kepulauan Riau')),
('CC', _('Corps Consulate')),
('CD', _('Corps Diplomatic')),
('D', _('Bandung')),
('DA', _('Kalimantan Selatan')),
('DB', _('Sulawesi Utara Daratan')),
('DC', _('Sulawesi Barat')),
('DD', _('Sulawesi Selatan')),
('DE', _('Maluku')),
('DG', _('Maluku Utara')),
('DH', _('NTT - Timor')),
('DK', _('Bali')),
('DL', _('Sulawesi Utara Kepulauan')),
('DM', _('Gorontalo')),
('DN', _('Sulawesi Tengah')),
('DR', _('NTB - Lombok')),
('DS', _('Papua dan Papua Barat')),
('DT', _('Sulawesi Tenggara')),
('E', _('Cirebon')),
('EA', _('NTB - Sumbawa')),
('EB', _('NTT - Flores')),
('ED', _('NTT - Sumba')),
('F', _('Bogor')),
('G', _('Pekalongan')),
('H', _('Semarang')),
('K', _('Pati')),
('KB', _('Kalimantan Barat')),
('KH', _('Kalimantan Tengah')),
('KT', _('Kalimantan Timur')),
('L', _('Surabaya')),
('M', _('Madura')),
('N', _('Malang')),
('P', _('Jember')),
('R', _('Banyumas')),
('RI', _('Federal Government')),
('S', _('Bojonegoro')),
('T', _('Purwakarta')),
('W', _('Sidoarjo')),
('Z', _('Garut')),
)
|
mit
|
MERegistro/meregistro
|
django/contrib/gis/gdal/tests/test_envelope.py
|
68
|
3767
|
import unittest
from django.contrib.gis.gdal import Envelope, OGRException
class TestPoint(object):
def __init__(self, x, y):
self.x = x
self.y = y
class EnvelopeTest(unittest.TestCase):
def setUp(self):
self.e = Envelope(0, 0, 5, 5)
def test01_init(self):
"Testing Envelope initilization."
e1 = Envelope((0, 0, 5, 5))
e2 = Envelope(0, 0, 5, 5)
e3 = Envelope(0, '0', '5', 5) # Thanks to ww for this
e4 = Envelope(e1._envelope)
self.assertRaises(OGRException, Envelope, (5, 5, 0, 0))
self.assertRaises(OGRException, Envelope, 5, 5, 0, 0)
self.assertRaises(OGRException, Envelope, (0, 0, 5, 5, 3))
self.assertRaises(OGRException, Envelope, ())
self.assertRaises(ValueError, Envelope, 0, 'a', 5, 5)
self.assertRaises(TypeError, Envelope, u'foo')
self.assertRaises(OGRException, Envelope, (1, 1, 0, 0))
try:
Envelope(0, 0, 0, 0)
except OGRException:
self.fail("shouldn't raise an exception for min_x == max_x or min_y == max_y")
def test02_properties(self):
"Testing Envelope properties."
e = Envelope(0, 0, 2, 3)
self.assertEqual(0, e.min_x)
self.assertEqual(0, e.min_y)
self.assertEqual(2, e.max_x)
self.assertEqual(3, e.max_y)
self.assertEqual((0, 0), e.ll)
self.assertEqual((2, 3), e.ur)
self.assertEqual((0, 0, 2, 3), e.tuple)
self.assertEqual('POLYGON((0.0 0.0,0.0 3.0,2.0 3.0,2.0 0.0,0.0 0.0))', e.wkt)
self.assertEqual('(0.0, 0.0, 2.0, 3.0)', str(e))
def test03_equivalence(self):
"Testing Envelope equivalence."
e1 = Envelope(0.523, 0.217, 253.23, 523.69)
e2 = Envelope((0.523, 0.217, 253.23, 523.69))
self.assertEqual(e1, e2)
self.assertEqual((0.523, 0.217, 253.23, 523.69), e1)
def test04_expand_to_include_pt_2_params(self):
"Testing Envelope expand_to_include -- point as two parameters."
self.e.expand_to_include(2, 6)
self.assertEqual((0, 0, 5, 6), self.e)
self.e.expand_to_include(-1, -1)
self.assertEqual((-1, -1, 5, 6), self.e)
def test05_expand_to_include_pt_2_tuple(self):
"Testing Envelope expand_to_include -- point as a single 2-tuple parameter."
self.e.expand_to_include((10, 10))
self.assertEqual((0, 0, 10, 10), self.e)
self.e.expand_to_include((-10, -10))
self.assertEqual((-10, -10, 10, 10), self.e)
def test06_expand_to_include_extent_4_params(self):
"Testing Envelope expand_to_include -- extent as 4 parameters."
self.e.expand_to_include(-1, 1, 3, 7)
self.assertEqual((-1, 0, 5, 7), self.e)
def test06_expand_to_include_extent_4_tuple(self):
"Testing Envelope expand_to_include -- extent as a single 4-tuple parameter."
self.e.expand_to_include((-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test07_expand_to_include_envelope(self):
"Testing Envelope expand_to_include with Envelope as parameter."
self.e.expand_to_include(Envelope(-1, 1, 3, 7))
self.assertEqual((-1, 0, 5, 7), self.e)
def test08_expand_to_include_point(self):
"Testing Envelope expand_to_include with Point as parameter."
self.e.expand_to_include(TestPoint(-1, 1))
self.assertEqual((-1, 0, 5, 5), self.e)
self.e.expand_to_include(TestPoint(10, 10))
self.assertEqual((-1, 0, 10, 10), self.e)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(EnvelopeTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
bsd-3-clause
|
Just-D/chromium-1
|
third_party/pexpect/ANSI.py
|
171
|
12646
|
"""This implements an ANSI (VT100) terminal emulator as a subclass of screen.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
# references:
# http://en.wikipedia.org/wiki/ANSI_escape_code
# http://www.retards.org/terminals/vt102.html
# http://vt100.net/docs/vt102-ug/contents.html
# http://vt100.net/docs/vt220-rm/
# http://www.termsys.demon.co.uk/vtansi.htm
import screen
import FSM
import copy
import string
#
# The 'Do.*' functions are helper functions for the ANSI class.
#
def DoEmit (fsm):
screen = fsm.memory[0]
screen.write_ch(fsm.input_symbol)
def DoStartNumber (fsm):
fsm.memory.append (fsm.input_symbol)
def DoBuildNumber (fsm):
ns = fsm.memory.pop()
ns = ns + fsm.input_symbol
fsm.memory.append (ns)
def DoBackOne (fsm):
screen = fsm.memory[0]
screen.cursor_back ()
def DoBack (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_back (count)
def DoDownOne (fsm):
screen = fsm.memory[0]
screen.cursor_down ()
def DoDown (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_down (count)
def DoForwardOne (fsm):
screen = fsm.memory[0]
screen.cursor_forward ()
def DoForward (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_forward (count)
def DoUpReverse (fsm):
screen = fsm.memory[0]
screen.cursor_up_reverse()
def DoUpOne (fsm):
screen = fsm.memory[0]
screen.cursor_up ()
def DoUp (fsm):
count = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_up (count)
def DoHome (fsm):
c = int(fsm.memory.pop())
r = int(fsm.memory.pop())
screen = fsm.memory[0]
screen.cursor_home (r,c)
def DoHomeOrigin (fsm):
c = 1
r = 1
screen = fsm.memory[0]
screen.cursor_home (r,c)
def DoEraseDown (fsm):
screen = fsm.memory[0]
screen.erase_down()
def DoErase (fsm):
arg = int(fsm.memory.pop())
screen = fsm.memory[0]
if arg == 0:
screen.erase_down()
elif arg == 1:
screen.erase_up()
elif arg == 2:
screen.erase_screen()
def DoEraseEndOfLine (fsm):
screen = fsm.memory[0]
screen.erase_end_of_line()
def DoEraseLine (fsm):
arg = int(fsm.memory.pop())
screen = fsm.memory[0]
if arg == 0:
screen.erase_end_of_line()
elif arg == 1:
screen.erase_start_of_line()
elif arg == 2:
screen.erase_line()
def DoEnableScroll (fsm):
screen = fsm.memory[0]
screen.scroll_screen()
def DoCursorSave (fsm):
screen = fsm.memory[0]
screen.cursor_save_attrs()
def DoCursorRestore (fsm):
screen = fsm.memory[0]
screen.cursor_restore_attrs()
def DoScrollRegion (fsm):
screen = fsm.memory[0]
r2 = int(fsm.memory.pop())
r1 = int(fsm.memory.pop())
screen.scroll_screen_rows (r1,r2)
def DoMode (fsm):
screen = fsm.memory[0]
mode = fsm.memory.pop() # Should be 4
# screen.setReplaceMode ()
def DoLog (fsm):
screen = fsm.memory[0]
fsm.memory = [screen]
fout = open ('log', 'a')
fout.write (fsm.input_symbol + ',' + fsm.current_state + '\n')
fout.close()
class term (screen.screen):
"""This class is an abstract, generic terminal.
This does nothing. This is a placeholder that
provides a common base class for other terminals
such as an ANSI terminal. """
def __init__ (self, r=24, c=80):
screen.screen.__init__(self, r,c)
class ANSI (term):
"""This class implements an ANSI (VT100) terminal.
It is a stream filter that recognizes ANSI terminal
escape sequences and maintains the state of a screen object. """
def __init__ (self, r=24,c=80):
term.__init__(self,r,c)
#self.screen = screen (24,80)
self.state = FSM.FSM ('INIT',[self])
self.state.set_default_transition (DoLog, 'INIT')
self.state.add_transition_any ('INIT', DoEmit, 'INIT')
self.state.add_transition ('\x1b', 'INIT', None, 'ESC')
self.state.add_transition_any ('ESC', DoLog, 'INIT')
self.state.add_transition ('(', 'ESC', None, 'G0SCS')
self.state.add_transition (')', 'ESC', None, 'G1SCS')
self.state.add_transition_list ('AB012', 'G0SCS', None, 'INIT')
self.state.add_transition_list ('AB012', 'G1SCS', None, 'INIT')
self.state.add_transition ('7', 'ESC', DoCursorSave, 'INIT')
self.state.add_transition ('8', 'ESC', DoCursorRestore, 'INIT')
self.state.add_transition ('M', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('>', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('<', 'ESC', DoUpReverse, 'INIT')
self.state.add_transition ('=', 'ESC', None, 'INIT') # Selects application keypad.
self.state.add_transition ('#', 'ESC', None, 'GRAPHICS_POUND')
self.state.add_transition_any ('GRAPHICS_POUND', None, 'INIT')
self.state.add_transition ('[', 'ESC', None, 'ELB')
# ELB means Escape Left Bracket. That is ^[[
self.state.add_transition ('H', 'ELB', DoHomeOrigin, 'INIT')
self.state.add_transition ('D', 'ELB', DoBackOne, 'INIT')
self.state.add_transition ('B', 'ELB', DoDownOne, 'INIT')
self.state.add_transition ('C', 'ELB', DoForwardOne, 'INIT')
self.state.add_transition ('A', 'ELB', DoUpOne, 'INIT')
self.state.add_transition ('J', 'ELB', DoEraseDown, 'INIT')
self.state.add_transition ('K', 'ELB', DoEraseEndOfLine, 'INIT')
self.state.add_transition ('r', 'ELB', DoEnableScroll, 'INIT')
self.state.add_transition ('m', 'ELB', None, 'INIT')
self.state.add_transition ('?', 'ELB', None, 'MODECRAP')
self.state.add_transition_list (string.digits, 'ELB', DoStartNumber, 'NUMBER_1')
self.state.add_transition_list (string.digits, 'NUMBER_1', DoBuildNumber, 'NUMBER_1')
self.state.add_transition ('D', 'NUMBER_1', DoBack, 'INIT')
self.state.add_transition ('B', 'NUMBER_1', DoDown, 'INIT')
self.state.add_transition ('C', 'NUMBER_1', DoForward, 'INIT')
self.state.add_transition ('A', 'NUMBER_1', DoUp, 'INIT')
self.state.add_transition ('J', 'NUMBER_1', DoErase, 'INIT')
self.state.add_transition ('K', 'NUMBER_1', DoEraseLine, 'INIT')
self.state.add_transition ('l', 'NUMBER_1', DoMode, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_1', None, 'INIT')
### LED control. Same implementation problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_1', None, 'INIT')
# \E[?47h switch to alternate screen
# \E[?47l restores to normal screen from alternate screen.
self.state.add_transition_list (string.digits, 'MODECRAP', DoStartNumber, 'MODECRAP_NUM')
self.state.add_transition_list (string.digits, 'MODECRAP_NUM', DoBuildNumber, 'MODECRAP_NUM')
self.state.add_transition ('l', 'MODECRAP_NUM', None, 'INIT')
self.state.add_transition ('h', 'MODECRAP_NUM', None, 'INIT')
#RM Reset Mode Esc [ Ps l none
self.state.add_transition (';', 'NUMBER_1', None, 'SEMICOLON')
self.state.add_transition_any ('SEMICOLON', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON', DoStartNumber, 'NUMBER_2')
self.state.add_transition_list (string.digits, 'NUMBER_2', DoBuildNumber, 'NUMBER_2')
self.state.add_transition_any ('NUMBER_2', DoLog, 'INIT')
self.state.add_transition ('H', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('f', 'NUMBER_2', DoHome, 'INIT')
self.state.add_transition ('r', 'NUMBER_2', DoScrollRegion, 'INIT')
### It gets worse... the 'm' code can have infinite number of
### number;number;number before it. I've never seen more than two,
### but the specs say it's allowed. crap!
self.state.add_transition ('m', 'NUMBER_2', None, 'INIT')
### LED control. Same problem as 'm' code.
self.state.add_transition ('q', 'NUMBER_2', None, 'INIT')
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
# Create a state for 'q' and 'm' which allows an infinite number of ignored numbers
self.state.add_transition_any ('SEMICOLON_X', DoLog, 'INIT')
self.state.add_transition_list (string.digits, 'SEMICOLON_X', None, 'NUMBER_X')
self.state.add_transition_any ('NUMBER_X', DoLog, 'INIT')
self.state.add_transition ('m', 'NUMBER_X', None, 'INIT')
self.state.add_transition ('q', 'NUMBER_X', None, 'INIT')
self.state.add_transition (';', 'NUMBER_2', None, 'SEMICOLON_X')
def process (self, c):
self.state.process(c)
def process_list (self, l):
self.write(l)
def write (self, s):
for c in s:
self.process(c)
def flush (self):
pass
def write_ch (self, ch):
"""This puts a character at the current cursor position. The cursor
position is moved forward with wrap-around, but no scrolling is done if
the cursor hits the lower-right corner of the screen. """
        #\r and \n both produce a call to cr() and crlf(), respectively.
ch = ch[0]
if ch == '\r':
self.cr()
return
if ch == '\n':
self.crlf()
return
if ch == chr(screen.BS):
self.cursor_back()
return
if ch not in string.printable:
fout = open ('log', 'a')
fout.write ('Nonprint: ' + str(ord(ch)) + '\n')
fout.close()
return
self.put_abs(self.cur_r, self.cur_c, ch)
old_r = self.cur_r
old_c = self.cur_c
self.cursor_forward()
if old_c == self.cur_c:
self.cursor_down()
if old_r != self.cur_r:
self.cursor_home (self.cur_r, 1)
else:
self.scroll_up ()
self.cursor_home (self.cur_r, 1)
self.erase_line()
# def test (self):
#
# import sys
# write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)\n' + \
# 'I can see a bare-bottomed mandrill.\n' + \
# '(Slyly eyeing his other nostril.)\n' + \
# 'If it jumps inside there too I really don\'t know what to do\n' + \
# 'I\'ll be the proud possessor of a kind of nasal zoo.\n' + \
# '(A nasal zoo.)\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(And what is worst of all it constantly explodes.)\n' + \
# '"Ferrets don\'t explode," you say\n' + \
# 'But it happened nine times yesterday\n' + \
# 'And I should know for each time I was standing in the way.\n' + \
# 'I\'ve got a ferret sticking up my nose.\n' + \
# '(He\'s got a ferret sticking up his nose.)\n' + \
# 'How it got there I can\'t tell\n' + \
# 'But now it\'s there it hurts like hell\n' + \
# 'And what is more it radically affects my sense of smell.\n' + \
# '(His sense of smell.)'
# self.fill('.')
# self.cursor_home()
# for c in write_text:
# self.write_ch (c)
# print str(self)
#
#if __name__ == '__main__':
# t = ANSI(6,65)
# t.test()
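# A minimal usage sketch, kept commented out like the test above (it assumes the
# ANSI(rows, cols) screen emulator and str() rendering defined earlier in this
# file; the escape sequences shown are only illustrative):
#
# term = ANSI(6, 65)
# term.write('hello\x1b[2;1Hworld')   # CSI "2;1H" homes the cursor to row 2, col 1
# term.write('\x1b[1;31mred\x1b[0m')  # 'm' (SGR) parameters are parsed and ignored
# print str(term)                     # render the 6x65 character screen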
|
bsd-3-clause
|
geertj/gruvi
|
tests/test_transports.py
|
2
|
7476
|
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import socket
import unittest
import pyuv
from support import UnitTest
from gruvi.transports import TransportError, Transport, DatagramTransport
class ProtocolLogger(object):
"""Utility protocol class that implements both the stream and datagram
protocols and that logs all callbacks."""
def __init__(self):
self.events = []
self.transport = None
def get_events(self, typ):
return [ev for ev in self.events if ev[0] == typ]
def connection_made(self, transport):
self.events.append(('connection_made', transport))
self.transport = transport
def data_received(self, data):
# collapse data_received for easier verification
if self.events[-1][0] == 'data_received':
self.events[-1] = ('data_received', self.events[-1][1] + data)
else:
self.events.append(('data_received', data))
def eof_received(self):
self.events.append(('eof_received',))
def connection_lost(self, exc=None):
self.events.append(('connection_lost', exc))
def datagram_received(self, data, addr):
self.events.append(('datagram_received', data, addr))
def error_received(self, exc):
self.events.append(('error_received', exc))
def resume_writing(self):
pass
def pause_writing(self):
pass
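# Illustrative only (not used by the tests): data_received() above collapses
# consecutive chunks into a single event, which keeps assertions on the event
# log simple.
def _demo_event_collapsing():
    p = ProtocolLogger()
    p.connection_made(None)
    p.data_received(b'foo')
    p.data_received(b'bar')  # merged with the previous chunk
    assert p.events[-1] == ('data_received', b'foobar')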
class EchoServer(ProtocolLogger):
def data_received(self, data):
super(EchoServer, self).data_received(data)
self.transport.write(data)
class EventLoopTest(UnitTest):
def setUp(self):
super(EventLoopTest, self).setUp()
self.loop = pyuv.Loop()
self.errors = []
def catch_errors(self, callback):
# Run *callback*. If it raises an exception, store it and stop the
# loop.
def run_callback(*args):
try:
callback(*args)
except Exception as e:
self.errors.append(e)
self.loop.stop()
return run_callback
def run_loop(self, timeout):
# Run the loop for at most *timeout* seconds. Re-raise any exception
# that was caught by a "catch_errors" callback.
timer = pyuv.Timer(self.loop)
def stop_loop(handle):
self.loop.stop()
timer.start(stop_loop, timeout, 0)
self.loop.run()
if self.errors:
raise self.errors[0]
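# A standalone sketch of the catch-and-stop pattern used by catch_errors() and
# run_loop() above (hypothetical helper, not called by the tests): exceptions
# raised inside a libuv callback are stored, the loop is stopped, and the first
# stored error is re-raised once the loop has returned.
def _demo_catch_and_reraise():
    loop = pyuv.Loop()
    errors = []
    def failing_callback(handle):
        try:
            raise RuntimeError('boom')
        except Exception as e:
            errors.append(e)
            loop.stop()
    timer = pyuv.Timer(loop)
    timer.start(failing_callback, 0, 0)
    loop.run()
    if errors:
        raise errors[0]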
class TransportTest(object):
def create_handle(self):
raise NotImplementedError
def bind_handle(self, handle):
raise NotImplementedError
def create_transport(self, handle, protocol, server_side):
transport = Transport(handle)
transport.start(protocol)
return transport
def test_echo(self):
# Test a simple echo server. The client writes some data and then
# writes an EOF (if the transport supports writing EOF). The server
# echoes the data and closes the connection upon receipt of the EOF.
@self.catch_errors
def echo_server(handle, error):
if error:
raise TransportError.from_errno(error)
client = self.create_handle()
handle.accept(client)
protocols[0] = EchoServer()
transports[0] = self.create_transport(client, protocols[0], True)
@self.catch_errors
def echo_client(handle, error):
if error:
raise TransportError.from_errno(error)
protocols[1] = ProtocolLogger()
trans = transports[1] = self.create_transport(handle, protocols[1], False)
trans.write(b'foo\n')
trans.write(b'bar\n')
trans.writelines([b'qux', b'quux'])
if trans.can_write_eof():
trans.write_eof()
transports = [None, None]
protocols = [None, None]
server = self.create_handle()
addr = self.bind_handle(server)
server.listen(echo_server)
client = self.create_handle()
client.connect(addr, echo_client)
self.run_loop(0.1)
strans, ctrans = transports
sproto, cproto = protocols
self.assertIsInstance(strans, Transport)
self.assertIsInstance(ctrans, Transport)
self.assertIsInstance(sproto, EchoServer)
self.assertIsInstance(cproto, ProtocolLogger)
ctrans.close()
self.run_loop(0.1)
sevents = sproto.events
self.assertIn(len(sevents), (3, 4))
self.assertEqual(sevents[0], ('connection_made', strans))
self.assertEqual(sevents[1], ('data_received', b'foo\nbar\nquxquux'))
if strans.can_write_eof():
self.assertEqual(sevents[2], ('eof_received',))
self.assertEqual(sevents[-1][0], 'connection_lost')
cevents = cproto.events
self.assertIn(len(cevents), (3, 4))
self.assertEqual(cevents[0], ('connection_made', ctrans))
self.assertEqual(cevents[1], ('data_received', b'foo\nbar\nquxquux'))
if ctrans.can_write_eof():
self.assertEqual(cevents[2], ('eof_received',))
self.assertEqual(cevents[-1][0], 'connection_lost')
class TestTcpTransport(TransportTest, EventLoopTest):
def create_handle(self):
return pyuv.TCP(self.loop)
def bind_handle(self, handle):
host = socket.gethostbyname('localhost')
handle.bind((host, 0))
return handle.getsockname()
class TestPipeTransport(TransportTest, EventLoopTest):
def create_handle(self):
return pyuv.Pipe(self.loop)
def bind_handle(self, handle):
addr = self.pipename('test-pipe')
handle.bind(addr)
return addr
class TestUdpTransport(EventLoopTest):
def create_handle(self):
return pyuv.UDP(self.loop)
def bind_handle(self, handle):
host = socket.gethostbyname('localhost')
handle.bind((host, 0))
return handle.getsockname()
def create_transport(self, handle, protocol):
transport = DatagramTransport(handle)
transport.start(protocol)
return transport
def test_echo(self):
server = self.create_handle()
saddr = self.bind_handle(server)
sproto = ProtocolLogger()
strans = self.create_transport(server, sproto)
client = self.create_handle()
caddr = self.bind_handle(client)
cproto = ProtocolLogger()
ctrans = self.create_transport(client, cproto)
# Try 5 times (since UDP is lossy)
for i in range(5):
ctrans.sendto(b'foo', saddr)
for i in range(5):
strans.sendto(b'bar', caddr)
self.run_loop(0.1)
sevents = sproto.get_events('datagram_received')
self.assertGreater(len(sevents), 0)
for event in sevents:
self.assertEqual(event[1], b'foo')
self.assertEqual(event[2], caddr)
cevents = cproto.get_events('datagram_received')
self.assertGreater(len(cevents), 0)
for event in cevents:
self.assertEqual(event[1], b'bar')
self.assertEqual(event[2], saddr)
if __name__ == '__main__':
unittest.main()
|
mit
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.6.0/Lib/idlelib/history.py
|
13
|
4043
|
"Implement Idle Shell history mechanism with History class"
from idlelib.config import idleConf
class History:
''' Implement Idle Shell history mechanism.
store - Store source statement (called from pyshell.resetoutput).
fetch - Fetch stored statement matching prefix already entered.
history_next - Bound to <<history-next>> event (default Alt-N).
history_prev - Bound to <<history-prev>> event (default Alt-P).
'''
def __init__(self, text):
'''Initialize data attributes and bind event methods.
.text - Idle wrapper of tk Text widget, with .bell().
.history - source statements, possibly with multiple lines.
.prefix - source already entered at prompt; filters history list.
.pointer - index into history.
.cyclic - wrap around history list (or not).
'''
self.text = text
self.history = []
self.prefix = None
self.pointer = None
self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
text.bind("<<history-previous>>", self.history_prev)
text.bind("<<history-next>>", self.history_next)
def history_next(self, event):
"Fetch later statement; start with ealiest if cyclic."
self.fetch(reverse=False)
return "break"
def history_prev(self, event):
"Fetch earlier statement; start with most recent."
self.fetch(reverse=True)
return "break"
def fetch(self, reverse):
'''Fetch statement and replace current line in text widget.
Set prefix and pointer as needed for successive fetches.
Reset them to None, None when returning to the start line.
Sound the bell when returning to the start line, or when a line
cannot be left because cyclic is False.
'''
nhist = len(self.history)
pointer = self.pointer
prefix = self.prefix
if pointer is not None and prefix is not None:
if self.text.compare("insert", "!=", "end-1c") or \
self.text.get("iomark", "end-1c") != self.history[pointer]:
pointer = prefix = None
self.text.mark_set("insert", "end-1c") # != after cursor move
if pointer is None or prefix is None:
prefix = self.text.get("iomark", "end-1c")
if reverse:
pointer = nhist # will be decremented
else:
if self.cyclic:
pointer = -1 # will be incremented
else: # abort history_next
self.text.bell()
return
nprefix = len(prefix)
while 1:
pointer += -1 if reverse else 1
if pointer < 0 or pointer >= nhist:
self.text.bell()
if not self.cyclic and pointer < 0: # abort history_prev
return
else:
if self.text.get("iomark", "end-1c") != prefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", prefix)
pointer = prefix = None
break
item = self.history[pointer]
if item[:nprefix] == prefix and len(item) > nprefix:
self.text.delete("iomark", "end-1c")
self.text.insert("iomark", item)
break
self.text.see("insert")
self.text.tag_remove("sel", "1.0", "end")
self.pointer = pointer
self.prefix = prefix
def store(self, source):
"Store Shell input statement into history list."
source = source.strip()
if len(source) > 2:
# avoid duplicates
try:
self.history.remove(source)
except ValueError:
pass
self.history.append(source)
self.pointer = None
self.prefix = None
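# Illustrative helper (not part of idlelib): store() de-duplicates by moving a
# re-entered statement to the end of the history list, so history-prev returns
# it first. The fake Text object below only satisfies the bind() calls made in
# __init__.
def _demo_store_dedup():
    class _FakeText:
        def bind(self, *args):
            pass
    h = History(_FakeText())
    h.store('print(1)')
    h.store('print(2)')
    h.store('print(1)')  # moved to the end, not duplicated
    assert h.history == ['print(2)', 'print(1)']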
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_history', verbosity=2, exit=False)
|
mit
|
rrohan/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
178
|
8006
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
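# A small self-contained sketch (not part of scikit-learn) of the weighted
# 'hard' voting step used in predict() above: np.bincount sums the
# per-classifier weights for each encoded class label and argmax picks the
# winner for every sample.
def _demo_weighted_hard_vote():
    predictions = np.array([[0, 0, 1],   # sample 1: two of three votes for class 0
                            [1, 1, 0]])  # sample 2: two of three votes for class 1
    weights = [2, 1, 1]
    maj = np.apply_along_axis(
        lambda x: np.argmax(np.bincount(x, weights=weights)),
        axis=1, arr=predictions)
    assert list(maj) == [0, 1]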
|
bsd-3-clause
|
LinuxChristian/home-assistant
|
homeassistant/components/binary_sensor/wink.py
|
4
|
5888
|
"""
Support for Wink binary sensors.
For more details about this platform, please refer to the documentation
at https://home-assistant.io/components/binary_sensor.wink/
"""
import asyncio
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.wink import WinkDevice, DOMAIN
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['wink']
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
'opened': 'opening',
'brightness': 'light',
'vibration': 'vibration',
'loudness': 'sound',
'noise': 'sound',
'capturing_audio': 'sound',
'liquid_detected': 'moisture',
'motion': 'motion',
'presence': 'occupancy',
'co_detected': 'gas',
'smoke_detected': 'smoke',
'capturing_video': None
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Wink binary sensor platform."""
import pywink
for sensor in pywink.get_sensors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
if sensor.capability() in SENSOR_TYPES:
add_devices([WinkBinarySensorDevice(sensor, hass)])
for key in pywink.get_keys():
_id = key.object_id() + key.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkBinarySensorDevice(key, hass)])
for sensor in pywink.get_smoke_and_co_detectors():
_id = sensor.object_id() + sensor.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkSmokeDetector(sensor, hass)])
for hub in pywink.get_hubs():
_id = hub.object_id() + hub.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkHub(hub, hass)])
for remote in pywink.get_remotes():
_id = remote.object_id() + remote.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkRemote(remote, hass)])
for button in pywink.get_buttons():
_id = button.object_id() + button.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkButton(button, hass)])
for gang in pywink.get_gangs():
_id = gang.object_id() + gang.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkGang(gang, hass)])
for door_bell_sensor in pywink.get_door_bells():
_id = door_bell_sensor.object_id() + door_bell_sensor.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
add_devices([WinkBinarySensorDevice(door_bell_sensor, hass)])
for camera_sensor in pywink.get_cameras():
_id = camera_sensor.object_id() + camera_sensor.name()
if _id not in hass.data[DOMAIN]['unique_ids']:
try:
if camera_sensor.capability() in SENSOR_TYPES:
add_devices([WinkBinarySensorDevice(camera_sensor, hass)])
except AttributeError:
_LOGGER.info("Device isn't a sensor, skipping")
class WinkBinarySensorDevice(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink binary sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink binary sensor."""
super().__init__(wink, hass)
if hasattr(self.wink, 'unit'):
self._unit_of_measurement = self.wink.unit()
else:
self._unit_of_measurement = None
if hasattr(self.wink, 'capability'):
self.capability = self.wink.capability()
else:
self.capability = None
@asyncio.coroutine
def async_added_to_hass(self):
"""Callback when entity is added to hass."""
self.hass.data[DOMAIN]['entities']['binary_sensor'].append(self)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return SENSOR_TYPES.get(self.capability)
class WinkSmokeDetector(WinkBinarySensorDevice):
"""Representation of a Wink Smoke detector."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'test_activated': self.wink.test_activated()
}
class WinkHub(WinkBinarySensorDevice):
"""Representation of a Wink Hub."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'update needed': self.wink.update_needed(),
'firmware version': self.wink.firmware_version()
}
class WinkRemote(WinkBinarySensorDevice):
"""Representation of a Wink Lutron Connected bulb remote."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'button_on_pressed': self.wink.button_on_pressed(),
'button_off_pressed': self.wink.button_off_pressed(),
'button_up_pressed': self.wink.button_up_pressed(),
'button_down_pressed': self.wink.button_down_pressed()
}
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return None
class WinkButton(WinkBinarySensorDevice):
"""Representation of a Wink Relay button."""
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'pressed': self.wink.pressed(),
'long_pressed': self.wink.long_pressed()
}
class WinkGang(WinkBinarySensorDevice):
"""Representation of a Wink Relay gang."""
@property
def is_on(self):
"""Return true if the gang is connected."""
return self.wink.state()
|
apache-2.0
|
imageworks/OpenShadingLanguage
|
src/build-scripts/docdeep.py
|
2
|
16511
|
#!/usr/bin/env python
from __future__ import print_function, absolute_import
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
# ----------------------------------------------------------------------
# To generate docdeep for docdeep, run this:
# python3 docdeep.py -d docdeep docdeep.py > docdeep.md.html
# ----------------------------------------------------------------------
### <doc docdeep>
###
### **docdeep**
###
### Introduction
### ============
###
### `docdeep` is a utility that extracts documentation from source code and
### turns it into beautiful Markdeep documents.
###
### [Markdeep](https://casual-effects.com/markdeep) is a package by Morgan
### McGuire, an extension of Markdown with a whole bunch of nice features
### that make it extra nice for code, math, and diagrams. Read the Markdeep
### web page and look at the examples for details.
###
### `docdeep` is a little like `Doxygen`. A poor person's Doxygen. A very
### poor person. But without Doxygen's awkward syntax -- you just write
### the comments in Markdeep. And the output is very aesthetically pleasing.
### But it doesn't have any bells and whistles, like cross-referencing.
###
###
###
### Markup controls
### ===============
###
### ## Documentation lines.
###
### A line whose first three non-whitespace characters are either `///` or
### `###` is called a *doc-line*. Any other line is a *non-doc-line*.
###
### The `///` or `###` is the *doc-symbol*. Generally speaking, the `///`
### doc-symbol is used when doc-marking C or C++, and `###` when doc-marking
### Python, shell scripts, or other programs in languages where `#` is the
### comment character. For simplicity, in the rest of this document, we will
### always use `///` in our examples.
###
### ## Doc regions
###
### Doc-lines by themselves don't do much, unless they are within a
### *doc-region*. The beginning of a doc-region is denoted by a doc-line
### whose first characters after the doc-symbol are
###
### <code>
### /// <doc region-name>
### </code>
###
### A doc region is ended one of three ways:
###
### 1. `</doc>` ends the active region.
### 2. `<doc newname>` setting a new region name.
### 3. The end of the source file.
###
### When there is an active named region, any other doc lines will be
### appended (after stripping off the doc symbol itself) to the current
### doc region text.
###
### You can have multiple regions with the same name, in entirely separate
### parts of your source code. They will just be concatenated.
###
### Only one main doc region will be output by the `docdeep` program,
### specified with the `-d` command line argument. Any other doc-regions
### will not be included in the documentation output of that run.
###
### However, one doc-region may *include* the text of another doc-region
### as follows:
###
### <code>
### /// <inc region-name>
### </code>
###
### ## Doc continuations and code regions
###
### If three dots follow the `doc` directive, like this:
###
### <code>
### /// <doc... region-name>
### </code>
###
### This causes *all* lines (until the end of the doc region) to be
### interpreted as Markdeep documentation, even if it doesn't start with
### the doc-symbol.
###
### Furthermore, denoting any region thusly:
### <code>
### /// <code>
### ...
### /// </code>
### </code>
###
### will designate the contents not only as a doc continuation, but also
### to be formatted as source code (mono space, syntax highlighted).
###
### ## API Explanations
###
### There is a special syntax for a common case: call-by-call explanations
### of API methods and their explanations. Of course, this may constitute
### the bulk of your auto-generated documentation. The two cases we are
### concerned about are *pre-code comments* and *post-code comments*.
###
### For pre-code comments, the contiguous comment region associated with
### a declaration immediately precedes the declaration. Such a comment
### set has its first line start with `///>`. For post-code comments,
### the documentation comment follows the code declaration, and this is
### designated by having its first line start with `///<`. I like to
### just remember that the `<` or `>` points in the direction of the code
### declaration that the comment applies to.
###
### This is perhaps best explained by example:
###
### <code>
### ///> Do the foo operation. Everybody knows what this is. You can write
### /// any comment you want, with full markdeep formatting! It applies
### /// to the declaration that will immediately follow this comment.
### float foo (float x, float y);
###
### void bar (int n);
### ///< The bar procedure. Note that with post-comments, you explain the
### /// function or method *after* the declaration itself.
### </code>
###
### This will generate the following output:
###
### <code>
### float foo (float x, float y);
### </code>
### Do the foo operation. Everybody knows what this is. You can write
### any comment you want, with full markdeep formatting! It applies
### to the declaration that will immediately follow this comment.
###
### <code>
### void bar (int n);
### </code>
### The bar procedure. Note that with post-comments, you explain the
### function or method *after* the declaration itself.
###
### -----------------------
###
### It is important to keep in mind that **this API explanation is extremely
### stupid**. The comment documentation is just a series of doc-lines with
### no break between them, whose *first* line starts with `///>` or `///<`
### (for pre-code and post-code docs, respectively). And the code that it
### documents is just the immediately preceding or following set of non-doc
### lines that contain *no* whitespace-only lines. There is no syntax
### parsing going on here -- it just takes those non-blank, non-doc lines,
### strips off any semicolons and any characters following the semicolon
### from each line in the block, and deletes anything in the whole block
### at or after the first opening brace (`{`) found in the block. Strangely
### enough, this is almost exactly what I want to document.
###
###
### Command line arguments
### ======================
###
### Run from the command line as follows:
###
### `$ python docdeep.py -d` *region* `-s` *style.css* `input1.h input2.cpp [...] > output.md.html`
###
### Arguments:
###
### `-d` *region*
### : Specifies the name of the doc-region to generate output for. This is
### required.
### `-s` *stylesheet*
### : This optional argument will specify which CSS style sheet to include a
### reference to.
###
### Any number of filenames may be specified. They will be processed in the
### order that they appear on the command line.
###
import sys
import re
import argparse
# pattern for beginning of special comment
docline_pattern = re.compile ('(^[ \t]*)((///)|(###))[<>]?([ \t]|$)(.*)')
doc_begin_pattern = re.compile ('(^[ \t]*)((///)|(###))[ ]*<doc(\.\.\.)?[ ]*(\w*)[ ]*>')
docendpattern = re.compile ('(^[ \t]*)((///)|(###))[ ]*</doc>')
api_precomment_pattern = re.compile ('(^[ \t]*)((///)|(###))>( )+( )?(.*)')
api_postcomment_pattern = re.compile ('(^[ \t]*)((///)|(###))<( )+( )?(.*)')
inc_pattern = re.compile ('(^[ \t]*)<inc[ ]+([^>]+)>')
code_pattern = re.compile ('(^[ \t]*)((///)|(###))[ ]*<code>')
codeend_pattern = re.compile ('(^[ \t]*)((///)|(###))[ ]*</code>')
trimbrace_pattern = re.compile ("([^{]*)")
trimsemi_pattern = re.compile ("([^;]*)")
blankline_pattern = re.compile ("^[ \t]*$")
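# Illustrative only: how a doc-line is decomposed by docline_pattern.
# group(1) is the leading indentation, group(6) is the documentation payload.
def _demo_docline_pattern():
    m = docline_pattern.match('    ### Some documentation text')
    assert m is not None
    assert m.group(1) == '    '
    assert m.group(6) == 'Some documentation text'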
region_name = '_'
alldocs = { '_' : '' }
DEBUG = False
# Utility: blob is a string (possibly containing many "lines" separated by
# '\n'). For each line of text in the blob, shave off the first `indent`
# characters. Then reassemble and return.
def shave_each_line (blob, indent) :
r = ''
lines = blob.split ('\n')
for line in lines :
r += line[indent:] + '\n'
# strip trailing newline unless the original blob had a trailing newline
if len(r) and r[-1] == '\n' and blob[-1] != '\n':
r = r[:-1]
return r
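# For example (illustrative sketch, not called anywhere in this script):
# shaving four columns of indentation from a two-line blob.
def _demo_shave_each_line():
    assert shave_each_line('    x = 1\n    y = 2', 4) == 'x = 1\ny = 2'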
# Enumerated type for the state machine.
class LineType :
BLANK = 0
DOC = 2
NONDOC = 3
# Append this doc blob to the current region, and clear the blob
def flush_doc_blob (doc_blob) :
global alldocs, region_name
if len(doc_blob) :
alldocs[region_name] += doc_blob + '\n'
doc_blob = ''
def read_input (filename, file) :
global region_name, alldocs
doc_cont_mode = False
post_api_mode = False
pre_api_mode = False
indent = 0 # Amount of indentation we saw on the last doc line
lines = file.read().splitlines()
code_blob = '' # Running set of contiguous non-doc, non-blank lines
doc_blob = '' # Running set of contiguous doc lines
# state machine
line_type = LineType.BLANK
last_line_type = LineType.BLANK
for line in lines :
# Figure out what type of line we're on, and remember what type
# of line we saw last.
last_line_type = line_type
if blankline_pattern.match(line) :
line_type = LineType.BLANK
elif docline_pattern.match(line) :
line_type = LineType.DOC
else :
line_type = LineType.NONDOC
# print ('<!-- LINE----- ', line_type, line, ' -->\n')
# Not a doc line, but we're in "continuation mode": append
# (unindented) to the doc blob.
if line_type != LineType.DOC and doc_cont_mode :
doc_blob += line[indent:] + '\n'
if DEBUG :
alldocs[region_name] += '<!-- CONT ' + line[:35] + ' -->\n'
continue
# Blank line, not in continuation mode
if line_type == LineType.BLANK :
if DEBUG and last_line_type != LineType.BLANK :
alldocs[region_name] += '<!-- BLANK -->\n'
# FIXME: does this end a post-declaration API doc?
if pre_api_mode : # this blank ends a pre-api comment
continue # ...keep reading
# If this line ends a post-api comment, output the code blob
if post_api_mode :
if DEBUG :
alldocs[region_name] += ' <!-- blank ended post-api -->\n'
# Trim everything past the first ; or { from the code blob
alllines = code_blob.split('\n')
# code_blob = ''
new_code_blob = ''
for oneline in alllines :
m = trimsemi_pattern.search (oneline)
oneline = m.group(1)
new_code_blob += oneline + '\n'
new_code_blob = new_code_blob.rstrip(' \n\r')
m = trimbrace_pattern.match(new_code_blob)
alldocs[region_name] += ('~~~C\n' +
shave_each_line(m.group(1),indent) +
'\n~~~\n')
post_api_mode = False
# If this line ends a doc blob, output it
if len(doc_blob) :
alldocs[region_name] += doc_blob + '\n'
doc_blob = ''
# Blank lines clear the code blob
code_blob = ''
continue
# Non-blank, non-doc line: append to code blob and move on.
if line_type == LineType.NONDOC :
if DEBUG :
alldocs[region_name] += '<!-- NONDOC ' + line[:35] + ' -->\n'
code_blob += line + '\n'
continue
# Remaining cases are all doc lines!
# Any doc line resets the pre-doc-symbol indentation level
m = docline_pattern.match(line)
indent = len(m.group(1))
# Handle <doc> and <doc...> : start of new doc section
m = doc_begin_pattern.match(line)
if m :
flush_doc_blob (doc_blob)
doc_blob = ''
# If it led with <doc...> it also starts continuation mode
doc_cont_mode = (m.group(5) == '...')
# The <doc> directive gave the region name.
r = m.group(6)
r = r.strip()
if r == '' :
r = '_'
if DEBUG :
alldocs[region_name] += '<!-- DOCBEGIN ' + r + ' -->\n'
region_name = r
if not (region_name in alldocs) :
alldocs[region_name] = ''
continue
#
# Handle </doc>
m = docendpattern.match(line)
if m :
if DEBUG :
alldocs[region_name] += '<!-- DOCEND ' + region_name + '-->\n'
flush_doc_blob (doc_blob)
doc_blob = ''
doc_cont_mode = False
region_name = '_'
continue
#
# Handle start of post-declaration API comment
m = api_postcomment_pattern.match(line)
if m :
if DEBUG :
alldocs[region_name] += '<!-- start post-decl api ' + line[:35] + '-->\n'
flush_doc_blob (doc_blob)
doc_blob = ''
# Deduce indentation level
indent = len(m.group(1))
post_api_mode = True
remainder = m.group(7)
doc_blob += remainder + '\n'
continue
#
# Handle start of pre-declaration API comment
m = api_precomment_pattern.match(line)
if m :
flush_doc_blob (doc_blob)
doc_blob = ''
indent = len(m.group(1))
post_api_mode = True
remainder = m.group(7)
doc_blob += remainder + '\n'
continue
#
# Handle code section <code> ... </code>
m = code_pattern.match(line)
if m :
flush_doc_blob (doc_blob)
doc_blob = ''
if DEBUG :
doc_blob += '<!-- start code in '+ region_name+ '-->\n'
indent = len(m.group(1))
doc_blob += '<script type="preformatted">\n~~~C\n'
doc_cont_mode = True
continue
if codeend_pattern.match(line) :
doc_cont_mode = False
doc_blob += '~~~\n</script>\n'
if DEBUG :
doc_blob += '<!-- end code in '+ region_name+ '-->\n'
flush_doc_blob (doc_blob)
doc_blob = ''
continue
#
# Last case: just a continuing doc line
m = docline_pattern.match (line)
if m :
if DEBUG :
alldocs[region_name] += '<!-- doc cont ' + line[:35] + ' -->\n'
contents = m.group(6)
doc_blob += contents + '\n'
continue
print ('<!-- REMAINING CASE:', line, '-->\n')
flush_doc_blob (doc_blob)
region_name = '_'
def output_blob (blob) :
for line in blob.splitlines() :
m = inc_pattern.match (line)
if m and (m.group(2) in alldocs) :
# print ('Want to include', m.group(2))
output_blob (alldocs[m.group(2)])
else :
print (line)
parser = argparse.ArgumentParser (prog='docdeep',
description='Turn source comments into markdeep document')
parser.add_argument ('-d', dest='docname', default='main',
help='Name of documentation (default: "main")')
parser.add_argument ('-s', dest='stylesheet', default='',
help='Name of style sheet (default: "")')
parser.add_argument ('--debug', dest='DEBUG', action='store_const',
const=True, default=False)
parser.add_argument ('filenames', nargs='+')
# parser.add_argument ('-o', dest='output_filename', nargs=1,
# help='Output filename (default: stdout)')
args = parser.parse_args()
DEBUG = args.DEBUG
for filename in args.filenames :
with open(filename) as file:
read_input (filename, file)
print ('<meta charset="utf-8">\n')
if args.docname in alldocs :
output_blob (alldocs[args.docname])
else :
print ("Ick! could not find docs for", args.docname)
print ('<!-- Markdeep: --><style class="fallback">body{visibility:hidden;white-space:pre;font-family:monospace}</style><script src="markdeep.min.js"></script><script src="https://casual-effects.com/markdeep/latest/markdeep.min.js?"></script><script>window.alreadyProcessedMarkdeep||(document.body.style.visibility="visible")</script>')
if args.stylesheet :
print ('<link rel="stylesheet" href="{}">'.format (args.stylesheet))
|
bsd-3-clause
|
DataDog/integrations-core
|
openstack_controller/tests/common.py
|
1
|
14615
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import datetime
import os
CHECK_NAME = 'openstack'
FIXTURES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fixtures')
ALL_IDS = ['server-1', 'server-2', 'other-1', 'other-2']
EXCLUDED_NETWORK_IDS = ['server-1', 'other-.*']
EXCLUDED_SERVER_IDS = ['server-2', 'other-.*']
FILTERED_NETWORK_ID = 'server-2'
FILTERED_SERVER_ID = 'server-1'
FILTERED_BY_PROJ_SERVER_ID = ['server-1', 'server-2']
CONFIG_FILE_INSTANCE = {
'name': 'test_name',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
'openstack_config_file_path': os.path.abspath('./tests/fixtures/openstack_config.yaml'),
'openstack_cloud_name': 'test_cloud',
}
KEYSTONE_INSTANCE = {
'name': 'test_name',
'keystone_server_url': 'http://10.0.2.15:5000',
'user': {'name': 'test_name', 'password': 'test_pass', 'domain': {'id': 'test_id'}},
'ssl_verify': False,
'exclude_network_ids': EXCLUDED_NETWORK_IDS,
}
MOCK_CONFIG = {'init_config': {}, 'instances': [KEYSTONE_INSTANCE]}
EXAMPLE_AUTH_RESPONSE = {
u'token': {
u'methods': [u'password'],
u'roles': [
{u'id': u'f20c215f5a4d47b7a6e510bc65485ced', u'name': u'datadog_monitoring'},
{u'id': u'9fe2ff9ee4384b1894a90878d3e92bab', u'name': u'_member_'},
],
u'expires_at': u'2015-11-02T15: 57: 43.911674Z',
u'project': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'0850707581fe4d738221a72db0182876',
u'name': u'admin',
},
u'catalog': [
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'354e35ed19774e398f80dc2a90d07f4b',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'36e8e2bf24384105b9d56a65b0900172',
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'de93edcbf7f9446286687ec68423c36f',
},
],
u'type': u'compute',
u'id': u'2023bd4f451849ba8abeaaf283cdde4f',
u'name': u'nova',
},
{
u'endpoints': [
{
u'url': u'http://10.0.3.111:8776/v1/***************************4bfc1',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************2452f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************8239f',
},
{
u'url': u'http://10.0.2.15:8776/v1/***************************4bfc1',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'***************************7caa1',
},
],
u'type': u'volume',
u'id': u'***************************e7e16',
u'name': u'cinder',
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'7c1e318d8f7f42029fcb591598df2ef5',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'afcc88b1572f48a38bb393305dc2b584',
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'd9730dbdc07844d785913219da64a197',
},
],
u'type': u'network',
u'id': u'21ad241f26194bccb7d2e49ee033d5a2',
u'name': u'neutron',
},
],
u'extras': {},
u'user': {
u'domain': {u'id': u'default', u'name': u'Default'},
u'id': u'5f10e63fbd6b411186e561dc62a9a675',
u'name': u'datadog',
},
u'audit_ids': [u'OMQQg9g3QmmxRHwKrfWxyQ'],
u'issued_at': u'2015-11-02T14: 57: 43.911697Z',
}
}
EXAMPLE_PROJECTS_RESPONSE = {
"projects": [
{
"domain_id": "1789d1",
"enabled": True,
"id": "263fd9",
"links": {"self": "https://example.com/identity/v3/projects/263fd9"},
"name": "Test Group",
}
],
"links": {"self": "https://example.com/identity/v3/auth/projects", "previous": None, "next": None},
}
# .. server/network
SERVERS_CACHE_MOCK = {
'servers': {
"server-1": {"id": "server-1", "name": "server-name-1", "status": "ACTIVE", "project_name": "testproj"},
"server-2": {"id": "server-2", "name": "server-name-2", "status": "ACTIVE", "project_name": "testproj"},
"other-1": {"id": "other-1", "name": "server-name-other-1", "status": "ACTIVE", "project_name": "blacklist_1"},
"other-2": {"id": "other-2", "name": "server-name-other-2", "status": "ACTIVE", "project_name": "blacklist_2"},
},
'change_since': datetime.datetime.utcnow().isoformat(),
}
EMPTY_NOVA_SERVERS = []
# One example from MOCK_NOVA_SERVERS to emulate pagination
MOCK_NOVA_SERVERS_PAGINATED = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
}
]
# Example response from - https://developer.openstack.org/api-ref/compute/#list-servers-detailed
# ID and server-name values have been changed for test readability
MOCK_NOVA_SERVERS = [
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-1",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server-1",
"metadata": {"My Server Name": "Apache1"},
"name": "new-server-test",
"status": "DELETED",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
{
"OS-DCF:diskConfig": "AUTO",
"OS-EXT-AZ:availability_zone": "nova",
"OS-EXT-SRV-ATTR:host": "compute",
"OS-EXT-SRV-ATTR:hostname": "server-2",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
"OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"OS-EXT-SRV-ATTR:kernel_id": "",
"OS-EXT-SRV-ATTR:launch_index": 0,
"OS-EXT-SRV-ATTR:ramdisk_id": "",
"OS-EXT-SRV-ATTR:reservation_id": "r-iffothgx",
"OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
"OS-EXT-SRV-ATTR:user_data": "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg==",
"OS-EXT-STS:power_state": 1,
"OS-EXT-STS:task_state": 'null',
"OS-EXT-STS:vm_state": "active",
"OS-SRV-USG:launched_at": "2017-02-14T19:24:43.891568",
"OS-SRV-USG:terminated_at": 'null',
"accessIPv4": "1.2.3.4",
"accessIPv6": "80fe::",
"hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
"host_status": "UP",
"id": "server_newly_added",
"metadata": {"My Server Name": "Apache1"},
"name": "newly_added_server",
"status": "ACTIVE",
"tags": [],
"tenant_id": "6f70656e737461636b20342065766572",
"updated": "2017-02-14T19:24:43Z",
"user_id": "fake",
},
]
EXAMPLE_GET_FLAVORS_DETAIL_RETURN_VALUE = [
{'id': u'10', 'disk': 10, 'vcpus': 2, 'ram': 1024, 'OS-FLV-EXT-DATA:ephemeral': 0, 'swap': 0},
{
'id': u'625c2e4b-0a1f-4236-bb67-5ceee1a766e5',
'disk': 48,
'vcpus': 8,
'ram': 5934,
'OS-FLV-EXT-DATA:ephemeral': 0,
'swap': 0,
},
]
EXAMPLE_GET_OS_AGGREGATES_RETURN_VALUE = [{'hosts': ["compute"], 'name': "name", 'availability_zone': "london"}]
EXAMPLE_GET_OS_HYPERVISORS_RETURN_VALUE = [
{
"cpu_info": {
"arch": "x86_64",
"model": "Nehalem",
"vendor": "Intel",
"features": ["pge", "clflush"],
"topology": {"cores": 1, "threads": 1, "sockets": 4},
},
"current_workload": 0,
"status": "enabled",
"state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "host1",
"hypervisor_type": "fake",
"hypervisor_version": 1000,
"id": 2,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
"service": {"host": "host1", "id": 7, "disabled_reason": None},
"vcpus": 2,
"vcpus_used": 0,
}
]
EXAMPLE_GET_PROJECT_LIMITS_RETURN_VALUE = {
"maxImageMeta": 128,
"maxPersonality": 5,
"maxPersonalitySize": 10240,
"maxSecurityGroupRules": 20,
"maxSecurityGroups": 10,
"maxServerMeta": 128,
"maxTotalCores": 20,
"maxTotalFloatingIps": 10,
"maxTotalInstances": 10,
"maxTotalKeypairs": 100,
"maxTotalRAMSize": 51200,
"maxServerGroups": 10,
"maxServerGroupMembers": 10,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 1,
"totalServerGroupsUsed": 0,
}
EXAMPLE_GET_NETWORKS_RETURN_VALUE = [
{
'id': u'2755452c-4fe8-4ba1-9b26-8898665b0958',
'name': u'net2',
'tenant_id': u'680031a39ce040e1b81289ea8c73fb11',
'admin_state_up': True,
}
]
DEFAULT_METRICS = [
'openstack.controller',
'openstack.nova.current_workload',
'openstack.nova.disk_available_least',
'openstack.nova.free_disk_gb',
'openstack.nova.free_ram_mb',
'openstack.nova.hypervisor_load.1',
'openstack.nova.hypervisor_load.15',
'openstack.nova.hypervisor_load.5',
'openstack.nova.limits.max_image_meta',
'openstack.nova.limits.max_personality',
'openstack.nova.limits.max_personality_size',
'openstack.nova.limits.max_security_group_rules',
'openstack.nova.limits.max_security_groups',
'openstack.nova.limits.max_server_meta',
'openstack.nova.limits.max_total_cores',
'openstack.nova.limits.max_total_floating_ips',
'openstack.nova.limits.max_total_instances',
'openstack.nova.limits.max_total_keypairs',
'openstack.nova.limits.max_total_ram_size',
'openstack.nova.limits.total_cores_used',
'openstack.nova.limits.total_floating_ips_used',
'openstack.nova.limits.total_instances_used',
'openstack.nova.limits.total_ram_used',
'openstack.nova.limits.total_security_groups_used',
'openstack.nova.local_gb',
'openstack.nova.local_gb_used',
'openstack.nova.memory_mb',
'openstack.nova.memory_mb_used',
'openstack.nova.running_vms',
'openstack.nova.vcpus',
'openstack.nova.vcpus_used',
]
|
bsd-3-clause
|
bukzor/pgctl
|
tests/unit/flock.py
|
3
|
1917
|
# -*- coding: utf-8 -*-
# pylint:disable=no-self-use
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from pytest import yield_fixture as fixture
from testfixtures import ShouldRaise
from pgctl.flock import flock
from pgctl.flock import Locked
@fixture
def tmpfile(tmpdir):
tmpfile = tmpdir.join('tmpfile')
assert not tmpfile.exists()
yield tmpfile.strpath
def assert_locked(tmpfile):
with ShouldRaise(Locked(11)):
with flock(tmpfile):
raise AssertionError('this should not work')
class DescribeFlock(object):
def it_allows_first_caller(self, tmpfile):
with flock(tmpfile):
print('oh, hi!')
def it_disallows_subsequent_callers(self, tmpfile):
with flock(tmpfile):
print('oh, hi!')
assert_locked(tmpfile)
def it_releases_lock_on_exit(self, tmpfile):
with flock(tmpfile):
print('oh, hi!')
with flock(tmpfile):
print('oh, hi!')
def it_creates_a_file_if_it_didnt_exist(self, tmpfile):
from os.path import exists
assert not exists(tmpfile)
with flock(tmpfile):
print('oh, hi!')
assert exists(tmpfile)
def it_works_fine_with_a_directory(self, tmpfile):
import os.path
assert not os.path.isdir(tmpfile)
os.mkdir(tmpfile)
with flock(tmpfile):
print('oh, hi!')
assert os.path.isdir(tmpfile)
def it_stays_locked_for_the_lifetime_of_subprocesses(self, tmpfile):
from subprocess import Popen
with flock(tmpfile):
p = Popen(('sleep', '99999'))
assert p.poll() is None
assert_locked(tmpfile)
assert_locked(tmpfile)
p.terminate()
assert p.wait() == -15
with flock(tmpfile):
print('oh hi there!')
|
mit
|
asimshankar/tensorflow
|
tensorflow/python/saved_model/save.py
|
1
|
34278
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a SavedModel from a Checkpointable Python object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.saved_model import builder_impl
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import function_serialization
from tensorflow.python.saved_model import saved_object_graph_pb2
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import utils_impl
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
def _find_function_to_export(root):
"""Iterate over `root`'s attributes, finding traced functions."""
exported_function = None
previous_attribute_name = None
for attribute_name in dir(root):
attribute_value = getattr(root, attribute_name, None)
if isinstance(attribute_value, def_function.PolymorphicFunction):
if exported_function is not None:
raise ValueError(
("Exporting an object with no "
"tf.saved_model.save(..., signatures=...) "
"argument specified, and with more than one "
"@tf.function-decorated method attached to it: {}. The signature "
"keys for these functions are ambiguous. Specify signature "
"functions explicitly.").format(
[previous_attribute_name, attribute_name]))
exported_function = attribute_value
previous_attribute_name = attribute_name
if exported_function is None:
exported_function = getattr(root, DEFAULT_SIGNATURE_ATTR, None)
if exported_function is None:
raise ValueError(
("Exporting an object with no tf.saved_model.save(..., signatures=...) "
"argument specified, and with no @tf.function-decorated methods "
"attached to it. In the future this will be a supported use-case for "
"Python re-import, but at the moment saving a SavedModel without "
"signatures does not make sense, as the only consumers will expect "
"signatures. Either decorate a method or specify a signature function "
"explicitly."))
return exported_function
def _canonicalize_signatures(signatures):
"""Converts `signatures` into a dictionary of concrete functions."""
if not isinstance(signatures, collections.Mapping):
signatures = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
concrete_signatures = {}
for serving_key, signature_function in signatures.items():
if isinstance(signature_function, (defun.PolymorphicFunction,
def_function.PolymorphicFunction)):
input_signature = signature_function._input_signature # pylint: disable=protected-access
if input_signature is None:
raise ValueError(
("Unable to use the function {} as a signature directly. Functions "
"used to generate serving signatures must either have an "
"`input_signature=` specified when constructed, or must be "
"converted to concrete functions using "
"`f.get_concrete_function(...)`.").format(signature_function))
signature_function = signature_function.get_concrete_function()
elif not isinstance(signature_function, defun.Function):
raise ValueError(
("Expected a TensorFlow function to generate a signature for, but "
"got {}. Python functions may be decorated with "
"`@tf.function(input_signature=...)` and passed as signatures "
"directly, or created without a signature using `@tf.function` "
"and then converted to a concrete TensorFlow function using "
"`f.get_concrete_function(...)`.").format(signature_function))
concrete_signatures[serving_key] = signature_function
return concrete_signatures
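# A hedged, user-facing sketch of the signature requirement enforced above.
# It assumes the public TF 2.x-style names tf.function, tf.TensorSpec and
# get_concrete_function; it is not part of this module and the exact public
# entry point may differ on this branch.
#
#   import tensorflow as tf
#
#   @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
#   def double(x):
#     return 2.0 * x
#
#   # Either form is acceptable as `signatures`:
#   signatures = double                          # carries an input_signature
#   signatures = double.get_concrete_function()  # explicit concrete function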
def _is_flat(sequence):
sequence_flat = nest.flatten(sequence)
try:
nest.assert_same_structure(sequence_flat, sequence)
return True
except ValueError:
return False
except TypeError:
return False
def _normalize_outputs(outputs, function_name, signature_key):
"""Construct an output dictionary from unnormalized function outputs."""
if isinstance(outputs, collections.Mapping):
for key, value in outputs.items():
if not isinstance(value, ops.Tensor):
raise ValueError(
("Got a dictionary containing non-Tensor value {} for key {} "
"in the output of the function {} used to generate a SavedModel "
"signature. Dictionaries outputs for functions used as signatures "
"should have one Tensor output per string key.")
.format(value, key, compat.as_str_any(function_name)))
return outputs
else:
original_outputs = outputs
if not isinstance(outputs, collections.Sequence):
outputs = [outputs]
if not _is_flat(outputs):
raise ValueError(
("Got non-flat outputs '{}' from '{}' for SavedModel "
"signature '{}'. Signatures have one Tensor per output, so "
"to have predictable names Python functions used to generate "
"these signatures should avoid outputting Tensors in nested "
"structures.")
.format(original_outputs, function_name, signature_key))
return {("output_{}".format(output_index)): output
for output_index, output
in enumerate(outputs)}
def _tensor_dict_to_tensorinfo(tensor_dict):
return {key: utils_impl.build_tensor_info(value)
for key, value in tensor_dict.items()}
def _map_captures_to_created_tensors(
original_captures, resource_map):
"""Maps eager tensors captured by a function to Graph resources for export.
Args:
original_captures: A dictionary mapping from tensors captured by the
function to interior placeholders for those tensors (inside the function
body).
resource_map: A dictionary mapping from resource tensors owned by the eager
context to resource tensors in the exported graph.
Returns:
A list of stand-in tensors which belong to the exported graph, corresponding
to the function's captures.
Raises:
AssertionError: If the function references a resource which is not part of
`resource_map`.
"""
export_captures = []
for exterior, interior in original_captures.items():
mapped_resource = resource_map.get(exterior, None)
if mapped_resource is None:
if exterior.dtype == dtypes.resource:
raise AssertionError(
("Tried to export a function which references untracked stateful "
"object {}. Stateful TensorFlow objects (e.g. tf.Variable) must "
"be tracked by the main object. Objects may be tracked by "
"assigning them to an attribute of another tracked object, or to "
"an attribute of the main object directly.")
.format(interior))
else:
# This is a captured Tensor, but it's not a resource. We'll just add it
# to the graph as a constant.
mapped_resource = constant_op.constant(exterior.numpy())
export_captures.append(mapped_resource)
return export_captures
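# In effect: a captured variable handle is swapped for the handle of the copy
# created in the exported graph via `resource_map`, a captured non-resource
# EagerTensor is frozen into the graph as a constant, and a resource capture
# with no `resource_map` entry (an untracked stateful object) raises the
# AssertionError above.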
def _map_function_arguments_to_created_inputs(
function_arguments, signature_key, function_name):
"""Creates exterior placeholders in the exported graph for function arguments.
Functions have two types of inputs: tensors captured from the outside (eager)
context, and arguments to the function which we expect to receive from the
user at each call. `_map_captures_to_created_tensors` replaces
captured tensors with stand-ins (typically these are resource dtype tensors
  associated with variables). `_map_function_arguments_to_created_inputs` runs over
every argument, creating a new placeholder for each which will belong to the
exported graph rather than the function body.
Args:
function_arguments: A list of argument placeholders in the function body.
signature_key: The name of the signature being exported, for error messages.
function_name: The name of the function, for error messages.
Returns:
A tuple of (mapped_inputs, exterior_placeholders)
mapped_inputs: A list with entries corresponding to `function_arguments`
containing all of the inputs of the function gathered from the exported
graph (both captured resources and arguments).
exterior_argument_placeholders: A dictionary mapping from argument names
to placeholders in the exported graph, containing the explicit arguments
to the function which a user is expected to provide.
Raises:
ValueError: If argument names are not unique.
"""
# `exterior_argument_placeholders` holds placeholders which are outside the
# function body, directly contained in a MetaGraph of the SavedModel. The
# function body itself contains nearly identical placeholders used when
# running the function, but these exterior placeholders allow Session-based
# APIs to call the function using feeds and fetches which name Tensors in the
# MetaGraph.
exterior_argument_placeholders = {}
mapped_inputs = []
for placeholder in function_arguments:
# `export_captures` contains an exhaustive set of captures, so if we don't
# find the input there then we now know we have an argument.
user_input_name = compat.as_str_any(
placeholder.op.get_attr("_user_specified_name"))
# If the internal placeholders for a function have names which were
# uniquified by TensorFlow, then a single user-specified argument name
# must refer to multiple Tensors. The resulting signatures would be
# confusing to call. Instead, we throw an exception telling the user to
# specify explicit names.
if user_input_name != placeholder.op.name:
# This should be unreachable, since concrete functions may not be
# generated with non-unique argument names.
raise ValueError(
("Got non-flat/non-unique argument names for SavedModel "
"signature '{}': more than one argument to '{}' was named '{}'. "
"Signatures have one Tensor per named input, so to have "
"predictable names Python functions used to generate these "
"signatures should avoid *args and Tensors in nested "
"structures unless unique names are specified for each. Use "
"tf.TensorSpec(..., name=...) to provide a name for a Tensor "
"input.")
.format(signature_key, compat.as_str_any(function_name),
user_input_name))
arg_placeholder = array_ops.placeholder(
shape=placeholder.shape,
dtype=placeholder.dtype,
name="{}_{}".format(signature_key, user_input_name))
exterior_argument_placeholders[user_input_name] = arg_placeholder
mapped_inputs.append(arg_placeholder)
return mapped_inputs, exterior_argument_placeholders
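# Note: the exported placeholder for an argument is named
# "<signature_key>_<argument name>", so a signature "serving_default" with an
# argument "x" is fed through the "serving_default_x:0" tensor in the
# MetaGraph (":0" being the usual output-index suffix).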
def _call_function_with_mapped_captures(function, args, resource_map):
"""Calls `function` in the exported graph, using mapped resource captures."""
export_captures = _map_captures_to_created_tensors(
function.graph.captures, resource_map)
mapped_inputs = args + export_captures
# Calls the function quite directly, since we have new captured resource
# tensors we need to feed in which weren't part of the original function
# definition.
# pylint: disable=protected-access
outputs = function._build_call_outputs(
function._inference_function.call(context.context(), mapped_inputs))
return outputs
def _generate_signatures(signature_functions, resource_map):
"""Validates and calls `signature_functions` in the default graph.
Args:
signature_functions: A dictionary mapping string keys to concrete TensorFlow
functions (e.g. from `_canonicalize_signatures`) which will be used to
generate SignatureDefs.
resource_map: A dictionary mapping from resource tensors in the eager
context to resource tensors in the Graph being exported. This dictionary
is used to re-bind resources captured by functions to tensors which will
exist in the SavedModel.
Returns:
Each function in the `signature_functions` dictionary is called with
placeholder Tensors, generating a function call operation and output
Tensors. The placeholder Tensors, the function call operation, and the
output Tensors from the function call are part of the default Graph.
This function then returns a dictionary with the same structure as
`signature_functions`, with the concrete functions replaced by SignatureDefs
implicitly containing information about how to call each function from a
TensorFlow 1.x Session / the C++ Loader API. These SignatureDefs reference
the generated placeholders and Tensor outputs by name.
    The caller is expected to include the Graph that is the default while this
    function is called as a MetaGraph in a SavedModel, including the returned
    SignatureDefs as part of that MetaGraph.
"""
signatures = {}
for signature_key, function in sorted(signature_functions.items()):
if function.graph.captures:
argument_inputs = function.graph.inputs[:-len(function.graph.captures)]
else:
argument_inputs = function.graph.inputs
mapped_inputs, exterior_argument_placeholders = (
_map_function_arguments_to_created_inputs(
argument_inputs, signature_key, function.name))
outputs = _normalize_outputs(
_call_function_with_mapped_captures(
function, mapped_inputs, resource_map),
function.name, signature_key)
signatures[signature_key] = signature_def_utils.build_signature_def(
_tensor_dict_to_tensorinfo(exterior_argument_placeholders),
_tensor_dict_to_tensorinfo(outputs))
return signatures
def _trace_resource_initializers(accessible_objects):
"""Create concrete functions from `TrackableResource` objects."""
resource_initializers = []
def _wrap_initializer(obj):
obj.initialize()
return constant_op.constant(1.) # Dummy control output
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
resource_initializers.append(def_function.function(
functools.partial(_wrap_initializer, obj),
# All inputs are captures.
input_signature=[]).get_concrete_function())
return resource_initializers
_AssetInfo = collections.namedtuple(
"_AssetInfo", [
# List of AssetFileDef protocol buffers
"asset_defs",
# Map from asset variable resource Tensors to their init ops
"asset_initializers_by_resource",
# Map from base asset filenames to full paths
"asset_filename_map",
# Map from TrackableAsset to index of corresponding AssetFileDef
"asset_index"])
def _process_asset(trackable_asset, asset_info, resource_map):
"""Add `trackable_asset` to `asset_info` and `resource_map`."""
original_variable = trackable_asset.asset_path
with context.eager_mode():
original_path = original_variable.numpy()
path = builder_impl.get_asset_filename_to_add(
asset_filepath=original_path,
asset_filename_map=asset_info.asset_filename_map)
# TODO(andresp): Instead of mapping 1-1 between trackable asset
# and asset in the graph def consider deduping the assets that
# point to the same file.
asset_path_initializer = array_ops.placeholder(
shape=original_variable.shape,
dtype=dtypes.string,
name="asset_path_initializer")
asset_variable = resource_variable_ops.ResourceVariable(
asset_path_initializer)
asset_info.asset_filename_map[path] = original_path
asset_def = meta_graph_pb2.AssetFileDef()
asset_def.filename = path
asset_def.tensor_info.name = asset_path_initializer.name
asset_info.asset_defs.append(asset_def)
asset_info.asset_initializers_by_resource[original_variable.handle] = (
asset_variable.initializer)
asset_info.asset_index[trackable_asset] = len(asset_info.asset_defs) - 1
resource_map[original_variable.handle] = asset_variable.handle
def _map_resources(accessible_objects):
"""Makes new resource handle ops corresponding to existing resource tensors.
Creates resource handle ops in the current default graph, whereas
`accessible_objects` will be from an eager context. Resource mapping adds
resource handle ops to the main GraphDef of a SavedModel, which allows the C++
loader API to interact with variables.
Args:
accessible_objects: A list of objects, some of which may contain resources,
to create replacements for.
Returns:
A tuple of (object_map, resource_map, asset_info):
object_map: A dictionary mapping from object in `accessible_objects` to
replacement objects created to hold the new resource tensors.
resource_map: A dictionary mapping from resource tensors extracted from
`accessible_objects` to newly created resource tensors.
asset_info: An _AssetInfo tuple describing external assets referenced from
accessible_objects.
"""
# TODO(allenl): Handle MirroredVariables and other types of variables which
# may need special casing.
object_map = {}
resource_map = {}
asset_info = _AssetInfo(
asset_defs=[],
asset_initializers_by_resource={},
asset_filename_map={},
asset_index={})
for obj in accessible_objects:
if isinstance(obj, tracking.TrackableResource):
new_resource = obj.create_resource()
resource_map[obj.resource_handle] = new_resource
elif resource_variable_ops.is_resource_variable(obj):
new_variable = resource_variable_ops.copy_to_graph_uninitialized(obj)
object_map[obj] = new_variable
resource_map[obj.handle] = new_variable.handle
elif isinstance(obj, tracking.TrackableAsset):
_process_asset(obj, asset_info, resource_map)
return object_map, resource_map, asset_info
def _fill_meta_graph_def(meta_graph_def, obj, signature_functions,
object_saver):
"""Generates a MetaGraph which calls `signature_functions`.
Args:
meta_graph_def: The MetaGraphDef proto to fill.
obj: The checkpointable object being exported.
signature_functions: A dictionary mapping signature keys to concrete
functions containing signatures to add to the MetaGraph.
object_saver: A CheckpointableSaver to add to the MetaGraph.
Returns:
An _AssetInfo, which contains information to help creating the SavedModel.
"""
signatures = {}
# List objects from the eager context to make sure Optimizers give us the
# right Graph-dependent variables.
accessible_objects = util.list_objects(obj)
resource_initializer_functions = _trace_resource_initializers(
accessible_objects)
exported_graph = ops.Graph()
resource_initializer_ops = []
with exported_graph.as_default():
object_map, resource_map, asset_info = _map_resources(accessible_objects)
for resource_initializer_function in resource_initializer_functions:
asset_dependencies = []
for capture in resource_initializer_function.graph.external_captures:
asset_initializer = asset_info.asset_initializers_by_resource.get(
capture, None)
if asset_initializer is not None:
asset_dependencies.append(asset_initializer)
with ops.control_dependencies(asset_dependencies):
resource_initializer_ops.append(
_call_function_with_mapped_captures(
resource_initializer_function, [], resource_map))
with ops.control_dependencies(resource_initializer_ops):
init_op = control_flow_ops.no_op()
# Add the same op to the main_op collection and to the init_op
# signature. The collection is for compatibility with older loader APIs;
# only one will be executed.
meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(
init_op.name)
meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
signature_def_utils.op_signature_def(
init_op, constants.INIT_OP_SIGNATURE_KEY))
# Saving an object-based checkpoint again gathers variables. We need to do the
# gathering from the eager context so Optimizers save the right set of
# variables, but want any operations associated with the save/restore to be in
# the exported graph (thus the `to_graph` argument).
saver = object_saver.freeze(object_map=object_map, to_graph=exported_graph)
# We must resolve the concrete function to add to MetaGraph while in eager
# mode.
concrete_functions = []
for accessible_object in accessible_objects:
for function in function_serialization.list_all_polymorphic_functions(
accessible_object).values():
concrete_functions.extend(
function_serialization.list_all_concrete_functions(function))
with exported_graph.as_default():
signatures = _generate_signatures(signature_functions, resource_map)
for concrete_function in concrete_functions:
concrete_function.add_to_graph()
saver_def = saver.to_proto()
meta_graph_def.saver_def.CopyFrom(saver_def)
graph_def = exported_graph.as_graph_def(add_shapes=True)
# Clean reference cycles so repeated export()s don't make work for the garbage
# collector.
ops.dismantle_graph(exported_graph)
meta_graph_def.graph_def.CopyFrom(graph_def)
meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
for signature_key, signature in signatures.items():
meta_graph_def.signature_def[signature_key].CopyFrom(signature)
meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
return asset_info
def _write_object_graph(root, export_dir, asset_file_def_index):
"""Save a SavedObjectGraph proto for `root`."""
# SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
# checkpoint. It will eventually go into the SavedModel.
proto = saved_object_graph_pb2.SavedObjectGraph()
checkpointable_objects, node_ids, slot_variables = util.find_objects(root)
util.fill_object_graph_proto(checkpointable_objects, node_ids, slot_variables,
proto)
for obj, obj_proto in zip(checkpointable_objects, proto.nodes):
_write_object_proto(obj, obj_proto, asset_file_def_index)
function_serialization.add_polymorphic_functions_to_object_graph_proto(
checkpointable_objects, proto)
extra_asset_dir = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
file_io.recursive_create_dir(extra_asset_dir)
object_graph_filename = os.path.join(
extra_asset_dir, compat.as_bytes("object_graph.pb"))
file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
def _write_object_proto(obj, proto, asset_file_def_index):
"""Saves an object into SavedObject proto."""
if isinstance(obj, tracking.TrackableAsset):
proto.asset.SetInParent()
proto.asset.asset_file_def_index = asset_file_def_index[obj]
else:
proto.user_object.SetInParent()
@tf_export("saved_model.save", v1=["saved_model.experimental.save"])
def save(obj, export_dir, signatures=None):
# pylint: disable=line-too-long
"""Exports the Checkpointable object `obj` to [SavedModel format](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md).
Example usage:
```python
class Adder(tf.train.Checkpoint):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = Adder()
tf.saved_model.save(to_export, '/tmp/adder')
```
The resulting SavedModel is then servable with an input named "x", its value
having any shape and dtype float32.
The optional `signatures` argument controls which methods in `obj` will be
available to programs which consume `SavedModel`s, for example serving
APIs. Python functions may be decorated with
`@tf.function(input_signature=...)` and passed as signatures directly, or
lazily with a call to `get_concrete_function` on the method decorated with
`@tf.function`.
If the `signatures` argument is omitted, `obj` will be searched for
`@tf.function`-decorated methods. If exactly one `@tf.function` is found, that
method will be used as the default signature for the SavedModel. This behavior
is expected to change in the future, when a corresponding
`tf.saved_model.load` symbol is added. At that point signatures will be
completely optional, and any `@tf.function` attached to `obj` or its
dependencies will be exported for use with `load`.
When invoking a signature in an exported SavedModel, `Tensor` arguments are
identified by name. These names will come from the Python function's argument
names by default. They may be overridden by specifying a `name=...` argument
in the corresponding `tf.TensorSpec` object. Explicit naming is required if
multiple `Tensor`s are passed through a single argument to the Python
function.
  The outputs of functions used as `signatures` must be either flat lists, in
  which case outputs will be numbered ("output_0", "output_1", ...), or
  dictionaries mapping string keys to `Tensor`, in which case the keys will be
  used to name outputs.
Since `tf.keras.Model` objects are also Checkpointable, this function can be
used to export Keras models. For example, exporting with a signature
specified:
```python
class Model(tf.keras.Model):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def serve(self, serialized):
...
m = Model()
tf.saved_model.save(m, '/tmp/saved_model/')
```
Exporting from a function without a fixed signature:
```python
class Model(tf.keras.Model):
@tf.function
def call(self, x):
...
m = Model()
tf.saved_model.save(
m, '/tmp/saved_model/',
signatures=m.call.get_concrete_function(
tf.TensorSpec(shape=[None, 3], dtype=tf.float32, name="inp")))
```
`tf.keras.Model` instances constructed from inputs and outputs already have a
signature and so do not require a `@tf.function` decorator or a `signatures`
argument. If neither are specified, the model's forward pass is exported.
```python
x = input_layer.Input((4,), name="x")
y = core.Dense(5, name="out")(x)
model = training.Model(x, y)
tf.saved_model.save(model, '/tmp/saved_model/')
# The exported SavedModel takes "x" with shape [None, 4] and returns "out"
# with shape [None, 5]
```
Variables must be tracked by assigning them to an attribute of a tracked
object or to an attribute of `obj` directly. TensorFlow objects (e.g. layers
from `tf.keras.layers`, optimizers from `tf.train`) track their variables
automatically. This is the same tracking scheme that `tf.train.Checkpoint`
uses, and an exported `Checkpoint` object may be restored as a training
checkpoint by pointing `tf.train.Checkpoint.restore` to the SavedModel's
"variables/" subdirectory. Currently variables are the only stateful objects
supported by `tf.saved_model.save`, but others (e.g. tables) will be supported
in the future.
`tf.function` does not hard-code device annotations from outside the function
body, instead using the calling context's device. This means for example that
exporting a model which runs on a GPU and serving it on a CPU will generally
work, with some exceptions. `tf.device` annotations inside the body of the
function will be hard-coded in the exported model; this type of annotation is
discouraged. Device-specific operations, e.g. with "cuDNN" in the name or with
device-specific layouts, may cause issues. Currently a `DistributionStrategy`
is another exception: active distribution strategies will cause device
placements to be hard-coded in a function. Exporting a single-device
computation and importing under a `DistributionStrategy` is not currently
supported, but may be in the future.
SavedModels exported with `tf.saved_model.save` [strip default-valued
attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes)
automatically, which removes one source of incompatibilities when the consumer
of a SavedModel is running an older TensorFlow version than the
producer. There are however other sources of incompatibilities which are not
handled automatically, such as when the exported model contains operations
which the consumer does not have definitions for.
The current implementation of `tf.saved_model.save` targets serving use-cases,
but omits information which will be necessary for the planned future
implementation of `tf.saved_model.load`. Exported models using the current
`save` implementation, and other existing SavedModels, will not be compatible
with `tf.saved_model.load` when it is implemented. Further, `save` will in the
future attempt to export `@tf.function`-decorated methods which it does not
currently inspect, so some objects which are exportable today will raise
exceptions on export in the future (e.g. due to complex/non-serializable
default arguments). Such backwards-incompatible API changes are expected only
prior to the TensorFlow 2.0 release.
Args:
obj: A checkpointable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: Optional, either a `tf.function` with an input signature
specified or the result of `f.get_concrete_function` on a
`@tf.function`-decorated function `f`, in which case `f` will be used to
generate a signature for the SavedModel under the default serving
signature key. `signatures` may also be a dictionary, in which case it
maps from signature keys to either `tf.function` instances with input
signatures or concrete functions. The keys of such a dictionary may be
arbitrary strings, but will typically be from the
`tf.saved_model.signature_constants` module.
Raises:
ValueError: If `obj` is not checkpointable.
@compatibility(eager)
Not supported when graph building. From TensorFlow 1.x,
`tf.enable_eager_execution()` must run first. May not be called from within a
function body.
@end_compatibility
"""
if not context.executing_eagerly():
with ops.init_scope():
if context.executing_eagerly():
raise AssertionError(
"tf.saved_model.save is not supported inside a traced "
"@tf.function. Move the call to the outer eagerly-executed "
"context.")
else:
raise AssertionError(
"tf.saved_model.save is not supported when graph building. "
"tf.enable_eager_execution() must run first when calling it from "
"TensorFlow 1.x.")
# pylint: enable=line-too-long
if not isinstance(obj, base.CheckpointableBase):
raise ValueError(
"Expected a Checkpointable object for export, got {}.".format(obj))
if signatures is None:
# Note that we run this before saving the checkpoint, since looping over
# attributes may have the side effect of creating variables in some cases.
signatures = _find_function_to_export(obj)
signatures = _canonicalize_signatures(signatures)
# TODO(allenl): Factor out some subset of SavedModelBuilder which is 2.x
# compatible (no sessions) and share it with this export API rather than
# making a SavedModel proto and writing it directly.
saved_model = saved_model_pb2.SavedModel()
meta_graph_def = saved_model.meta_graphs.add()
object_saver = util.CheckpointableSaver(obj)
asset_info = _fill_meta_graph_def(
meta_graph_def, obj, signatures, object_saver)
saved_model.saved_model_schema_version = (
constants.SAVED_MODEL_SCHEMA_VERSION)
# So far we've just been generating protocol buffers with no I/O. Now we write
# the checkpoint, copy assets into the assets directory, and write out the
# SavedModel proto itself.
utils_impl.get_or_create_variables_dir(export_dir)
object_saver.save(utils_impl.get_variables_path(export_dir))
builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map,
export_dir)
path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
file_io.write_string_to_file(path, saved_model.SerializeToString())
_write_object_graph(obj, export_dir, asset_info.asset_index)
|
apache-2.0
|
BlackPole/bp-dvbapp
|
lib/python/Components/Renderer/ChannelNumber.py
|
5
|
1375
|
from Components.VariableText import VariableText
from enigma import eLabel, iServiceInformation, eServiceReference, eServiceCenter
from Renderer import Renderer
#
# borrowed from vali, adapted for openpli
#
class ChannelNumber(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
self.list = []
self.getList()
GUI_WIDGET = eLabel
def changed(self, what):
service = self.source.service
info = service and service.info()
if info is None:
self.text = ""
return
name = info.getName().replace('\xc2\x86', '').replace('\xc2\x87', '')
if name in self.list:
			# channel numbers are 1-based; include the last bouquet entry as well
			for idx in range(1, len(self.list) + 1):
if name == self.list[idx-1]:
self.text = str(idx)
break
else:
self.text = '---'
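	# getList() below flattens every service name from the TV bouquets into
	# self.list (markers with a "1:64:" reference are skipped), so the text
	# rendered above is the 1-based position of the current service in that
	# flattened list.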
def getList(self):
serviceHandler = eServiceCenter.getInstance()
services = serviceHandler.list(eServiceReference('1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 195) || (type == 25) FROM BOUQUET "bouquets.tv" ORDER BY bouquet'))
bouquets = services and services.getContent("SN", True)
for bouquet in bouquets:
services = serviceHandler.list(eServiceReference(bouquet[0]))
channels = services and services.getContent("SN", True)
for channel in channels:
if not channel[0].startswith("1:64:"):
self.list.append(channel[1].replace('\xc2\x86', '').replace('\xc2\x87', ''))
|
gpl-2.0
|
alejandroesquiva/AutomaticApiRest-PythonConnector
|
example/test.py
|
1
|
1552
|
__author__ = 'Alejandro Esquiva Rodriguez'
from aarpy.AARConnector import AARConnector
#AAR Instance
##Create instance via URL
AAR = AARConnector(url="http://automaticapirest.info/demo/getData.php?t=Country&c=Code,Name&l=0,5")
##Create instance via parameters
AAR = AARConnector(domain="http://automaticapirest.info/demo/",table="Country",columns="Name",orderby="Name",limit="10",where="Name:'Albania'")
#Get all the json
jsondata = AAR.getJson()
'''
print(jsondata)
##########################
{'dbInfo': ['Code', 'Name'], 'data': [{'1': 'Aruba', 'Code': 'ABW', '0': 'ABW', 'Name': 'Aruba'}, {'1': 'Afghanistan', 'Code': 'AFG', '0': 'AFG', 'Name': 'Afghanistan'}, {'1': 'Angola', 'Code': 'AGO', '0': 'AGO', 'Name': 'Angola'}, {'1': 'Anguilla', 'Code': 'AIA', '0': 'AIA', 'Name': 'Anguilla'}, {'1': 'Albania', 'Code': 'ALB', '0': 'ALB', 'Name': 'Albania'}]}
'''
#Get all the data
data = AAR.getData()
#Get query info
dbinfo = AAR.getDBInfo()
#Output a specific data
#Name in the first row
name = data[0]["Name"]
#OR
name = data[0]["0"]
print(name)
#Print query info
AAR.printDBInfo()
'''
[
"Code",
"Name"
]
'''
#Print Data
AAR.printData()
'''
[
{
"0": "ABW",
"1": "Aruba",
"Code": "ABW",
"Name": "Aruba"
},
{
"0": "AFG",
"1": "Afghanistan",
"Code": "AFG",
"Name": "Afghanistan"
},
{
"0": "AGO",
"1": "Angola",
"Code": "AGO",
"Name": "Angola"
},
{
"0": "AIA",
"1": "Anguilla",
"Code": "AIA",
"Name": "Anguilla"
},
{
"0": "ALB",
"1": "Albania",
"Code": "ALB",
"Name": "Albania"
}
]
'''
|
mit
|
GinnyN/towerofdimensions-django
|
build/lib/django/conf/locale/ml/formats.py
|
341
|
1635
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
bsd-3-clause
|
darkryder/django
|
django/contrib/auth/tokens.py
|
433
|
2803
|
from datetime import date
from django.conf import settings
from django.utils import six
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.http import base36_to_int, int_to_base36
class PasswordResetTokenGenerator(object):
"""
Strategy object used to generate and check tokens for the password
reset mechanism.
"""
key_salt = "django.contrib.auth.tokens.PasswordResetTokenGenerator"
def make_token(self, user):
"""
Returns a token that can be used once to do a password reset
for the given user.
"""
return self._make_token_with_timestamp(user, self._num_days(self._today()))
def check_token(self, user, token):
"""
Check that a password reset token is correct for a given user.
"""
# Parse the token
try:
ts_b36, hash = token.split("-")
except ValueError:
return False
try:
ts = base36_to_int(ts_b36)
except ValueError:
return False
# Check that the timestamp/uid has not been tampered with
if not constant_time_compare(self._make_token_with_timestamp(user, ts), token):
return False
# Check the timestamp is within limit
if (self._num_days(self._today()) - ts) > settings.PASSWORD_RESET_TIMEOUT_DAYS:
return False
return True
def _make_token_with_timestamp(self, user, timestamp):
# timestamp is number of days since 2001-1-1. Converted to
# base 36, this gives us a 3 digit string until about 2121
ts_b36 = int_to_base36(timestamp)
# By hashing on the internal state of the user and using state
# that is sure to change (the password salt will change as soon as
# the password is set, at least for current Django auth, and
# last_login will also change), we produce a hash that will be
# invalid as soon as it is used.
# We limit the hash to 20 chars to keep URL short
hash = salted_hmac(
self.key_salt,
self._make_hash_value(user, timestamp),
).hexdigest()[::2]
return "%s-%s" % (ts_b36, hash)
def _make_hash_value(self, user, timestamp):
# Ensure results are consistent across DB backends
login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)
return (
six.text_type(user.pk) + user.password +
six.text_type(login_timestamp) + six.text_type(timestamp)
)
def _num_days(self, dt):
return (dt - date(2001, 1, 1)).days
def _today(self):
# Used for mocking in tests
return date.today()
default_token_generator = PasswordResetTokenGenerator()
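# Editorial sketch (not part of Django): typical use of the generator above,
# assuming `user` is a saved User instance.
#
#   from django.contrib.auth.tokens import default_token_generator
#
#   token = default_token_generator.make_token(user)
#   # token has the form "<days-since-2001-in-base36>-<20 hex chars>"
#   default_token_generator.check_token(user, token)
#   # True until the user's password or last_login changes, or
#   # PASSWORD_RESET_TIMEOUT_DAYS elapse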
|
bsd-3-clause
|
ubiar/odoo
|
addons/website_event_track/models/event.py
|
300
|
8344
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
import pytz
class event_track_tag(osv.osv):
_name = "event.track.tag"
_order = 'name'
_columns = {
'name': fields.char('Event Track Tag', translate=True)
}
class event_tag(osv.osv):
_name = "event.tag"
_order = 'name'
_columns = {
'name': fields.char('Event Tag', translate=True)
}
#
# Tracks: conferences
#
class event_track_stage(osv.osv):
_name = "event.track.stage"
_order = 'sequence'
_columns = {
'name': fields.char('Track Stage', translate=True),
'sequence': fields.integer('Sequence')
}
_defaults = {
'sequence': 0
}
class event_track_location(osv.osv):
_name = "event.track.location"
_columns = {
'name': fields.char('Track Rooms')
}
class event_track(osv.osv):
_name = "event.track"
_description = 'Event Tracks'
_order = 'priority, date'
_inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata']
def _website_url(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, '')
for track in self.browse(cr, uid, ids, context=context):
res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track))
return res
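    # For example, a track "Opening Keynote" (id 12) on an event "My Conf"
    # (id 3) would get "/event/my-conf-3/track/opening-keynote-12", since
    # slug() appends the record id to the slugified name.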
_columns = {
'name': fields.char('Track Title', required=True, translate=True),
'user_id': fields.many2one('res.users', 'Responsible'),
'speaker_ids': fields.many2many('res.partner', string='Speakers'),
'tag_ids': fields.many2many('event.track.tag', string='Tags'),
'stage_id': fields.many2one('event.track.stage', 'Stage'),
'description': fields.html('Track Description', translate=True),
'date': fields.datetime('Track Date'),
'duration': fields.float('Duration', digits=(16,2)),
'location_id': fields.many2one('event.track.location', 'Location'),
'event_id': fields.many2one('event.event', 'Event', required=True),
'color': fields.integer('Color Index'),
'priority': fields.selection([('3','Low'),('2','Medium (*)'),('1','High (**)'),('0','Highest (***)')], 'Priority', required=True),
'website_published': fields.boolean('Available in the website', copy=False),
'website_url': fields.function(_website_url, string="Website url", type="char"),
'image': fields.related('speaker_ids', 'image', type='binary', readonly=True)
}
def set_priority(self, cr, uid, ids, priority, context={}):
return self.write(cr, uid, ids, {'priority' : priority})
def _default_stage_id(self, cr, uid, context={}):
stage_obj = self.pool.get('event.track.stage')
ids = stage_obj.search(cr, uid, [], context=context)
return ids and ids[0] or False
_defaults = {
'user_id': lambda self, cr, uid, ctx: uid,
'website_published': lambda self, cr, uid, ctx: False,
'duration': lambda *args: 1.5,
'stage_id': _default_stage_id,
'priority': '2'
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
stage_obj = self.pool.get('event.track.stage')
result = stage_obj.name_search(cr, uid, '', context=context)
return result, {}
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
#
# Events
#
class event_event(osv.osv):
_inherit = "event.event"
def _list_tz(self,cr,uid, context=None):
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
def _count_tracks(self, cr, uid, ids, field_name, arg, context=None):
return {
event.id: len(event.track_ids)
for event in self.browse(cr, uid, ids, context=context)
}
def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None):
res = dict((res_id, []) for res_id in ids)
for event in self.browse(cr, uid, ids, context=context):
for track in event.track_ids:
res[event.id] += [tag.id for tag in track.tag_ids]
res[event.id] = list(set(res[event.id]))
return res
_columns = {
'tag_ids': fields.many2many('event.tag', string='Tags'),
'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True),
'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True),
'blog_id': fields.many2one('blog.blog', 'Event Blog'),
'show_track_proposal': fields.boolean('Talks Proposals'),
'show_tracks': fields.boolean('Multiple Tracks'),
'show_blog': fields.boolean('News'),
'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'),
'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'),
'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."),
'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64),
}
_defaults = {
'show_track_proposal': False,
'show_tracks': False,
'show_blog': False,
'timezone_of_event':lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).tz,
}
def _get_new_menu_pages(self, cr, uid, event, context=None):
context = context or {}
result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context)
if event.show_tracks:
result.append( (_('Talks'), '/event/%s/track' % slug(event)))
result.append( (_('Agenda'), '/event/%s/agenda' % slug(event)))
if event.blog_id:
            result.append( (_('News'), '/blogpost'+slug(event.blog_id)))
if event.show_track_proposal:
result.append( (_('Talk Proposals'), '/event/%s/track_proposal' % slug(event)))
return result
#
# Sponsors
#
class event_sponsors_type(osv.osv):
_name = "event.sponsor.type"
_order = "sequence"
_columns = {
"name": fields.char('Sponsor Type', required=True, translate=True),
"sequence": fields.integer('Sequence')
}
class event_sponsors(osv.osv):
_name = "event.sponsor"
_order = "sequence"
_columns = {
'event_id': fields.many2one('event.event', 'Event', required=True),
'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True),
'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True),
'url': fields.text('Sponsor Website'),
'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True),
'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary')
}
def has_access_to_partner(self, cr, uid, ids, context=None):
partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)]
return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
|
agpl-3.0
|