repo_name | path | copies | size | content | license
---|---|---|---|---|---
loli/medpy
|
bin/medpy_diff.py
|
1
|
3675
|
#!/usr/bin/env python
"""
Compares the pixel values of two images and gives a measure of the difference.
Copyright (C) 2013 Oskar Maier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
# build-in modules
import sys
import argparse
import logging
# third-party modules
import scipy
# path changes
# own modules
from medpy.core import Logger
from medpy.io import load
from functools import reduce
# information
__author__ = "Oskar Maier"
__version__ = "r0.1.0, 2012-05-25"
__email__ = "oskar.maier@googlemail.com"
__status__ = "Release"
__description__ = """
Compares the pixel values of two images and gives a measure of the difference.
Also compares the dtype and shape.
Copyright (C) 2013 Oskar Maier
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions; see
the LICENSE file or <http://www.gnu.org/licenses/> for details.
"""
# code
def main():
args = getArguments(getParser())
# prepare logger
logger = Logger.getInstance()
if args.debug: logger.setLevel(logging.DEBUG)
elif args.verbose: logger.setLevel(logging.INFO)
# load input image1
data_input1, _ = load(args.input1)
# load input image2
data_input2, _ = load(args.input2)
# compare dtype and shape
if not data_input1.dtype == data_input2.dtype: print('Dtype differs: {} to {}'.format(data_input1.dtype, data_input2.dtype))
if not data_input1.shape == data_input2.shape:
print('Shape differs: {} to {}'.format(data_input1.shape, data_input2.shape))
print('The voxel content of images of different shape can not be compared. Exiting.')
sys.exit(-1)
# compare image data
voxel_total = reduce(lambda x, y: x*y, data_input1.shape)
voxel_difference = len((data_input1 != data_input2).nonzero()[0])
if not 0 == voxel_difference:
print('Voxels differ: {} of {} total voxels'.format(voxel_difference, voxel_total))
print('Max difference: {}'.format(scipy.absolute(data_input1 - data_input2).max()))
else: print('No other difference.')
logger.info("Successfully terminated.")
def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
return parser.parse_args()
def getParser():
"Creates and returns the argparse parser object."
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument('input1', help='Source volume one.')
parser.add_argument('input2', help='Source volume two.')
parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
return parser
if __name__ == "__main__":
main()
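# Hypothetical invocation, illustrating the argparse interface defined above
# (file names are placeholders, not part of the original script):
#
#   python medpy_diff.py volume_a.nii volume_b.nii -v
#
# For same-shaped inputs this reports the number of differing voxels and the
# maximum absolute difference; for differing shapes it prints both shapes and
# exits with a non-zero status.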
|
gpl-3.0
|
amrdraz/kodr
|
app/brython/www/src/Lib/test/test_ossaudiodev.py
|
32
|
7216
|
from test import support
support.requires('audio')
from test.support import findfile
ossaudiodev = support.import_module('ossaudiodev')
import errno
import sys
import sunau
import time
import audioop
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
from ossaudiodev import AFMT_S16_NE
except ImportError:
if sys.byteorder == "little":
AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
else:
AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
with open(path, 'rb') as fp:
au = sunau.open(fp)
rate = au.getframerate()
nchannels = au.getnchannels()
encoding = au._encoding
fp.seek(0)
data = fp.read()
if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
raise RuntimeError("Expect .au file with 8-bit mu-law samples")
# Convert the data to 16-bit signed.
data = audioop.ulaw2lin(data, 2)
return (data, rate, 16, nchannels)
class OSSAudioDevTests(unittest.TestCase):
def play_sound_file(self, data, rate, ssize, nchannels):
try:
dsp = ossaudiodev.open('w')
except IOError as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
# at least check that these methods can be invoked
dsp.bufsize()
dsp.obufcount()
dsp.obuffree()
dsp.getptr()
dsp.fileno()
# Make sure the read-only attributes work.
self.assertFalse(dsp.closed)
self.assertEqual(dsp.name, "/dev/dsp")
self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)
# And make sure they're really read-only.
for attr in ('closed', 'name', 'mode'):
try:
setattr(dsp, attr, 42)
except (TypeError, AttributeError):
pass
else:
self.fail("dsp.%s not read-only" % attr)
# Compute expected running time of sound sample (in seconds).
expected_time = float(len(data)) / (ssize/8) / nchannels / rate
# set parameters based on .au file headers
dsp.setparameters(AFMT_S16_NE, nchannels, rate)
self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
t1 = time.time()
dsp.write(data)
dsp.close()
t2 = time.time()
elapsed_time = t2 - t1
percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
self.assertTrue(percent_diff <= 10.0,
"elapsed time (%s) > 10%% off of expected time (%s)" %
(elapsed_time, expected_time))
def set_parameters(self, dsp):
# Two configurations for testing:
# config1 (8-bit, mono, 8 kHz) should work on even the most
# ancient and crufty sound card, but maybe not on special-
# purpose high-end hardware
# config2 (16-bit, stereo, 44.1kHz) should work on all but the
# most ancient and crufty hardware
config1 = (ossaudiodev.AFMT_U8, 1, 8000)
config2 = (AFMT_S16_NE, 2, 44100)
for config in [config1, config2]:
(fmt, channels, rate) = config
if (dsp.setfmt(fmt) == fmt and
dsp.channels(channels) == channels and
dsp.speed(rate) == rate):
break
else:
raise RuntimeError("unable to set audio sampling parameters: "
"you must have really weird audio hardware")
# setparameters() should be able to set this configuration in
# either strict or non-strict mode.
result = dsp.setparameters(fmt, channels, rate, False)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
result = dsp.setparameters(fmt, channels, rate, True)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
def set_bad_parameters(self, dsp):
# Now try some configurations that are presumably bogus: eg. 300
# channels currently exceeds even Hollywood's ambitions, and
# negative sampling rate is utter nonsense. setparameters() should
# accept these in non-strict mode, returning something other than
# was requested, but should barf in strict mode.
fmt = AFMT_S16_NE
rate = 44100
channels = 2
for config in [(fmt, 300, rate), # ridiculous nchannels
(fmt, -5, rate), # impossible nchannels
(fmt, channels, -50), # impossible rate
]:
(fmt, channels, rate) = config
result = dsp.setparameters(fmt, channels, rate, False)
self.assertNotEqual(result, config,
"unexpectedly got requested configuration")
try:
result = dsp.setparameters(fmt, channels, rate, True)
except ossaudiodev.OSSAudioError as err:
pass
else:
self.fail("expected OSSAudioError")
def test_playback(self):
sound_info = read_sound_file(findfile('audiotest.au'))
self.play_sound_file(*sound_info)
def test_set_parameters(self):
dsp = ossaudiodev.open("w")
try:
self.set_parameters(dsp)
# Disabled because it fails under Linux 2.6 with ALSA's OSS
# emulation layer.
#self.set_bad_parameters(dsp)
finally:
dsp.close()
self.assertTrue(dsp.closed)
def test_mixer_methods(self):
# Issue #8139: ossaudiodev didn't initialize its types properly,
# therefore some methods were unavailable.
with ossaudiodev.openmixer() as mixer:
self.assertGreaterEqual(mixer.fileno(), 0)
def test_with(self):
with ossaudiodev.open('w') as dsp:
pass
self.assertTrue(dsp.closed)
def test_on_closed(self):
dsp = ossaudiodev.open('w')
dsp.close()
self.assertRaises(ValueError, dsp.fileno)
self.assertRaises(ValueError, dsp.read, 1)
self.assertRaises(ValueError, dsp.write, b'x')
self.assertRaises(ValueError, dsp.writeall, b'x')
self.assertRaises(ValueError, dsp.bufsize)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obuffree)
self.assertRaises(ValueError, dsp.getptr)
mixer = ossaudiodev.openmixer()
mixer.close()
self.assertRaises(ValueError, mixer.fileno)
def test_main():
try:
dsp = ossaudiodev.open('w')
except (ossaudiodev.error, IOError) as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
dsp.close()
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
mit
|
StrellaGroup/erpnext
|
erpnext/hr/doctype/upload_attendance/test_upload_attendance.py
|
13
|
1103
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import getdate
from erpnext.hr.doctype.upload_attendance.upload_attendance import get_data
from erpnext.hr.doctype.employee.test_employee import make_employee
class TestUploadAttendance(unittest.TestCase):
def test_date_range(self):
employee = make_employee("test_employee@company.com")
employee_doc = frappe.get_doc("Employee", employee)
date_of_joining = "2018-01-02"
relieving_date = "2018-01-03"
from_date = "2018-01-01"
to_date = "2018-01-04"
employee_doc.date_of_joining = date_of_joining
employee_doc.relieving_date = relieving_date
employee_doc.save()
args = {
"from_date": from_date,
"to_date": to_date
}
data = get_data(args)
filtered_data = []
for row in data:
if row[1] == employee:
filtered_data.append(row)
for row in filtered_data:
self.assertTrue(getdate(row[3]) >= getdate(date_of_joining) and getdate(row[3]) <= getdate(relieving_date))
|
gpl-3.0
|
alon/polinax
|
libs/external_libs/docutils-0.4/docutils/statemachine.py
|
6
|
55377
|
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 4152 $
# Date: $Date: 2005-12-08 00:46:30 +0100 (Thu, 08 Dec 2005) $
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.first_known_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>sys.stderr, \
('\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = self.match(pattern)
if match:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def match(self, pattern):
"""
Return the result of a regular expression match.
Parameter `pattern`: an `re` compiled regular expression.
"""
return pattern.match(self.line)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if self.states.has_key(statename):
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>sys.stderr, '%s: %s' % (type, value)
print >>sys.stderr, 'input line %s' % (self.abs_line_number())
print >>sys.stderr, ('module %s, line %s, function %s'
% (module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
- `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if self.transitions.has_key(name):
raise DuplicateTransitionError(name)
if not transitions.has_key(name):
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if self.transitions.has_key(name):
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since native list objects start supporting
# them directly in Python 2.3 (no exception is raised when
# indexing a list with a slice object; they just work).
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[i.start + self.parent_offset
: i.stop + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[i.start + self.parent_offset
: i.stop + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
- a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
# Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, types.UnicodeType):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
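# For example (input chosen for illustration):
#   string2lines('a\tbc\n  two  ', tab_width=4) == ['a   bc', '  two']
# i.e. tabs expanded to the next 4-column stop, trailing whitespace stripped,
# and no newlines in the result.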
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
|
gpl-2.0
|
disruptek/boto
|
boto/ecs/__init__.py
|
153
|
4177
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.connection import AWSQueryConnection, AWSAuthConnection
from boto.exception import BotoServerError
import time
import urllib
import xml.sax
from boto.ecs.item import ItemSet
from boto import handler
class ECSConnection(AWSQueryConnection):
"""
ECommerce Connection
For more information on how to use this module see:
http://blog.coredumped.org/2010/09/search-for-books-on-amazon-using-boto.html
"""
APIVersion = '2010-11-01'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
debug=0, https_connection_factory=None, path='/',
security_token=None, profile_name=None):
super(ECSConnection, self).__init__(aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
host, debug, https_connection_factory, path,
security_token=security_token,
profile_name=profile_name)
def _required_auth_capability(self):
return ['ecs']
def get_response(self, action, params, page=0, itemSet=None):
"""
Utility method to handle calls to ECS and parsing of responses.
"""
params['Service'] = "AWSECommerceService"
params['Operation'] = action
if page:
params['ItemPage'] = page
response = self.make_request(None, params, "/onca/xml")
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise BotoServerError(response.status, response.reason, body)
if itemSet is None:
rs = ItemSet(self, action, params, page)
else:
rs = itemSet
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body.encode('utf-8'), h)
if not rs.is_valid:
raise BotoServerError(response.status, '{Code}: {Message}'.format(**rs.errors[0]))
return rs
#
# Group methods
#
def item_search(self, search_index, **params):
"""
Returns items that satisfy the search criteria, including one or more search
indices.
For a full list of search terms,
:see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
"""
params['SearchIndex'] = search_index
return self.get_response('ItemSearch', params)
def item_lookup(self, **params):
"""
Returns items that satisfy the lookup query.
For a full list of parameters, see:
http://s3.amazonaws.com/awsdocs/Associates/2011-08-01/prod-adv-api-dg-2011-08-01.pdf
"""
return self.get_response('ItemLookup', params)
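# Illustrative usage sketch (not part of the original module). The credentials
# and associate tag below are placeholders; a real Product Advertising API
# account is needed for the request to succeed, and the attributes available on
# each returned item depend on the ResponseGroup requested.
if __name__ == '__main__':
    conn = ECSConnection(aws_access_key_id='<access-key>',
                         aws_secret_access_key='<secret-key>')
    items = conn.item_search('Books', Keywords='python',
                             AssociateTag='<associate-tag>')
    for item in items:
        print(item.Title)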
|
mit
|
reiaaoyama/contrail-controller
|
src/config/common/importutils.py
|
18
|
2171
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
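# Illustrative usage sketch (not part of the original module). The dotted paths
# below point at the standard library, so the snippet is self-contained.
if __name__ == '__main__':
    cls = import_class('collections.OrderedDict')
    obj = import_object('collections.OrderedDict', [('a', 1)])
    json_mod = import_module('json')
    missing = try_import('no_such_module_anywhere')
    print(cls)
    print(obj)
    print(json_mod.dumps(dict(obj)))
    print(missing)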
|
apache-2.0
|
ehsanirani/slides_postdoc_interview
|
node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
|
1824
|
3474
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
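# Illustrative sketch (not part of the original module): the identity-variable
# loop above leaves each entry pointing back at a gyp variable reference, so
# those expansions are round-tripped into the .gypd output rather than being
# resolved to concrete paths. Requires gyp to be importable because of the
# module-level imports.
if __name__ == '__main__':
    print(generator_default_variables['PRODUCT_DIR'])     # '<(PRODUCT_DIR)'
    print(generator_default_variables['RULE_INPUT_EXT'])  # '<(RULE_INPUT_EXT)'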
|
mit
|
alphagov/stagecraft
|
stagecraft/apps/dashboards/models/dashboard.py
|
1
|
14599
|
from __future__ import unicode_literals
import uuid
from django.core.validators import RegexValidator
from django.db import models
from stagecraft.apps.organisation.models import Node
from stagecraft.apps.users.models import User
from django.db.models.query import QuerySet
def list_to_tuple_pairs(elements):
return tuple([(element, element) for element in elements])
class DashboardManager(models.Manager):
def by_tx_id(self, tx_id):
filter_string = '"service_id:{}"'.format(tx_id)
return self.raw('''
SELECT DISTINCT dashboards_dashboard.*
FROM dashboards_module
INNER JOIN dashboards_dashboard ON
dashboards_dashboard.id = dashboards_module.dashboard_id,
json_array_elements(query_parameters::json->'filter_by') AS filters
WHERE filters::text = %s
AND data_set_id=(SELECT id FROM datasets_dataset
WHERE name='transactional_services_summaries')
AND dashboards_dashboard.status='published'
''', [filter_string])
def get_query_set(self):
return QuerySet(self.model, using=self._db)
def for_user(self, user):
return self.get_query_set().filter(owners=user)
class Dashboard(models.Model):
objects = DashboardManager()
id = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True)
slug_validator = RegexValidator(
'^[-a-z0-9]+$',
message='Slug can only contain lower case letters, numbers or hyphens'
)
slug = models.CharField(
max_length=1000,
unique=True,
validators=[
slug_validator
]
)
owners = models.ManyToManyField(User)
dashboard_types = [
'transaction',
'high-volume-transaction',
'service-group',
'agency',
'department',
'content',
'other',
]
customer_types = [
'',
'Business',
'Individuals',
'Business and individuals',
'Charity',
]
business_models = [
'',
'Department budget',
'Fees and charges',
'Taxpayers',
'Fees and charges, and taxpayers',
]
straplines = [
'Dashboard',
'Service dashboard',
'Content dashboard',
'Performance',
'Policy dashboard',
'Public sector purchasing dashboard',
'Topic Explorer',
'Service Explorer',
]
dashboard_type = models.CharField(
max_length=30,
choices=list_to_tuple_pairs(dashboard_types),
default=dashboard_types[0]
)
page_type = models.CharField(max_length=80, default='dashboard')
status = models.CharField(
max_length=30,
default='unpublished'
)
title = models.CharField(max_length=256)
description = models.CharField(max_length=500, blank=True)
description_extra = models.CharField(max_length=400, blank=True)
costs = models.CharField(max_length=1500, blank=True)
other_notes = models.CharField(max_length=1000, blank=True)
customer_type = models.CharField(
max_length=30,
choices=list_to_tuple_pairs(customer_types),
default=customer_types[0],
blank=True
)
business_model = models.CharField(
max_length=31,
choices=list_to_tuple_pairs(business_models),
default=business_models[0],
blank=True
)
# 'department' is not being considered for now
# 'agency' is not being considered for now
improve_dashboard_message = models.BooleanField(default=True)
strapline = models.CharField(
max_length=40,
choices=list_to_tuple_pairs(straplines),
default=straplines[0]
)
tagline = models.CharField(max_length=400, blank=True)
_organisation = models.ForeignKey(
'organisation.Node', blank=True, null=True,
db_column='organisation_id')
@property
def published(self):
return self.status == 'published'
@published.setter
def published(self, value):
if value is True:
self.status = 'published'
else:
self.status = 'unpublished'
@property
def organisation(self):
return self._organisation
@organisation.setter
def organisation(self, node):
self.department_cache = None
self.agency_cache = None
self.service_cache = None
self.transaction_cache = None
self._organisation = node
if node is not None:
for n in node.get_ancestors(include_self=True):
if n.typeOf.name == 'department':
self.department_cache = n
elif n.typeOf.name == 'agency':
self.agency_cache = n
elif n.typeOf.name == 'service':
self.service_cache = n
elif n.typeOf.name == 'transaction':
self.transaction_cache = n
# Denormalise org tree for querying ease.
department_cache = models.ForeignKey(
'organisation.Node', blank=True, null=True,
related_name='dashboards_owned_by_department')
agency_cache = models.ForeignKey(
'organisation.Node', blank=True, null=True,
related_name='dashboards_owned_by_agency')
service_cache = models.ForeignKey(
'organisation.Node', blank=True, null=True,
related_name='dashboards_owned_by_service')
transaction_cache = models.ForeignKey(
'organisation.Node', blank=True, null=True,
related_name='dashboards_owned_by_transaction')
spotlightify_base_fields = [
'business_model',
'costs',
'customer_type',
'dashboard_type',
'description',
'description_extra',
'other_notes',
'page_type',
'published',
'slug',
'strapline',
'tagline',
'title'
]
list_base_fields = [
'slug',
'title',
'dashboard_type'
]
@classmethod
def list_for_spotlight(cls):
dashboards = Dashboard.objects.filter(status='published')\
.select_related('department_cache', 'agency_cache',
'service_cache')
def spotlightify_for_list(item):
return item.spotlightify_for_list()
return {
'page-type': 'browse',
'items': map(spotlightify_for_list, dashboards),
}
def spotlightify_for_list(self):
base_dict = self.list_base_dict()
if self.department_cache is not None:
base_dict['department'] = self.department_cache.spotlightify()
if self.agency_cache is not None:
base_dict['agency'] = self.agency_cache.spotlightify()
if self.service_cache is not None:
base_dict['service'] = self.service_cache.spotlightify()
return base_dict
def spotlightify_base_dict(self):
base_dict = {}
for field in self.spotlightify_base_fields:
base_dict[field.replace('_', '-')] = getattr(self, field)
return base_dict
def list_base_dict(self):
base_dict = {}
for field in self.list_base_fields:
base_dict[field.replace('_', '-')] = getattr(self, field)
return base_dict
def related_pages_dict(self):
related_pages_dict = {}
transaction_link = self.get_transaction_link()
if transaction_link:
related_pages_dict['transaction'] = (
transaction_link.serialize())
related_pages_dict['other'] = [
link.serialize() for link
in self.get_other_links()
]
related_pages_dict['improve-dashboard-message'] = (
self.improve_dashboard_message
)
return related_pages_dict
def spotlightify(self, request_slug=None):
base_dict = self.spotlightify_base_dict()
base_dict['modules'] = self.spotlightify_modules()
base_dict['relatedPages'] = self.related_pages_dict()
if self.department():
base_dict['department'] = self.spotlightify_department()
if self.agency():
base_dict['agency'] = self.spotlightify_agency()
modules_or_tabs = get_modules_or_tabs(request_slug, base_dict)
return modules_or_tabs
def serialize(self):
def simple_field(field):
return not (field.is_relation or field.one_to_one or (
field.many_to_one and field.related_model))
serialized = {}
fields = self._meta.get_fields()
field_names = [field.name for field in fields
if simple_field(field)]
for field in field_names:
if not (field.startswith('_') or field.endswith('_cache')):
value = getattr(self, field)
serialized[field] = value
if self.status == 'published':
serialized['published'] = True
else:
serialized['published'] = False
if self.organisation:
serialized['organisation'] = Node.serialize(self.organisation)
else:
serialized['organisation'] = None
serialized['links'] = [link.serialize()
for link in self.link_set.all()]
serialized['modules'] = self.serialized_modules()
return serialized
def __str__(self):
return self.slug
def serialized_modules(self):
return [m.serialize()
for m in self.module_set.filter(parent=None).order_by('order')]
def spotlightify_modules(self):
return [m.spotlightify() for m in
self.module_set.filter(parent=None).order_by('order')]
def spotlightify_agency(self):
return self.agency().spotlightify()
def spotlightify_department(self):
return self.department().spotlightify()
def update_transaction_link(self, title, url):
transaction_link = self.get_transaction_link()
if not transaction_link:
self.link_set.create(
title=title,
url=url,
link_type='transaction'
)
else:
link = transaction_link
link.title = title
link.url = url
link.save()
def add_other_link(self, title, url):
self.link_set.create(
title=title,
url=url,
link_type='other'
)
def get_transaction_link(self):
transaction_link = self.link_set.filter(link_type='transaction')
if len(transaction_link) == 1:
return transaction_link[0]
else:
return None
def get_other_links(self):
return self.link_set.filter(link_type='other').all()
def validate_and_save(self):
self.full_clean()
self.save()
class Meta:
app_label = 'dashboards'
def organisations(self):
department = None
agency = None
if self.organisation is not None:
for node in self.organisation.get_ancestors(include_self=False):
if node.typeOf.name == 'department':
department = node
if node.typeOf.name == 'agency':
agency = node
return department, agency
def agency(self):
if self.organisation is not None:
if self.organisation.typeOf.name == 'agency':
return self.organisation
for node in self.organisation.get_ancestors():
if node.typeOf.name == 'agency':
return node
return None
def department(self):
if self.agency() is not None:
dept = None
for node in self.agency().get_ancestors(include_self=False):
if node.typeOf.name == 'department':
dept = node
return dept
else:
if self.organisation is not None:
for node in self.organisation.get_ancestors():
if node.typeOf.name == 'department':
return node
return None
class Link(models.Model):
id = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True)
title = models.CharField(max_length=100)
url = models.URLField(max_length=200)
dashboard = models.ForeignKey(Dashboard)
link_types = [
'transaction',
'other',
]
link_type = models.CharField(
max_length=20,
choices=list_to_tuple_pairs(link_types),
)
def link_url(self):
return '<a href="{0}">{0}</a>'.format(self.url)
link_url.allow_tags = True
def serialize(self):
return {
'title': self.title,
'type': self.link_type,
'url': self.url
}
class Meta:
app_label = 'dashboards'
def get_modules_or_tabs(request_slug, dashboard_json):
# first part will always be empty as we never end the dashboard slug with
# a slash
if request_slug is None:
return dashboard_json
module_slugs = request_slug.replace(
dashboard_json['slug'], '').split('/')[1:]
if len(module_slugs) == 0:
return dashboard_json
if 'modules' not in dashboard_json:
return None
modules = dashboard_json['modules']
for slug in module_slugs:
module = find_first_item_matching_slug(modules, slug)
if module is None:
return None
elif module['modules']:
modules = module['modules']
dashboard_json['modules'] = [module]
elif 'tabs' in module:
last_slug = module_slugs[-1]
if last_slug == slug:
dashboard_json['modules'] = [module]
else:
tab_slug = last_slug.replace(slug + '-', '')
tab = get_single_tab_from_module(tab_slug, module)
if tab is None:
return None
else:
tab['info'] = module['info']
tab['title'] = module['title'] + ' - ' + tab['title']
dashboard_json['modules'] = [tab]
break
else:
dashboard_json['modules'] = [module]
dashboard_json['page-type'] = 'module'
return dashboard_json
def get_single_tab_from_module(tab_slug, module_json):
return find_first_item_matching_slug(module_json['tabs'], tab_slug)
def find_first_item_matching_slug(item_list, slug):
for item in item_list:
if item['slug'] == slug:
return item
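# Illustrative sketch (not part of the original module): how get_modules_or_tabs
# narrows a serialized dashboard down to a single module. The dashboard dict is
# a minimal hand-built fixture, and running this file directly still requires a
# configured Django environment because of the module-level imports.
if __name__ == '__main__':
    dashboard_json = {
        'slug': 'tax-disc',
        'modules': [
            {'slug': 'volumes', 'modules': [], 'title': 'Volumes', 'info': []},
            {'slug': 'take-up', 'modules': [], 'title': 'Take-up', 'info': []},
        ],
    }
    narrowed = get_modules_or_tabs('tax-disc/volumes', dashboard_json)
    print(narrowed['page-type'])                     # 'module'
    print([m['slug'] for m in narrowed['modules']])  # ['volumes']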
|
mit
|
thispc/download-manager
|
module/lib/jinja2/ext.py
|
64
|
23899
|
# -*- coding: utf-8 -*-
"""
jinja2.ext
~~~~~~~~~~
    Jinja extensions allow adding custom tags similar to the way django custom
tags work. By default two example extensions exist: an i18n and a cache
extension.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from collections import deque
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.environment import Environment
from jinja2.runtime import Undefined, concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup, next
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
def __new__(cls, name, bases, d):
rv = type.__new__(cls, name, bases, d)
rv.identifier = rv.__module__ + '.' + rv.__name__
return rv
class Extension(object):
"""Extensions can be used to add extra functionality to the Jinja template
system at the parser level. Custom extensions are bound to an environment
but may not store environment specific data on `self`. The reason for
this is that an extension can be bound to another environment (for
overlays) by creating a copy and reassigning the `environment` attribute.
As extensions are created by the environment they cannot accept any
arguments for configuration. One may want to work around that by using
a factory function, but that is not possible as extensions are identified
by their import name. The correct way to configure the extension is
    storing the configuration values on the environment.  Because the
    environment then ends up acting as central configuration storage, the
    attribute names may clash, which is why extensions have to ensure that
    the names they choose for configuration are not too generic.  ``prefix``
    for example is a terrible name, ``fragment_cache_prefix`` on the other
    hand is a good name as it includes the name of the extension (fragment cache).
"""
__metaclass__ = ExtensionRegistry
#: if this extension parses this is the list of tags it's listening to.
tags = set()
#: the priority of that extension. This is especially useful for
#: extensions that preprocess values. A lower value means higher
#: priority.
#:
#: .. versionadded:: 2.4
priority = 100
def __init__(self, environment):
self.environment = environment
def bind(self, environment):
"""Create a copy of this extension bound to another environment."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.environment = environment
return rv
def preprocess(self, source, name, filename=None):
"""This method is called before the actual lexing and can be used to
preprocess the source. The `filename` is optional. The return value
must be the preprocessed source.
"""
return source
def filter_stream(self, stream):
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
In the `ext` folder of the Jinja2 source distribution there is a file
called `inlinegettext.py` which implements a filter that utilizes this
method.
"""
return stream
def parse(self, parser):
"""If any of the :attr:`tags` matched this method is called with the
parser as first argument. The token the parser stream is pointing at
is the name token that matched. This method has to return one or a
list of multiple nodes.
"""
raise NotImplementedError()
def attr(self, name, lineno=None):
"""Return an attribute node for the current extension. This is useful
to pass constants on extensions to generated template code::
self.attr('_my_attribute', lineno=lineno)
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
def call_method(self, name, args=None, kwargs=None, dyn_args=None,
dyn_kwargs=None, lineno=None):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
if args is None:
args = []
if kwargs is None:
kwargs = []
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
dyn_args, dyn_kwargs, lineno=lineno)
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
def _make_new_gettext(func):
@contextfunction
def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
variables.setdefault('num', __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
return rv % variables
return ngettext
class InternationalizationExtension(Extension):
"""This extension adds gettext support to Jinja2."""
tags = set(['trans'])
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
# {% trans count=something() %}{{ count }} foo{% pluralize
# %}{{ count }} fooss{% endtrans %}
# something is called twice here. One time for the gettext value and
# the other time for the n-parameter of the ngettext function.
def __init__(self, environment):
Extension.__init__(self, environment)
environment.globals['_'] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
newstyle_gettext=False
)
def _install(self, translations, newstyle=None):
gettext = getattr(translations, 'ugettext', None)
if gettext is None:
gettext = translations.gettext
ngettext = getattr(translations, 'ungettext', None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
lambda x: x,
lambda s, p, n: (n != 1 and (p,) or (s,))[0],
newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
if newstyle is not None:
self.environment.newstyle_gettext = newstyle
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
self.environment.globals.update(
gettext=gettext,
ngettext=ngettext
)
def _uninstall(self, translations):
for key in 'gettext', 'ngettext':
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
if isinstance(source, basestring):
source = self.environment.parse(source)
return extract_from_ast(source, gettext_functions)
def parse(self, parser):
"""Parse a translatable tag."""
lineno = next(parser.stream).lineno
num_called_num = False
# find all the variables referenced. Additionally a variable can be
# defined in the body of the trans block too, but this is checked at
# a later state.
plural_expr = None
variables = {}
while parser.stream.current.type != 'block_end':
if variables:
parser.stream.expect('comma')
# skip colon for python compatibility
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in variables:
parser.fail('translatable variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
# expressions
if parser.stream.current.type == 'assign':
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
else:
variables[name.value] = var = nodes.Name(name.value, 'load')
if plural_expr is None:
plural_expr = var
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural = plural_names = None
have_plural = False
referenced = set()
# now parse until endtrans or pluralize
singular_names, singular = self._parse_block(parser, True)
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
plural_expr = nodes.Name(singular_names[0], 'load')
num_called_num = singular_names[0] == 'num'
# if we have a pluralize block, we parse that too
if parser.stream.current.test('name:pluralize'):
have_plural = True
next(parser.stream)
if parser.stream.current.type != 'block_end':
name = parser.stream.expect('name')
if name.value not in variables:
parser.fail('unknown variable %r for pluralization' %
name.value, name.lineno,
exc=TemplateAssertionError)
plural_expr = variables[name.value]
num_called_num = name.value == 'num'
parser.stream.expect('block_end')
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
else:
next(parser.stream)
# register free names as simple name expressions
for var in referenced:
if var not in variables:
variables[var] = nodes.Name(var, 'load')
if not have_plural:
plural_expr = None
elif plural_expr is None:
parser.fail('pluralize without variables', lineno)
node = self._make_node(singular, plural, variables, plural_expr,
bool(referenced),
num_called_num and have_plural)
node.set_lineno(lineno)
return node
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf)
def _make_node(self, singular, plural, variables, plural_expr,
vars_referenced, num_called_num):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
singular = singular.replace('%%', '%')
if plural:
plural = plural.replace('%%', '%')
# singular only:
if plural_expr is None:
gettext = nodes.Name('gettext', 'load')
node = nodes.Call(gettext, [nodes.Const(singular)],
[], None, None)
# singular and plural
else:
ngettext = nodes.Name('ngettext', 'load')
node = nodes.Call(ngettext, [
nodes.Const(singular),
nodes.Const(plural),
plural_expr
], [], None, None)
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
# handling itself
if self.environment.newstyle_gettext:
for key, value in variables.iteritems():
# the function adds that later anyways in case num was
# called num, so just skip it.
if num_called_num and key == 'num':
continue
node.kwargs.append(nodes.Keyword(key, value))
# otherwise do that here
else:
# mark the return value as safe if we are in an
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
node = nodes.Mod(node, nodes.Dict([
nodes.Pair(nodes.Const(key), value)
for key, value in variables.items()
]))
return nodes.Output([node])
class ExprStmtExtension(Extension):
"""Adds a `do` tag to Jinja2 that works like the print statement just
that it doesn't print the return value.
"""
tags = set(['do'])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
node.node = parser.parse_tuple()
return node
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
tags = set(['break', 'continue'])
def parse(self, parser):
token = next(parser.stream)
if token.value == 'break':
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
class WithExtension(Extension):
"""Adds support for a django-like with block."""
tags = set(['with'])
def parse(self, parser):
node = nodes.Scope(lineno=next(parser.stream).lineno)
assignments = []
while parser.stream.current.type != 'block_end':
lineno = parser.stream.current.lineno
if assignments:
parser.stream.expect('comma')
target = parser.parse_assign_target()
parser.stream.expect('assign')
expr = parser.parse_expression()
assignments.append(nodes.Assign(target, expr, lineno=lineno))
node.body = assignments + \
list(parser.parse_statements(('name:endwith',),
drop_needle=True))
return node
class AutoEscapeExtension(Extension):
"""Changes auto escape rules for a scope."""
tags = set(['autoescape'])
def parse(self, parser):
node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
node.options = [
nodes.Keyword('autoescape', parser.parse_expression())
]
node.body = parser.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
allows Babel to figure out what you really meant if you are using
gettext functions that allow keyword arguments for placeholder expansion.
If you don't want that behavior set the `babel_style` parameter to `False`
which causes only strings to be returned and parameters are always stored
in tuples. As a consequence invalid gettext calls (calls without a single
string parameter or string parameters after non-string parameters) are
skipped.
This example explains the behavior:
>>> from jinja2 import Environment
>>> env = Environment()
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
>>> list(extract_from_ast(node))
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
>>> list(extract_from_ast(node, babel_style=False))
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
For every string found this function yields a ``(lineno, function,
message)`` tuple, where:
* ``lineno`` is the number of the line on which the string was found,
* ``function`` is the name of the ``gettext`` function used (if the
string was extracted from embedded Python code), and
* ``message`` is the string itself (a ``unicode`` object, or a tuple
of ``unicode`` objects for functions with multiple string arguments).
    This extraction function operates on the AST and is therefore unable
to extract any comments. For comment support you have to use the babel
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
if not isinstance(node.node, nodes.Name) or \
node.node.name not in gettext_functions:
continue
strings = []
for arg in node.args:
if isinstance(arg, nodes.Const) and \
isinstance(arg.value, basestring):
strings.append(arg.value)
else:
strings.append(None)
for arg in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
if node.dyn_kwargs is not None:
strings.append(None)
if not babel_style:
strings = tuple(x for x in strings if x is not None)
if not strings:
continue
else:
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.lineno, node.node.name, strings
class _CommentFinder(object):
"""Helper class to find comments in a token stream. Can only
find comments for gettext calls forwards. Once the comment
from line 4 is found, a comment for line 1 will not return a
usable value.
"""
def __init__(self, tokens, comment_tags):
self.tokens = tokens
self.comment_tags = comment_tags
self.offset = 0
self.last_lineno = 0
def find_backwards(self, offset):
try:
for _, token_type, token_value in \
reversed(self.tokens[self.offset:offset]):
if token_type in ('comment', 'linecomment'):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
continue
if prefix in self.comment_tags:
return [comment.rstrip()]
return []
finally:
self.offset = offset
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
def babel_extract(fileobj, keywords, comment_tags, options):
"""Babel extraction method for Jinja templates.
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
       try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
.. versionchanged:: 2.5.1
The `newstyle_gettext` flag can be set to `True` to enable newstyle
gettext calls.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results.
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
(comments will be empty currently)
"""
extensions = set()
for extension in options.get('extensions', '').split(','):
extension = extension.strip()
if not extension:
continue
extensions.add(import_string(extension))
if InternationalizationExtension not in extensions:
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
        return options.get(key, str(default)).lower() in ('1', 'on', 'yes', 'true')
environment = Environment(
options.get('block_start_string', BLOCK_START_STRING),
options.get('block_end_string', BLOCK_END_STRING),
options.get('variable_start_string', VARIABLE_START_STRING),
options.get('variable_end_string', VARIABLE_END_STRING),
options.get('comment_start_string', COMMENT_START_STRING),
options.get('comment_end_string', COMMENT_END_STRING),
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
getbool(options, 'trim_blocks', TRIM_BLOCKS),
NEWLINE_SEQUENCE, frozenset(extensions),
cache_size=0,
auto_reload=False
)
if getbool(options, 'newstyle_gettext'):
environment.newstyle_gettext = True
source = fileobj.read().decode(options.get('encoding', 'utf-8'))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
except TemplateSyntaxError, e:
# skip templates with syntax errors
return
finder = _CommentFinder(tokens, comment_tags)
for lineno, func, message in extract_from_ast(node, keywords):
yield lineno, func, message, finder.find_comments(lineno)
#: nicer import names
i18n = InternationalizationExtension
do = ExprStmtExtension
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
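# Illustrative usage sketch (not part of the original module): enabling the
# extensions defined above on an Environment. The template string is made up,
# and an installed jinja2 package is assumed for the module-level imports.
if __name__ == '__main__':
    env = Environment(extensions=[i18n, do, loopcontrols, with_, autoescape])
    env.install_null_translations()
    tmpl = env.from_string(
        '{% trans name=user %}Hello {{ name }}!{% endtrans %} '
        '{% for i in range(5) %}{% if i > 2 %}{% break %}{% endif %}{{ i }}'
        '{% endfor %}')
    print(tmpl.render(user='World'))   # -> Hello World! 012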
|
gpl-3.0
|
zackmdavis/swift
|
swift/account/reaper.py
|
2
|
22611
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from swift import gettext_ as _
from logging import DEBUG
from math import sqrt
from time import time
import itertools
from eventlet import GreenPool, sleep, Timeout
import swift.common.db
from swift.account.backend import AccountBroker, DATADIR
from swift.common.direct_client import direct_delete_container, \
direct_delete_object, direct_get_container
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.utils import get_logger, whataremyips, ismount, \
config_true_value, Timestamp
from swift.common.daemon import Daemon
from swift.common.storage_policy import POLICIES, PolicyError
class AccountReaper(Daemon):
"""
Removes data from status=DELETED accounts. These are accounts that have
    been asked to be removed by the reseller via the services
    remove_storage_account XMLRPC call.
The account is not deleted immediately by the services call, but instead
the account is simply marked for deletion by setting the status column in
the account_stat table of the account database. This account reaper scans
for such accounts and removes the data in the background. The background
deletion process will occur on the primary account server for the account.
:param server_conf: The [account-server] dictionary of the account server
configuration file
:param reaper_conf: The [account-reaper] dictionary of the account server
configuration file
See the etc/account-server.conf-sample for information on the possible
configuration parameters.
"""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='account-reaper')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.interval = int(conf.get('interval', 3600))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.account_ring = None
self.container_ring = None
self.object_ring = None
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
self.concurrency = int(conf.get('concurrency', 25))
self.container_concurrency = self.object_concurrency = \
sqrt(self.concurrency)
self.container_pool = GreenPool(size=self.container_concurrency)
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self.delay_reaping = int(conf.get('delay_reaping') or 0)
reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
self.reap_not_done_after = reap_warn_after + self.delay_reaping
def get_account_ring(self):
"""The account :class:`swift.common.ring.Ring` for the cluster."""
if not self.account_ring:
self.account_ring = Ring(self.swift_dir, ring_name='account')
return self.account_ring
def get_container_ring(self):
"""The container :class:`swift.common.ring.Ring` for the cluster."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def get_object_ring(self, policy_idx):
"""
Get the ring identified by the policy index
:param policy_idx: Storage policy index
:returns: A ring matching the storage policy
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def run_forever(self, *args, **kwargs):
"""Main entry point when running the reaper in normal daemon mode.
        This repeatedly calls :func:`run_once` no quicker than the
configuration interval.
"""
self.logger.debug('Daemon started.')
sleep(random.random() * self.interval)
while True:
begin = time()
self.run_once()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Main entry point when running the reaper in 'once' mode, where it will
do a single pass over all accounts on the server. This is called
repeatedly by :func:`run_forever`. This will call :func:`reap_device`
once for each device on the server.
"""
self.logger.debug('Begin devices pass: %s', self.devices)
begin = time()
try:
for device in os.listdir(self.devices):
if self.mount_check and not ismount(
os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
self.reap_device(device)
except (Exception, Timeout):
self.logger.exception(_("Exception in top-level account reaper "
"loop"))
elapsed = time() - begin
self.logger.info(_('Devices pass completed: %.02fs'), elapsed)
def reap_device(self, device):
"""
Called once per pass for each device on the server. This will scan the
accounts directory for the device, looking for partitions this device
is the primary for, then looking for account databases that are marked
status=DELETED and still have containers and calling
:func:`reap_account`. Account databases marked status=DELETED that no
longer have containers will eventually be permanently removed by the
reclaim process within the account replicator (see
:mod:`swift.db_replicator`).
:param device: The device to look for accounts to be deleted.
"""
datadir = os.path.join(self.devices, device, DATADIR)
if not os.path.exists(datadir):
return
for partition in os.listdir(datadir):
partition_path = os.path.join(datadir, partition)
if not partition.isdigit():
continue
nodes = self.get_account_ring().get_part_nodes(int(partition))
if (not is_local_device(self.myips, None, nodes[0]['ip'], None)
or not os.path.isdir(partition_path)):
continue
for suffix in os.listdir(partition_path):
suffix_path = os.path.join(partition_path, suffix)
if not os.path.isdir(suffix_path):
continue
for hsh in os.listdir(suffix_path):
hsh_path = os.path.join(suffix_path, hsh)
if not os.path.isdir(hsh_path):
continue
for fname in sorted(os.listdir(hsh_path), reverse=True):
if fname.endswith('.ts'):
break
elif fname.endswith('.db'):
self.start_time = time()
broker = \
AccountBroker(os.path.join(hsh_path, fname))
if broker.is_status_deleted() and \
not broker.empty():
self.reap_account(broker, partition, nodes)
def reset_stats(self):
self.stats_return_codes = {}
self.stats_containers_deleted = 0
self.stats_objects_deleted = 0
self.stats_containers_remaining = 0
self.stats_objects_remaining = 0
self.stats_containers_possibly_remaining = 0
self.stats_objects_possibly_remaining = 0
def reap_account(self, broker, partition, nodes):
"""
Called once per pass for each account this server is the primary for
and attempts to delete the data for the given account. The reaper will
only delete one account at any given time. It will call
:func:`reap_container` up to sqrt(self.concurrency) times concurrently
while reaping the account.
If there is any exception while deleting a single container, the
process will continue for any other containers and the failed
containers will be tried again the next time this function is called
with the same parameters.
If there is any exception while listing the containers for deletion,
the process will stop (but will obviously be tried again the next time
this function is called with the same parameters). This isn't likely
since the listing comes from the local database.
After the process completes (successfully or not) statistics about what
was accomplished will be logged.
        This function should raise no exception; it only updates the various
        self.stats_* values for what occurs and returns True once a pass has
        been attempted (False when reaping is skipped because of
        delay_reaping).
:param broker: The AccountBroker for the account to delete.
:param partition: The partition in the account ring the account is on.
:param nodes: The primary node dicts for the account to delete.
.. seealso::
:class:`swift.account.backend.AccountBroker` for the broker class.
.. seealso::
:func:`swift.common.ring.Ring.get_nodes` for a description
of the node dicts.
"""
begin = time()
info = broker.get_info()
if time() - float(Timestamp(info['delete_timestamp'])) <= \
self.delay_reaping:
return False
account = info['account']
self.logger.info(_('Beginning pass on account %s'), account)
self.reset_stats()
try:
marker = ''
while True:
containers = \
list(broker.list_containers_iter(1000, marker, None, None,
None))
if not containers:
break
try:
for (container, _junk, _junk, _junk) in containers:
self.container_pool.spawn(self.reap_container, account,
partition, nodes, container)
self.container_pool.waitall()
except (Exception, Timeout):
self.logger.exception(
_('Exception with containers for account %s'), account)
marker = containers[-1][0]
if marker == '':
break
log = 'Completed pass on account %s' % account
except (Exception, Timeout):
self.logger.exception(
_('Exception with account %s'), account)
log = _('Incomplete pass on account %s') % account
if self.stats_containers_deleted:
log += _(', %s containers deleted') % self.stats_containers_deleted
if self.stats_objects_deleted:
log += _(', %s objects deleted') % self.stats_objects_deleted
if self.stats_containers_remaining:
log += _(', %s containers remaining') % \
self.stats_containers_remaining
if self.stats_objects_remaining:
log += _(', %s objects remaining') % self.stats_objects_remaining
if self.stats_containers_possibly_remaining:
log += _(', %s containers possibly remaining') % \
self.stats_containers_possibly_remaining
if self.stats_objects_possibly_remaining:
log += _(', %s objects possibly remaining') % \
self.stats_objects_possibly_remaining
if self.stats_return_codes:
log += _(', return codes: ')
for code in sorted(self.stats_return_codes):
log += '%s %sxxs, ' % (self.stats_return_codes[code], code)
log = log[:-2]
log += _(', elapsed: %.02fs') % (time() - begin)
self.logger.info(log)
self.logger.timing_since('timing', self.start_time)
delete_timestamp = Timestamp(info['delete_timestamp'])
if self.stats_containers_remaining and \
begin - float(delete_timestamp) >= self.reap_not_done_after:
self.logger.warn(_('Account %s has not been reaped since %s') %
(account, delete_timestamp.isoformat))
return True
def reap_container(self, account, account_partition, account_nodes,
container):
"""
Deletes the data and the container itself for the given container. This
will call :func:`reap_object` up to sqrt(self.concurrency) times
concurrently for the objects in the container.
If there is any exception while deleting a single object, the process
will continue for any other objects in the container and the failed
objects will be tried again the next time this function is called with
the same parameters.
If there is any exception while listing the objects for deletion, the
process will stop (but will obviously be tried again the next time this
function is called with the same parameters). This is a possibility
since the listing comes from querying just the primary remote container
server.
Once all objects have been attempted to be deleted, the container
itself will be attempted to be deleted by sending a delete request to
all container nodes. The format of the delete request is such that each
container server will update a corresponding account server, removing
the container from the account's listing.
This function returns nothing and should raise no exception but only
update various self.stats_* values for what occurs.
:param account: The name of the account for the container.
:param account_partition: The partition for the account on the account
ring.
:param account_nodes: The primary node dicts for the account.
:param container: The name of the container to delete.
* See also: :func:`swift.common.ring.Ring.get_nodes` for a description
of the account node dicts.
"""
account_nodes = list(account_nodes)
part, nodes = self.get_container_ring().get_nodes(account, container)
node = nodes[-1]
pool = GreenPool(size=self.object_concurrency)
marker = ''
while True:
objects = None
try:
headers, objects = direct_get_container(
node, part, account, container,
marker=marker,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout)
self.stats_return_codes[2] = \
self.stats_return_codes.get(2, 0) + 1
self.logger.increment('return_codes.2')
except ClientException as err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
if not objects:
break
try:
policy_index = headers.get('X-Backend-Storage-Policy-Index', 0)
policy = POLICIES.get_by_index(policy_index)
if not policy:
self.logger.error('ERROR: invalid storage policy index: %r'
% policy_index)
for obj in objects:
if isinstance(obj['name'], unicode):
obj['name'] = obj['name'].encode('utf8')
pool.spawn(self.reap_object, account, container, part,
nodes, obj['name'], policy_index)
pool.waitall()
except (Exception, Timeout):
self.logger.exception(_('Exception with objects for container '
'%(container)s for account %(account)s'
),
{'container': container,
'account': account})
marker = objects[-1]['name']
if marker == '':
break
successes = 0
failures = 0
timestamp = Timestamp(time())
for node in nodes:
anode = account_nodes.pop()
try:
direct_delete_container(
node, part, account, container,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout,
headers={'X-Account-Host': '%(ip)s:%(port)s' % anode,
'X-Account-Partition': str(account_partition),
'X-Account-Device': anode['device'],
'X-Account-Override-Deleted': 'yes',
'X-Timestamp': timestamp.internal})
successes += 1
self.stats_return_codes[2] = \
self.stats_return_codes.get(2, 0) + 1
self.logger.increment('return_codes.2')
except ClientException as err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.logger.increment('containers_failures')
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
if successes > failures:
self.stats_containers_deleted += 1
self.logger.increment('containers_deleted')
elif not successes:
self.stats_containers_remaining += 1
self.logger.increment('containers_remaining')
else:
self.stats_containers_possibly_remaining += 1
self.logger.increment('containers_possibly_remaining')
def reap_object(self, account, container, container_partition,
container_nodes, obj, policy_index):
"""
Deletes the given object by issuing a delete request to each node for
the object. The format of the delete request is such that each object
server will update a corresponding container server, removing the
object from the container's listing.
This function returns nothing and should raise no exception but only
update various self.stats_* values for what occurs.
:param account: The name of the account for the object.
:param container: The name of the container for the object.
:param container_partition: The partition for the container on the
container ring.
:param container_nodes: The primary node dicts for the container.
:param obj: The name of the object to delete.
:param policy_index: The storage policy index of the object's container
* See also: :func:`swift.common.ring.Ring.get_nodes` for a description
of the container node dicts.
"""
cnodes = itertools.cycle(container_nodes)
try:
ring = self.get_object_ring(policy_index)
except PolicyError:
self.stats_objects_remaining += 1
self.logger.increment('objects_remaining')
return
part, nodes = ring.get_nodes(account, container, obj)
successes = 0
failures = 0
timestamp = Timestamp(time())
for node in nodes:
cnode = next(cnodes)
try:
direct_delete_object(
node, part, account, container, obj,
conn_timeout=self.conn_timeout,
response_timeout=self.node_timeout,
headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode,
'X-Container-Partition': str(container_partition),
'X-Container-Device': cnode['device'],
'X-Backend-Storage-Policy-Index': policy_index,
'X-Timestamp': timestamp.internal})
successes += 1
self.stats_return_codes[2] = \
self.stats_return_codes.get(2, 0) + 1
self.logger.increment('return_codes.2')
except ClientException as err:
if self.logger.getEffectiveLevel() <= DEBUG:
self.logger.exception(
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
failures += 1
self.logger.increment('objects_failures')
self.stats_return_codes[err.http_status / 100] = \
self.stats_return_codes.get(err.http_status / 100, 0) + 1
self.logger.increment(
'return_codes.%d' % (err.http_status / 100,))
if successes > failures:
self.stats_objects_deleted += 1
self.logger.increment('objects_deleted')
elif not successes:
self.stats_objects_remaining += 1
self.logger.increment('objects_remaining')
else:
self.stats_objects_possibly_remaining += 1
self.logger.increment('objects_possibly_remaining')
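# Illustrative sketch (not part of the original module): the concurrency split
# described in the docstrings above. With the default of 25 greenthreads the
# reaper works on up to sqrt(25) = 5 containers at once, each of which reaps up
# to 5 objects concurrently, so total work stays bounded by the configured
# value. Requires swift and eventlet to be importable because of the
# module-level imports.
if __name__ == '__main__':
    example_concurrency = 25
    container_concurrency = object_concurrency = sqrt(example_concurrency)
    print('%s containers x %s objects' % (container_concurrency,
                                          object_concurrency))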
|
apache-2.0
|
abridgett/ansible
|
lib/ansible/runner/action_plugins/set_fact.py
|
28
|
1288
|
# Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils
from ansible.runner.return_data import ReturnData
class ActionModule(object):
TRANSFERS_FILES = False
def __init__(self, runner):
self.runner = runner
def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
''' handler for running operations on master '''
# load up options
options = {}
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
return ReturnData(conn=conn, result=dict(ansible_facts=options))
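# Illustrative sketch (not part of the original plugin): the key=value parsing
# that feeds ansible_facts above. Requires ansible's utils to be importable.
if __name__ == '__main__':
    print(utils.parse_kv('release=precise number=42'))
    # -> {'release': 'precise', 'number': '42'}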
|
gpl-3.0
|
seemoo-lab/nexmon
|
utilities/wireshark/tools/dftestlib/time_relative.py
|
40
|
1244
|
# Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testTimeRelative(dftest.DFTest):
trace_file = "nfs.pcap"
def test_relative_time_1(self):
dfilter = "frame.time_delta == 0.7"
self.assertDFilterCount(dfilter, 1)
def test_relative_time_2(self):
dfilter = "frame.time_delta > 0.7"
self.assertDFilterCount(dfilter, 0)
def test_relative_time_3(self):
dfilter = "frame.time_delta < 0.7"
self.assertDFilterCount(dfilter, 1)
|
gpl-3.0
|
ptchankue/youtube-dl
|
youtube_dl/extractor/playvid.py
|
115
|
2864
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
_VALID_URL = r'https?://www\.playvid\.com/watch(\?v=|/)(?P<id>.+?)(?:#|$)'
_TEST = {
'url': 'http://www.playvid.com/watch/RnmBNgtrrJu',
'md5': 'ffa2f6b2119af359f544388d8c01eb6c',
'info_dict': {
'id': 'RnmBNgtrrJu',
'ext': 'mp4',
'title': 'md5:9256d01c6317e3f703848b5906880dc8',
'duration': 82,
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_error = re.search(
r'<div class="block-error">\s*<div class="heading">\s*<div>(?P<msg>.+?)</div>\s*</div>', webpage)
if m_error:
raise ExtractorError(clean_html(m_error.group('msg')), expected=True)
video_title = None
duration = None
video_thumbnail = None
formats = []
# most of the information is stored in the flashvars
flashvars = self._html_search_regex(
r'flashvars="(.+?)"', webpage, 'flashvars')
infos = compat_urllib_parse_unquote(flashvars).split(r'&')
for info in infos:
videovars_match = re.match(r'^video_vars\[(.+?)\]=(.+?)$', info)
if videovars_match:
key = videovars_match.group(1)
val = videovars_match.group(2)
if key == 'title':
video_title = compat_urllib_parse_unquote_plus(val)
if key == 'duration':
try:
duration = int(val)
except ValueError:
pass
if key == 'big_thumb':
video_thumbnail = val
videourl_match = re.match(
r'^video_urls\]\[(?P<resolution>[0-9]+)p', key)
if videourl_match:
height = int(videourl_match.group('resolution'))
formats.append({
'height': height,
'url': val,
})
self._sort_formats(formats)
# Extract title - should be in the flashvars; if not, look elsewhere
if video_title is None:
video_title = self._html_search_regex(
r'<title>(.*?)</title', webpage, 'title')
return {
'id': video_id,
'formats': formats,
'title': video_title,
'thumbnail': video_thumbnail,
'duration': duration,
'description': None,
'age_limit': 18
}
|
unlicense
|
spezi77/android_external_skia
|
platform_tools/android/bin/adb_list_devices.py
|
153
|
5077
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" adb_list_devices: list information about attached Android devices. """
import os
import re
import shlex
import subprocess
import sys
# This file, which resides on every Android device, contains a great deal of
# information about the device.
INFO_FILE = '/system/build.prop'
# Default set of properties to query about a device.
DEFAULT_PROPS_TO_GET = ['ro.product.device', 'ro.build.version.release',
'ro.build.type']
def GetDeviceInfo(adb, serial, props_to_get):
""" Return a list of values (or "<Unknown>" if no value can be found) for the
given set of properties for the device with the given serial number.
adb: path to the ADB program.
serial: serial number of the target device.
props_to_get: list of strings indicating which properties to determine.
"""
device_proc = subprocess.Popen([adb, '-s', serial, 'shell', 'cat',
INFO_FILE], stdout=subprocess.PIPE)
code = device_proc.wait()
if code != 0:
raise Exception('Could not query device with serial number %s.' % serial)
output = device_proc.stdout.read()
device_info = []
for prop in props_to_get:
# Find the property in the outputs
search_str = r'%s=(\S+)' % prop
match = re.search(search_str, output)
if not match:
value = '<Unknown>'
else:
value = match.group(1)
device_info.append(value)
return device_info
def PrintPrettyTable(data, file=None):
""" Print out the given data in a nicely-spaced format. This function scans
the list multiple times and uses extra memory, so don't use it for big data
sets.
data: list of lists of strings, where each list represents a row of data.
This table is assumed to be rectangular; if the length of any list differs
some of the output may not get printed.
file: file-like object into which the table should be written. If none is
provided, the table is written to stdout.
"""
if not file:
file = sys.stdout
column_widths = [0 for length in data[0]]
for line in data:
column_widths = [max(longest_len, len(prop)) for \
longest_len, prop in zip(column_widths, line)]
for line in data:
for prop, width in zip(line, column_widths):
file.write(prop.ljust(width + 1))
file.write('\n')
def FindADB(hint=None):
""" Attempt to find the ADB program using the following sequence of steps.
Returns the path to ADB if it can be found, or None otherwise.
1. If a hint was provided, is it a valid path to ADB?
2. Is ADB in PATH?
3. Is there an environment variable for ADB?
4. If the ANDROID_SDK_ROOT variable is set, try to find ADB in the SDK
directory.
hint: string indicating a possible path to ADB.
"""
# 1. If a hint was provided, does it point to ADB?
if hint:
if os.path.basename(hint) == 'adb':
adb = hint
else:
adb = os.path.join(hint, 'adb')
if subprocess.Popen([adb, 'version'], stdout=subprocess.PIPE).wait() == 0:
return adb
# 2. Is 'adb' in our PATH?
adb = 'adb'
if subprocess.Popen([adb, 'version'], stdout=subprocess.PIPE).wait() == 0:
return adb
# 3. Is there an environment variable for ADB?
try:
adb = os.environ.get('ADB')
if subprocess.Popen([adb, 'version'], stdout=subprocess.PIPE).wait() == 0:
return adb
except:
pass
# 4. If ANDROID_SDK_ROOT is set, try to find ADB in the SDK directory.
try:
sdk_dir = os.environ.get('ANDROID_SDK_ROOT')
adb = os.path.join(sdk_dir, 'platform-tools', 'adb')
if subprocess.Popen([adb, 'version'], stdout=subprocess.PIPE).wait() == 0:
return adb
except:
pass
return None
def main(argv):
""" Print out information about connected Android devices. By default, print
the serial number, status, device name, OS version, and build type of each
device. If any arguments are supplied on the command line, print the serial
number and status for each device along with values for those arguments
interpreted as properties.
"""
if len(argv) > 1:
props_to_get = argv[1:]
else:
props_to_get = DEFAULT_PROPS_TO_GET
adb = FindADB()
if not adb:
raise Exception('Could not find ADB!')
proc = subprocess.Popen([adb, 'devices'], stdout=subprocess.PIPE)
code = proc.wait()
if code != 0:
raise Exception('Failure in ADB: could not find attached devices.')
header = ['Serial', 'Status']
header.extend(props_to_get)
output_lines = [header]
for line in proc.stdout:
line = line.rstrip()
if line != 'List of devices attached' and line != '':
line_list = shlex.split(line)
serial = line_list[0]
status = line_list[1]
device_info = [serial, status]
device_info.extend(GetDeviceInfo(adb, serial, props_to_get))
output_lines.append(device_info)
PrintPrettyTable(output_lines)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bsd-3-clause
|
greggian/TapdIn
|
django/contrib/sessions/backends/cache.py
|
13
|
1937
|
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.cache import cache
class SessionStore(SessionBase):
"""
A cache-based session store.
"""
def __init__(self, session_key=None):
self._cache = cache
super(SessionStore, self).__init__(session_key)
def load(self):
session_data = self._cache.get(self.session_key)
if session_data is not None:
return session_data
self.create()
return {}
def create(self):
# Because a cache can fail silently (e.g. memcache), we don't know if
# we are failing to create a new session because of a key collision or
# because the cache is missing. So we try for a (large) number of times
# and then raise an exception. That's the risk you shoulder if using
# cache backing.
for i in xrange(10000):
self.session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
raise RuntimeError("Unable to create a new session key.")
def save(self, must_create=False):
if must_create:
func = self._cache.add
else:
func = self._cache.set
result = func(self.session_key, self._get_session(no_load=must_create),
self.get_expiry_age())
if must_create and not result:
raise CreateError
def exists(self, session_key):
if self._cache.has_key(session_key):
return True
return False
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
self._cache.delete(session_key)
|
apache-2.0
|
ottermegazord/ottermegazord.github.io
|
onexi/data_processing/s05_genPlots.py
|
1
|
1460
|
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import pdb
import sys
plt.style.use("ggplot")
os.chdir("..")
ipath = "./Data/Final_Data/"
ifile = "Final_Data"
opath = "./Data/Final_Data/Neighborhoods/"
imgpath = "./Plots/Neighborhood_TS/"
ext = ".csv"
input_var = raw_input("Run mode (analysis/plot): ")
if input_var == "analysis":
df = pd.read_csv(ipath + ifile + ext, low_memory=False)
df2 = df.groupby(["TIME", "NEIGHBORHOOD"]).mean().unstack()
time = df["TIME"].unique().tolist()
nhood = df["NEIGHBORHOOD"].unique().tolist()
nhood = [x for x in nhood if str(x) != 'nan']
for n in nhood:
mean = []
for t in time:
mean.append(df2.loc[t, ("AV_PER_SQFT", n)])
out_df = pd.DataFrame({'TIME': time, 'MEAN_AV_PER_SQFT': mean})
out_df.to_csv(opath + n + ext, index=False)
elif input_var == "plot":
def makePlot(x, y, xlabel, ylabel, title, filename):
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x_pos, y, color='green')
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.xticks(x_pos, x, fontsize=8)
plt.savefig(filename, bbox_inches="tight", dpi=300)
plt.close()
nhood_files = os.listdir(opath)
for f in nhood_files:
nhood = f[:-4]
df = pd.read_csv(opath + f, low_memory=False)
makePlot(x=df["TIME"].tolist(), y=df["MEAN_AV_PER_SQFT"].tolist(), ylabel="AVG LAND VALUE ($/sqft)", xlabel="TIME (year)", title=nhood, filename=imgpath + nhood +".png")
|
mit
|
sean93park/mozjs24
|
js/src/python/mozbuild/mozbuild/frontend/sandbox.py
|
3
|
13484
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
r"""Python sandbox implementation for build files.
This module contains classes for Python sandboxes that execute in a
highly-controlled environment.
The main class is `Sandbox`. This provides an execution environment for Python
code.
The behavior inside sandboxes is mostly regulated by the `GlobalNamespace` and
`LocalNamespace` classes. These represent the global and local namespaces in
the sandbox, respectively.
Code in this module takes a different approach to exception handling compared
to what you'd see elsewhere in Python. Arguments to built-in exceptions like
KeyError are machine parseable. This machine-friendly data is used to present
user-friendly error messages in the case of errors.
"""
from __future__ import unicode_literals
import copy
import os
import sys
from contextlib import contextmanager
from mozbuild.util import (
ReadOnlyDefaultDict,
ReadOnlyDict,
)
class GlobalNamespace(dict):
"""Represents the globals namespace in a sandbox.
This is a highly specialized dictionary employing light magic.
At the crux we have the concept of a restricted keys set. Only very
specific keys may be retrieved or mutated. The rules are as follows:
- The '__builtins__' key is hardcoded and is read-only.
- The set of variables that can be assigned or accessed during
execution is passed into the constructor.
When variables are assigned to, we verify assignment is allowed. Assignment
is allowed if the variable is known (set defined at constructor time) and
if the value being assigned is the expected type (also defined at
constructor time).
When variables are read, we first try to read the existing value. If a
value is not found and it is defined in the allowed variables set, we
return the default value for it. We don't assign default values until
they are accessed because this makes debugging the end-result much
simpler. Instead of a data structure with lots of empty/default values,
you have a data structure with only the values that were read or touched.
Instantiators of this class are given a backdoor to perform setting of
arbitrary values. e.g.
ns = GlobalNamespace()
with ns.allow_all_writes():
ns['foo'] = True
ns['bar'] = True # KeyError raised.
"""
# The default set of builtins.
BUILTINS = ReadOnlyDict({
# Only real Python built-ins should go here.
'None': None,
'False': False,
'True': True,
})
def __init__(self, allowed_variables=None, builtins=None):
"""Create a new global namespace having specific variables.
allowed_variables is a dict of the variables that can be queried and
mutated. Keys in this dict are the strings representing keys in this
namespace which are valid. Values are tuples of stored type, assigned
type, default value, and a docstring describing the purpose of the variable.
builtins is the value to use for the special __builtins__ key. If not
defined, the BUILTINS constant attached to this class is used. The
__builtins__ object is read-only.
"""
builtins = builtins or self.BUILTINS
assert isinstance(builtins, ReadOnlyDict)
dict.__init__(self, {'__builtins__': builtins})
self._allowed_variables = allowed_variables or {}
# We need to record this because it gets swallowed as part of
# evaluation.
self.last_name_error = None
self._allow_all_writes = False
def __getitem__(self, name):
try:
return dict.__getitem__(self, name)
except KeyError:
pass
# The variable isn't present yet. Fall back to VARIABLES.
default = self._allowed_variables.get(name, None)
if default is None:
self.last_name_error = KeyError('global_ns', 'get_unknown', name)
raise self.last_name_error
dict.__setitem__(self, name, copy.deepcopy(default[2]))
return dict.__getitem__(self, name)
def __setitem__(self, name, value):
if self._allow_all_writes:
dict.__setitem__(self, name, value)
return
# We don't need to check for name.isupper() here because LocalNamespace
# only sends variables our way if isupper() is True.
stored_type, input_type, default, docs = \
self._allowed_variables.get(name, (None, None, None, None))
# Variable is unknown.
if stored_type is None:
self.last_name_error = KeyError('global_ns', 'set_unknown', name,
value)
raise self.last_name_error
# If the incoming value is not the type we store, we try to convert
# it to that type. This relies on proper coercion rules existing. This
# is the responsibility of whoever defined the symbols: a type should
# not be in the allowed set if the constructor function for the stored
# type does not accept an instance of that type.
if not isinstance(value, stored_type):
if not isinstance(value, input_type):
self.last_name_error = ValueError('global_ns', 'set_type', name,
value, input_type)
raise self.last_name_error
value = stored_type(value)
dict.__setitem__(self, name, value)
@contextmanager
def allow_all_writes(self):
"""Allow any variable to be written to this instance.
This is used as a context manager. When activated, all writes
(__setitem__ calls) are allowed. When the context manager is exited,
the instance goes back to its default behavior of only allowing
whitelisted mutations.
"""
self._allow_all_writes = True
yield self
self._allow_all_writes = False
class LocalNamespace(dict):
"""Represents the locals namespace in a Sandbox.
This behaves like a dict except with some additional behavior tailored
to our sandbox execution model.
Under normal rules of exec(), doing things like += could have interesting
consequences. Keep in mind that a += is really a read, followed by the
creation of a new variable, followed by a write. If the read came from the
global namespace, then the write would go to the local namespace, resulting
in fragmentation. This is not desired.
LocalNamespace proxies reads and writes for global-looking variables
(read: UPPERCASE) to the global namespace. This means that attempting to
read or write an unknown variable results in exceptions raised from the
GlobalNamespace.
"""
def __init__(self, global_ns):
"""Create a local namespace associated with a GlobalNamespace."""
dict.__init__(self)  # initialize this instance rather than a throwaway dict literal
self._globals = global_ns
self.last_name_error = None
def __getitem__(self, name):
if name.isupper():
return self._globals[name]
return dict.__getitem__(self, name)
def __setitem__(self, name, value):
if name.isupper():
self._globals[name] = value
return
dict.__setitem__(self, name, value)
class SandboxError(Exception):
def __init__(self, file_stack):
self.file_stack = file_stack
class SandboxExecutionError(SandboxError):
"""Represents errors encountered during execution of a Sandbox.
This is a simple container exception. Its purpose is to capture state
so something else can report on it.
"""
def __init__(self, file_stack, exc_type, exc_value, trace):
SandboxError.__init__(self, file_stack)
self.exc_type = exc_type
self.exc_value = exc_value
self.trace = trace
class SandboxLoadError(SandboxError):
"""Represents errors encountered when loading a file for execution.
This exception represents errors in a Sandbox that occurred as part of
loading a file. The error could have occurred in the course of executing
a file. If so, the file_stack will be non-empty and the file that caused
the load will be on top of the stack.
"""
def __init__(self, file_stack, trace, illegal_path=None, read_error=None):
SandboxError.__init__(self, file_stack)
self.trace = trace
self.illegal_path = illegal_path
self.read_error = read_error
class Sandbox(object):
"""Represents a sandbox for executing Python code.
This class both provides a sandbox for execution of a single mozbuild
frontend file as well as an interface to the results of that execution.
Sandbox is effectively a glorified wrapper around compile() + exec(). You
point it at some Python code and it executes it. The main difference from
executing Python code like normal is that the executed code is very limited
in what it can do: the sandbox only exposes a very limited set of Python
functionality. Only specific types and functions are available. This
prevents executed code from doing things like import modules, open files,
etc.
Sandboxes are bound to a mozconfig instance. These objects are produced by
the output of configure.
Sandbox instances can be accessed like dictionaries to facilitate result
retrieval. e.g. foo = sandbox['FOO']. Direct assignment is not allowed.
Each sandbox has associated with it a GlobalNamespace and LocalNamespace.
Only data stored in the GlobalNamespace is retrievable via the dict
interface. This is because the local namespace should be irrelevant: it
should only contain throwaway variables.
"""
def __init__(self, allowed_variables=None, builtins=None):
"""Initialize a Sandbox ready for execution.
The arguments are proxied to GlobalNamespace.__init__.
"""
self._globals = GlobalNamespace(allowed_variables=allowed_variables,
builtins=builtins)
self._locals = LocalNamespace(self._globals)
self._execution_stack = []
self.main_path = None
self.all_paths = set()
def exec_file(self, path):
"""Execute code at a path in the sandbox.
The path must be absolute.
"""
assert os.path.isabs(path)
source = None
try:
with open(path, 'rt') as fd:
source = fd.read()
except Exception as e:
raise SandboxLoadError(list(self._execution_stack),
sys.exc_info()[2], read_error=path)
self.exec_source(source, path)
def exec_source(self, source, path):
"""Execute Python code within a string.
The passed string should contain Python code to be executed. The string
will be compiled and executed.
You should almost always go through exec_file() because exec_source()
does not perform extra path normalization. This can cause relative
paths to behave weirdly.
"""
self._execution_stack.append(path)
if self.main_path is None:
self.main_path = path
self.all_paths.add(path)
# We don't have to worry about bytecode generation here because we are
# too low-level for that. However, we could add bytecode generation via
# the marshal module if parsing performance were ever an issue.
try:
# compile() inherits the __future__ from the module by default. We
# do want Unicode literals.
code = compile(source, path, 'exec')
exec(code, self._globals, self._locals)
except SandboxError as e:
raise e
except NameError as e:
# A NameError is raised when a local or global could not be found.
# The original KeyError has been dropped by the interpreter.
# However, we should have it cached in our namespace instances!
# Unless a script is doing something wonky like catching NameError
# itself (that would be silly), if there is an exception on the
# global namespace, that's our error.
actual = e
if self._globals.last_name_error is not None:
actual = self._globals.last_name_error
elif self._locals.last_name_error is not None:
actual = self._locals.last_name_error
raise SandboxExecutionError(list(self._execution_stack),
type(actual), actual, sys.exc_info()[2])
except Exception as e:
# Need to copy the stack otherwise we get a reference and that is
# mutated during the finally.
exc = sys.exc_info()
raise SandboxExecutionError(list(self._execution_stack), exc[0],
exc[1], exc[2])
finally:
self._execution_stack.pop()
# Dict interface proxies reads to global namespace.
def __len__(self):
return len(self._globals)
def __getitem__(self, name):
return self._globals[name]
def __iter__(self):
return iter(self._globals)
def iterkeys(self):
return self.__iter__()
def __contains__(self, key):
return key in self._globals
def get(self, key, default=None):
return self._globals.get(key, default)
|
mpl-2.0
|
chen0031/nupic
|
tests/swarming/nupic/swarming/experiments/legacy_cla_multistep/permutations.py
|
38
|
4800
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupicengine/frameworks/opf/expGenerator/ExpGenerator.pyc'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'consumption'
permutations = {
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'modelParams': {
'inferenceType': PermuteChoices(['NontemporalMultiStep', 'TemporalMultiStep']),
'sensorParams': {
'encoders': {
u'timestamp_timeOfDay': PermuteEncoder(
fieldName='timestamp',
encoderClass='DateEncoder.timeOfDay',
w=21,
radius=PermuteFloat(0.5, 12)),
u'timestamp_dayOfWeek': PermuteEncoder(
fieldName='timestamp',
encoderClass='DateEncoder.dayOfWeek',
w=21,
radius=PermuteFloat(1, 6)),
u'timestamp_weekend': PermuteEncoder(
fieldName='timestamp',
encoderClass='DateEncoder.weekend',
w=21,
radius=PermuteChoices([1])),
u'consumption': PermuteEncoder(
fieldName='consumption',
encoderClass='AdaptiveScalarEncoder',
w=21,
n=PermuteInt(28, 521),
clipInput=True),
},
},
'spParams': {
'synPermInactiveDec': PermuteFloat(0.005, 0.1),
},
'tpParams': {
'activationThreshold': PermuteInt(12, 16),
'minThreshold': PermuteInt(9, 12),
'pamLength': PermuteInt(1, 5),
},
'clParams': {
'alpha': PermuteFloat(0.0001, 0.1),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*consumption.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = "multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=\[1\]:window=1000:field=consumption")
minimize = "multiStepBestPredictions:multiStep:errorMetric='altMAPE':steps=\[1\]:window=1000:field=consumption"
minParticlesPerSwarm = None
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
of the variables in the permutations dict. It should return True for a valid
combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
|
agpl-3.0
|
manahl/pytest-plugins
|
pytest-shutil/pytest_shutil/run.py
|
1
|
8562
|
"""
Testing tools for running cmdline methods
"""
import sys
import os
import imp
import logging
from functools import update_wrapper
import inspect
import textwrap
from contextlib import closing
import subprocess
from mock import patch
import execnet
from six.moves import cPickle # @UnresolvedImport
from . import cmdline
try:
# Python 3
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
try: # Python 2
str_type = basestring
except NameError: # Python 3
str_type = str
log = logging.getLogger(__name__)
# TODO: add option to return results as a pipe to avoid buffering
# large amounts of output
def run(cmd, stdin=None, capture_stdout=True, capture_stderr=False,
check_rc=True, background=False, **kwargs):
"""
Run a command; raises `subprocess.CalledProcessError` on failure.
Parameters
----------
stdin : file object
text piped to standard input
capture_stdout : `bool` or `stream`
If set, stdout will be captured and returned
capture_stderr : `bool`
If set, stderr will be piped to stdout and returned
**kwargs : optional arguments
Other arguments are passed to Popen()
"""
log.debug('exec: %s' % str(cmd))
stdout = subprocess.PIPE if capture_stdout is True else capture_stdout if capture_stdout else None
stderr = subprocess.STDOUT if capture_stderr else None
stdin_arg = None if stdin is None else subprocess.PIPE
p = subprocess.Popen(cmd, stdin=stdin_arg, stdout=stdout, stderr=stderr, **kwargs)
if background:
return p
(out, _) = p.communicate(stdin)
if out is not None and not isinstance(out, str_type):
try:
out = out.decode('utf-8')
except:
log.warning("Unable to decode command output to UTF-8")
if check_rc and p.returncode != 0:
err_msg = ((out if out else 'No output') if capture_stdout is True
else '<not captured>')
cmd = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.error("Command failed: \"%s\"\n%s" % (cmd, err_msg.strip()))
ex = subprocess.CalledProcessError(p.returncode, cmd)
ex.output = err_msg
raise ex
return out
def run_as_main(fn, *argv):
""" Run a given function as if it was the system entry point,
eg for testing scripts.
Eg::
from scripts.Foo import main
run_as_main(main, 'foo','bar')
This is equivalent to ``Foo foo bar``, assuming
``scripts.Foo.main`` is registered as an entry point.
"""
with patch("sys.argv", new=['progname'] + list(argv)):
log.info("run_as_main: %s" % str(argv))
return fn()
def run_module_as_main(module, argv=[]):
""" Run a given module as if it was the system entry point.
"""
where = os.path.dirname(module.__file__)
filename = os.path.basename(module.__file__)
filename = os.path.splitext(filename)[0] + ".py"
with patch("sys.argv", new=argv):
imp.load_source('__main__', os.path.join(where, filename))
def _evaluate_fn_source(src, *args, **kwargs):
locals_ = {}
eval(compile(src, '<string>', 'single'), {}, locals_)
fn = next(iter(locals_.values()))
if isinstance(fn, staticmethod):
fn = fn.__get__(None, object)
return fn(*args, **kwargs)
def _invoke_method(obj, name, *args, **kwargs):
return getattr(obj, name)(*args, **kwargs)
def _find_class_from_staticmethod(fn):
for _, cls in inspect.getmembers(sys.modules[fn.__module__], inspect.isclass):
for name, member in inspect.getmembers(cls):
if member is fn or (isinstance(member, staticmethod) and member.__get__(None, object) is fn):
return cls, name
return None, None
def _make_pickleable(fn):
# return a pickleable function followed by a tuple of initial arguments
# could use partial but this is more efficient
try:
cPickle.dumps(fn, protocol=0)
except (TypeError, cPickle.PickleError, AttributeError):
pass
else:
return fn, ()
if inspect.ismethod(fn):
name, self_ = fn.__name__, fn.__self__
if self_ is None: # Python 2 unbound method
self_ = fn.im_class
return _invoke_method, (self_, name)
elif inspect.isfunction(fn) and fn.__module__ in sys.modules:
cls, name = _find_class_from_staticmethod(fn)
if (cls, name) != (None, None):
try:
cPickle.dumps((cls, name), protocol=0)
except cPickle.PicklingError:
pass
else:
return _invoke_method, (cls, name)
# Fall back to sending the source code
return _evaluate_fn_source, (textwrap.dedent(inspect.getsource(fn)),)
def _run_in_subprocess_redirect_stdout(fd):
import os # @Reimport
import sys # @Reimport
sys.stdout.close()
os.dup2(fd, 1)
os.close(fd)
sys.stdout = os.fdopen(1, 'w', 1)
def _run_in_subprocess_remote_fn(channel):
from six.moves import cPickle # @UnresolvedImport @Reimport # NOQA
fn, args, kwargs = cPickle.loads(channel.receive(None))
channel.send(cPickle.dumps(fn(*args, **kwargs), protocol=0))
def run_in_subprocess(fn, python=sys.executable, cd=None, timeout=None):
""" Wrap a function to run in a subprocess. The function must be
pickleable or otherwise must be totally self-contained; it must not
reference a closure or any globals. It can also be the source of a
function (def fn(...): ...).
Raises execnet.RemoteError on exception.
"""
pkl_fn, preargs = (_evaluate_fn_source, (fn,)) if isinstance(fn, str) else _make_pickleable(fn)
spec = '//'.join(filter(None, ['popen', 'python=' + python, 'chdir=' + cd if cd else None]))
def inner(*args, **kwargs):
# execnet sends stdout to /dev/null :(
fix_stdout = sys.version_info < (3, 0, 0) # Python 3 passes close_fds=True to subprocess.Popen
with ExitStack() as stack:
with ExitStack() as stack2:
if fix_stdout:
fd = os.dup(1)
stack2.callback(os.close, fd)
gw = execnet.makegateway(spec) # @UndefinedVariable
stack.callback(gw.exit)
if fix_stdout:
with closing(gw.remote_exec(_run_in_subprocess_remote_fn)) as chan:
chan.send(cPickle.dumps((_run_in_subprocess_redirect_stdout, (fd,), {}), protocol=0))
chan.receive(None)
with closing(gw.remote_exec(_run_in_subprocess_remote_fn)) as chan:
payload = (pkl_fn, tuple(i for t in (preargs, args) for i in t), kwargs)
chan.send(cPickle.dumps(payload, protocol=0))
return cPickle.loads(chan.receive(timeout))
return inner if isinstance(fn, str) else update_wrapper(inner, fn)
def run_with_coverage(cmd, pytestconfig, coverage=None, cd=None, **kwargs):
"""
Run a given command with coverage enabled. This won't make any sense
if the command isn't a python script.
This must be run within a pytest session that has been setup with
the '--cov=xxx' options, and therefore requires the pytestconfig
argument that can be retrieved from the standard pytest funcarg
of the same name.
Parameters
----------
cmd: `List`
Command to run
pytestconfig: `pytest._config.Config`
Pytest configuration object
coverage: `str`
Path to the coverage executable
cd: `str`
If not None, will change to this directory before running the cmd.
This is the directory that the coverage files will be created in.
kwargs: keyword arguments
Any extra arguments to pass to `pkglib.cmdline.run`
Returns
-------
`str` standard output
Examples
--------
>>> def test_example(pytestconfig):
... cmd = ['python','myscript.py']
... run_with_coverage(cmd, pytestconfig)
"""
if isinstance(cmd, str):
cmd = [cmd]
if coverage is None:
coverage = [sys.executable, '-mcoverage.__main__']
elif isinstance(coverage, str):
coverage = [coverage]
args = coverage + ['run', '-p']
if getattr(pytestconfig.option, 'cov_source', None):
source_dirs = ",".join(pytestconfig.option.cov_source)
args += ['--source=%s' % source_dirs]
args += cmd
if cd:
with cmdline.chdir(cd):
return run(args, **kwargs)
return run(args, **kwargs)
|
mit
|
joliveros/bitmex-websocket
|
tests/test_instrument.py
|
1
|
2865
|
import alog
import pytest
from mock import PropertyMock, MagicMock, mock
from bitmex_websocket import Instrument
from bitmex_websocket._instrument import SubscribeToAtLeastOneChannelException, \
SubscribeToSecureChannelException
from bitmex_websocket.constants import Channels, SecureChannels, \
SecureInstrumentChannels
from tests.helpers import message_fixtures
fixtures = message_fixtures()
orderBookL2_data = fixtures['orderBookL2']
instrument_data = fixtures['instrument']
class TestInstrument(object):
def test_raises_exception_no_channel_subscribed(self):
with pytest.raises(SubscribeToAtLeastOneChannelException):
Instrument()
def test_only_public_channels(self):
instrument = Instrument(channels=[Channels.connected])
assert [Channels.connected] == instrument.channels
def test_public_and_secure_channels_are_in_channels_list(self, mocker):
header_mock = mocker.patch(
'bitmex_websocket._bitmex_websocket.BitMEXWebsocket.header')
header_mock.return_value = []
channels = [
Channels.connected, SecureChannels.margin]
instrument = Instrument(should_auth=True, channels=channels)
assert channels == instrument.channels
def test_raises_exception_if_secure_channels_are_specified_without_auth(
self):
with pytest.raises(SubscribeToSecureChannelException):
channels = [
Channels.connected, SecureChannels.margin]
instrument = Instrument(channels=channels)
assert channels == instrument.channels
def test_does_not_raise_exception_when_subscribing_secure_channel(
self,
mocker):
header_mock = mocker.patch(
'bitmex_websocket._bitmex_websocket.BitMEXWebsocket.header')
header_mock.return_value = []
channels = [
Channels.connected, SecureChannels.margin]
instrument = Instrument(channels=channels, should_auth=True)
instrument.emit('open')
assert channels == instrument.channels
def test_subscribe_to_public_channels(self, mocker):
header_mock = mocker.patch(
'bitmex_websocket._bitmex_websocket.BitMEXWebsocket.header')
header_mock.return_value = []
subscribe_mock: MagicMock = mocker.patch(
'bitmex_websocket._bitmex_websocket.BitMEXWebsocket.subscribe')
channels = [
Channels.connected,
SecureChannels.margin,
SecureInstrumentChannels.order
]
instrument = Instrument(channels=channels, should_auth=True)
instrument.subscribe_channels()
assert channels == instrument.channels
subscribe_mock.assert_has_calls([mock.call('margin:XBTUSD'),
mock.call('order:XBTUSD')])
|
mit
|
pizzathief/scipy
|
scipy/misc/doccer.py
|
21
|
1732
|
''' Utilities to allow inserting docstring fragments for common
parameters into function and method docstrings'''
import numpy as np
from .._lib import doccer as _ld
__all__ = ['docformat', 'inherit_docstring_from', 'indentcount_lines',
'filldoc', 'unindent_dict', 'unindent_string']
@np.deprecate(message="scipy.misc.docformat is deprecated in Scipy 1.3.0")
def docformat(docstring, docdict=None):
return _ld.docformat(docstring, docdict)
@np.deprecate(message="scipy.misc.inherit_docstring_from is deprecated "
"in SciPy 1.3.0")
def inherit_docstring_from(cls):
return _ld.inherit_docstring_from(cls)
@np.deprecate(message="scipy.misc.extend_notes_in_docstring is deprecated "
"in SciPy 1.3.0")
def extend_notes_in_docstring(cls, notes):
return _ld.extend_notes_in_docstring(cls, notes)
@np.deprecate(message="scipy.misc.replace_notes_in_docstring is deprecated "
"in SciPy 1.3.0")
def replace_notes_in_docstring(cls, notes):
return _ld.replace_notes_in_docstring(cls, notes)
@np.deprecate(message="scipy.misc.indentcount_lines is deprecated "
"in SciPy 1.3.0")
def indentcount_lines(lines):
return _ld.indentcount_lines(lines)
@np.deprecate(message="scipy.misc.filldoc is deprecated in SciPy 1.3.0")
def filldoc(docdict, unindent_params=True):
return _ld.filldoc(docdict, unindent_params)
@np.deprecate(message="scipy.misc.unindent_dict is deprecated in SciPy 1.3.0")
def unindent_dict(docdict):
return _ld.unindent_dict(docdict)
@np.deprecate(message="scipy.misc.unindent_string is deprecated in SciPy 1.3.0")
def unindent_string(docstring):
return _ld.unindent_string(docstring)
|
bsd-3-clause
|
andris210296/andris-projeto
|
backend/build_scripts/babel/i18n_extractor.py
|
35
|
2400
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
import shutil
import sys
import os
def patch(system):
def sys(cmd):
print cmd
return system(cmd)
return sys
os.system = patch(os.system)
# workaround to add src to path
babel_dir = os.path.dirname(__file__)
logging.info("babel dir: %s" % babel_dir)
proj_dir = os.path.join(babel_dir, "..", '..')
proj_dir = os.path.normpath(proj_dir)
logging.info("project dir: %s" % proj_dir)
sys.path.append(os.path.join(proj_dir, 'appengine'))
sys.path.append(os.path.join(proj_dir, 'apps'))
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
import settings
def create_or_update_catalog(loc, compile_target, msgs_pot):
result = os.system("pybabel update -l %s -d %s -i %s" % (loc, compile_target, msgs_pot))
if result != 0:
os.system("pybabel init -l %s -d %s -i %s" % (loc, compile_target, msgs_pot))
def compile_po_files(compile_target, locale_target):
for root, dirs, files in os.walk(compile_target):
file_name = "messages.po" if "messages.po" in files else None
if file_name:
po_file = os.path.join(root, file_name)
mo_dir = root.replace(os.path.join('.', 'locale'), locale_target)
mo_file = os.path.join(mo_dir, "messages.mo")
if not os.path.exists(mo_dir):
os.makedirs(mo_dir)
print "Created dir: %s" % mo_dir
c = "pybabel compile -f -i %s -o %s " % (po_file, mo_file)
print c
os.system(c)
if __name__ == "__main__":
compile_target = os.path.join(".", "locale")
target = os.path.join(proj_dir, 'appengine')
if os.path.sep == r'/':
compile_targets = target + ' ' + os.path.join(proj_dir, "venv")
else:
compile_targets = target
# if len(sys.argv) == 1:
babel_cfg = os.path.join(babel_dir, "babel.cfg")
msgs_pot = os.path.join(compile_target, "messages.pot")
os.system("pybabel extract -F %s -o %s %s" % (babel_cfg, msgs_pot, compile_targets))
locales = settings.LOCALES
for loc in locales:
create_or_update_catalog(loc, compile_target, msgs_pot)
locale_target = os.path.join(target, "locale")
compile_po_files(compile_target, locale_target)
|
mit
|
apollo13/ansible
|
test/lib/ansible_test/_internal/powershell_import_analysis.py
|
31
|
3190
|
"""Analyze powershell import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from .io import (
read_text_file,
)
from .util import (
display,
)
from .util_common import (
resolve_csharp_ps_util,
)
from .data import (
data_context,
)
def get_powershell_module_utils_imports(powershell_targets):
"""Return a dictionary of module_utils names mapped to sets of powershell file paths.
:type powershell_targets: list[TestTarget]
:rtype: dict[str, set[str]]
"""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = dict([(module_util, set()) for module_util in module_utils])
for target_path in imports_by_target_path:
for module_util in imports_by_target_path[target_path]:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils():
"""Return a list of available module_utils imports.
:rtype: set[str]
"""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils):
"""Return a list of module_utils imports found in the specified source file.
:type path: str
:type module_utils: set[str]
:rtype: set[str]
"""
imports = set()
code = read_text_file(path)
if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
|
gpl-3.0
|
stevekuznetsov/ansible
|
test/units/plugins/connection/test_network_cli.py
|
48
|
5631
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from io import StringIO
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleConnectionFailure
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import network_cli
class TestConnectionClass(unittest.TestCase):
@patch("ansible.plugins.connection.network_cli.terminal_loader")
@patch("ansible.plugins.connection.network_cli._Connection._connect")
def test_network_cli__connect(self, mocked_super, mocked_terminal_loader):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
conn.ssh = None
self.assertRaises(AnsibleConnectionFailure, conn._connect)
mocked_terminal_loader.reset_mock()
mocked_terminal_loader.get.return_value = None
pc.network_os = 'invalid'
self.assertRaises(AnsibleConnectionFailure, conn._connect)
self.assertFalse(mocked_terminal_loader.all.called)
mocked_terminal_loader.reset_mock()
mocked_terminal_loader.get.return_value = 'valid'
conn._connect()
self.assertEqual(conn._terminal, 'valid')
def test_network_cli_open_shell(self):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
conn.ssh = MagicMock()
conn.receive = MagicMock()
mock_terminal = MagicMock()
conn._terminal = mock_terminal
mock__connect = MagicMock()
conn._connect = mock__connect
conn.open_shell()
self.assertTrue(mock__connect.called)
self.assertTrue(mock_terminal.on_open_shell.called)
self.assertFalse(mock_terminal.on_authorize.called)
mock_terminal.reset_mock()
conn._play_context.become = True
conn._play_context.become_pass = 'password'
conn.open_shell()
self.assertTrue(mock__connect.called)
mock_terminal.on_authorize.assert_called_with(passwd='password')
def test_network_cli_close_shell(self):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
terminal = MagicMock(supports_multiplexing=False)
conn._terminal = terminal
conn.close_shell()
conn._shell = MagicMock()
conn.close_shell()
self.assertTrue(terminal.on_close_shell.called)
terminal.supports_multiplexing = True
conn.close_shell()
self.assertIsNone(conn._shell)
def test_network_cli_exec_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
mock_open_shell = MagicMock()
conn.open_shell = mock_open_shell
mock_send = MagicMock(return_value='command response')
conn.send = mock_send
# test sending a single command and converting to dict
rc, out, err = conn.exec_command('command')
self.assertEqual(out, 'command response')
self.assertTrue(mock_open_shell.called)
mock_send.assert_called_with({'command': 'command'})
mock_open_shell.reset_mock()
# test sending a json string
rc, out, err = conn.exec_command(json.dumps({'command': 'command'}))
self.assertEqual(out, 'command response')
mock_send.assert_called_with({'command': 'command'})
self.assertTrue(mock_open_shell.called)
mock_open_shell.reset_mock()
conn._shell = MagicMock()
# test _shell already open
rc, out, err = conn.exec_command('command')
self.assertEqual(out, 'command response')
self.assertFalse(mock_open_shell.called)
mock_send.assert_called_with({'command': 'command'})
def test_network_cli_send(self):
pc = PlayContext()
new_stdin = StringIO()
conn = network_cli.Connection(pc, new_stdin)
mock__terminal = MagicMock()
mock__terminal.terminal_stdout_re = [re.compile('device#')]
mock__terminal.terminal_stderr_re = [re.compile('^ERROR')]
conn._terminal = mock__terminal
mock__shell = MagicMock()
conn._shell = mock__shell
response = """device#command
command response
device#
"""
mock__shell.recv.return_value = response
output = conn.send({'command': 'command'})
mock__shell.sendall.assert_called_with('command\r')
self.assertEqual(output, 'command response')
mock__shell.reset_mock()
mock__shell.recv.return_value = "ERROR: error message"
with self.assertRaises(AnsibleConnectionFailure) as exc:
conn.send({'command': 'command'})
self.assertEqual(str(exc.exception), 'ERROR: error message')
|
gpl-3.0
|
hanw/p4c-fpga
|
ext/src/gtest/xcode/Scripts/versiongenerate.py
|
3088
|
4536
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
|
apache-2.0
|
fennekki/unikko
|
unikko/output/html.py
|
1
|
3802
|
from yattag import Doc, indent
from sys import stderr
def _inner_recurse_tags(obj, tree, doc, tag, text):
"""Execute inner loop structure of HTML generation.
Params:
obj (Object): The object currently being looped over
tree (Object): A VOTL Object containing the subtree-to-be
-generated
doc: yattag Doc().doc
tag: yattag Doc().tag
text: yattag Doc().text
"""
if obj.object_type == "header":
# Only generate a header element for headers
with tag("h{:d}".format(obj.level + 1)):
text(obj.first_line)
elif obj.object_type == "body":
paragraphs = []
current = []
for line in obj.lines:
if line == "":
if len(current) > 0:
paragraphs.append("\n".join(current))
current = []
else:
current.append(line)
# Last paragraph
if len(current) > 0:
paragraphs.append("\n".join(current))
for paragraph in paragraphs:
with tag("p"):
text(paragraph)
elif obj.object_type == "body-pre":
# Just print the content in a pre-tag
with tag("pre"):
for line in obj.lines:
text(line, "\n")
elif obj.object_type[-4:] == "-pre":
# Class is name without -pre
klass = obj.object_type[:-4]
# Custom preformatted
with tag("pre", klass=klass):
for line in obj.lines:
text(line, "\n")
else:
# Body, but custom
klass = obj.object_type
paragraphs = []
current = []
for line in obj.lines:
# Construct paragraphs into paragraphs from lines in obj
if line == "":
if len(current) > 0:
paragraphs.append("\n".join(current))
current = []
else:
current.append(line)
if len(current) > 0:
paragraphs.append("\n".join(current))
for paragraph in paragraphs:
# Each generated paragraph becomes a <p> tag
with tag("p", klass=klass):
text(paragraph)
_recurse_tags(obj, doc, tag, text)
def _recurse_tags(tree, doc, tag, text):
"""Recursively generate HTML from children.
Params:
tree (Object): A VOTL Object containing the subtree-to-be
-generated
doc: yattag Doc().doc
tag: yattag Doc().tag
text: yattag Doc().text
"""
if len(tree.children) > 0:
for o in tree.children:
if len(o.children) > 0:
# Only create divs for objects with children
with tag("div"):
_inner_recurse_tags(o, tree, doc, tag, text)
else:
_inner_recurse_tags(o, tree, doc, tag, text)
def html_from_tree(tree):
"""Generate indented HTML from VOTL Object tree.
Params:
tree (Object): A VOTL Object containing the tree, from which HTML will
be generated.
Returns: String containing a pretty-formatted HTML document.
"""
doc, tag, text = Doc().tagtext()
doc.asis("<!DOCTYPE html>")
try:
first = tree.children[0]
except IndexError:
print("Error generating markdown: Tree has no children!", file=stderr)
return ""
if first.object_type == "header":
title = first.first_line
else:
title = "Untitled"
with tag("html"):
with tag("head"):
with tag("title"):
text(title)
doc.stag("meta", charset="utf-8")
with tag("body"):
_recurse_tags(tree, doc, tag, text)
return indent(doc.getvalue())
|
bsd-2-clause
|
HonzaKral/django
|
tests/template_tests/filter_tests/test_addslashes.py
|
473
|
1202
|
from django.template.defaultfilters import addslashes
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class AddslashesTests(SimpleTestCase):
@setup({'addslashes01': '{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}'})
def test_addslashes01(self):
output = self.engine.render_to_string('addslashes01', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
@setup({'addslashes02': '{{ a|addslashes }} {{ b|addslashes }}'})
def test_addslashes02(self):
output = self.engine.render_to_string('addslashes02', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'',
)
def test_backslashes(self):
self.assertEqual(addslashes(r'\ : backslashes, too'), '\\\\ : backslashes, too')
def test_non_string_input(self):
self.assertEqual(addslashes(123), '123')
|
bsd-3-clause
|
wenyu1001/scrapy
|
scrapy/spiders/sitemap.py
|
11
|
2736
|
import re
import logging
import six
from scrapy.spiders import Spider
from scrapy.http import Request, XmlResponse
from scrapy.utils.sitemap import Sitemap, sitemap_urls_from_robots
from scrapy.utils.gz import gunzip, is_gzipped
logger = logging.getLogger(__name__)
class SitemapSpider(Spider):
sitemap_urls = ()
sitemap_rules = [('', 'parse')]
sitemap_follow = ['']
sitemap_alternate_links = False
def __init__(self, *a, **kw):
super(SitemapSpider, self).__init__(*a, **kw)
self._cbs = []
for r, c in self.sitemap_rules:
if isinstance(c, six.string_types):
c = getattr(self, c)
self._cbs.append((regex(r), c))
self._follow = [regex(x) for x in self.sitemap_follow]
def start_requests(self):
for url in self.sitemap_urls:
yield Request(url, self._parse_sitemap)
def _parse_sitemap(self, response):
if response.url.endswith('/robots.txt'):
for url in sitemap_urls_from_robots(response.text):
yield Request(url, callback=self._parse_sitemap)
else:
body = self._get_sitemap_body(response)
if body is None:
logger.warning("Ignoring invalid sitemap: %(response)s",
{'response': response}, extra={'spider': self})
return
s = Sitemap(body)
if s.type == 'sitemapindex':
for loc in iterloc(s, self.sitemap_alternate_links):
if any(x.search(loc) for x in self._follow):
yield Request(loc, callback=self._parse_sitemap)
elif s.type == 'urlset':
for loc in iterloc(s):
for r, c in self._cbs:
if r.search(loc):
yield Request(loc, callback=c)
break
def _get_sitemap_body(self, response):
"""Return the sitemap body contained in the given response,
or None if the response is not a sitemap.
"""
if isinstance(response, XmlResponse):
return response.body
elif is_gzipped(response):
return gunzip(response.body)
elif response.url.endswith('.xml'):
return response.body
elif response.url.endswith('.xml.gz'):
return gunzip(response.body)
def regex(x):
if isinstance(x, six.string_types):
return re.compile(x)
return x
def iterloc(it, alt=False):
for d in it:
yield d['loc']
# Also consider alternate URLs (xhtml:link rel="alternate")
if alt and 'alternate' in d:
for l in d['alternate']:
yield l
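# Hedged usage sketch (not part of the original module): SitemapSpider is meant
# to be subclassed, as in Scrapy's documentation; the URL, rule and callback
# below are invented for illustration.
#
# class ExampleSitemapSpider(SitemapSpider):
#     name = 'example_sitemap'
#     sitemap_urls = ['http://www.example.com/robots.txt']
#     sitemap_rules = [('/product/', 'parse_product')]
#
#     def parse_product(self, response):
#         yield {'url': response.url}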
|
bsd-3-clause
|
sriprasanna/django-1.3.1
|
django/contrib/gis/geos/linestring.py
|
411
|
5568
|
from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1: coords = args[0]
else: coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords: ndim = len(coords[0])
else: raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in xrange(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2: raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim==3))
for i in xrange(ncoords):
if numpy_coords: cs[i] = coords[i,:]
elif isinstance(coords[i], Point): cs[i] = coords[i].tuple
else: cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims #
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3): raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in xrange(len(self))]
if numpy: return numpy.array(lst) # ARRRR!
else: return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz: return None
else: return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minLength = 4
_init_func = capi.create_linearring
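# Hedged usage sketch (not part of the original module), mirroring the
# constructor examples in LineString.__init__'s docstring above (requires a
# working GEOS installation):
# >>> ls = LineString((0, 0), (1, 1), (2, 2))
# >>> len(ls)   # number of points, via __len__
# 3
# >>> ls[1]     # indexing goes through the coordinate sequence
# (1.0, 1.0)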
|
bsd-3-clause
|
agry/NGECore2
|
scripts/mobiles/endor/shimmering_lantern_bird.py
|
2
|
1625
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('shimmering_lantern_bird')
mobileTemplate.setLevel(61)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setHideType("Leathery Hide")
mobileTemplate.setHideAmount(41)
mobileTemplate.setBoneType("Avian Bones")
mobileTemplate.setBoneAmount(46)
mobileTemplate.setSocialGroup("lantern")
mobileTemplate.setAssistRange(12)
mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)
mobileTemplate.setStalker(False)
templates = Vector()
templates.add('object/mobile/shared_lantern_bird.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_claw_4')
attacks.add('bm_slash_4')
attacks.add('bm_wing_buffet_4')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('shimmering_lantern_bird', mobileTemplate)
return
|
lgpl-3.0
|
jrabbit/ubotu-fr
|
plugins/Owner/plugin.py
|
6
|
25949
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import gc
import os
import sys
import time
import socket
import linecache
if sys.version_info >= (2, 5, 0):
import re as sre
else:
import sre
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
from supybot.commands import *
import supybot.irclib as irclib
import supybot.plugin as plugin
import supybot.plugins as plugins
import supybot.drivers as drivers
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.registry as registry
import supybot.callbacks as callbacks
###
# supybot.commands.
###
def registerDefaultPlugin(command, plugin):
command = callbacks.canonicalName(command)
conf.registerGlobalValue(conf.supybot.commands.defaultPlugins,
command, registry.String(plugin, ''))
# This must be set, or the quotes won't be removed.
conf.supybot.commands.defaultPlugins.get(command).set(plugin)
def registerRename(plugin, command=None, newName=None):
g = conf.registerGlobalValue(conf.supybot.commands.renames, plugin,
registry.SpaceSeparatedSetOfStrings([], """Determines what commands
in this plugin are to be renamed."""))
if command is not None:
g().add(command)
v = conf.registerGlobalValue(g, command, registry.String('', ''))
if newName is not None:
v.setValue(newName) # In case it was already registered.
return v
else:
return g
def renameCommand(cb, name, newName):
assert not hasattr(cb, newName), 'Cannot rename over existing attributes.'
assert newName == callbacks.canonicalName(newName), \
'newName must already be canonized.'
if name != newName:
method = getattr(cb.__class__, name)
setattr(cb.__class__, newName, method)
delattr(cb.__class__, name)
registerDefaultPlugin('list', 'Misc')
registerDefaultPlugin('help', 'Misc')
registerDefaultPlugin('ignore', 'Admin')
registerDefaultPlugin('reload', 'Owner')
registerDefaultPlugin('enable', 'Owner')
registerDefaultPlugin('disable', 'Owner')
registerDefaultPlugin('unignore', 'Admin')
registerDefaultPlugin('capabilities', 'User')
registerDefaultPlugin('addcapability', 'Admin')
registerDefaultPlugin('removecapability', 'Admin')
class holder(object):
pass
# This is used so we can support a "log" command as well as a "self.log"
# Logger.
class LogProxy(object):
"""<text>
Logs <text> to the global Supybot log at critical priority. Useful for
marking logfiles for later searching.
"""
__name__ = 'log' # Necessary for help.
def __init__(self, log):
self.log = log
self.im_func = holder()
self.im_func.func_name = 'log'
def __call__(self, irc, msg, args, text):
log.critical(text)
irc.replySuccess()
__call__ = wrap(__call__, ['text'])
def __getattr__(self, attr):
return getattr(self.log, attr)
class Owner(callbacks.Plugin):
# This plugin must be first; its priority must be lowest; otherwise odd
# things will happen when adding callbacks.
def __init__(self, irc=None):
if irc is not None:
assert not irc.getCallback(self.name())
self.__parent = super(Owner, self)
self.__parent.__init__(irc)
# Setup log object/command.
self.log = LogProxy(self.log)
# Setup command flood detection.
self.commands = ircutils.FloodQueue(60)
# Setup plugins and default plugins for commands.
#
# This needs to be done before we connect to any networks so that the
# children of supybot.plugins (the actual plugins) exist and can be
# loaded.
for (name, s) in registry._cache.iteritems():
if 'alwaysLoadDefault' in name or 'alwaysLoadImportant' in name:
continue
if name.startswith('supybot.plugins'):
try:
(_, _, name) = registry.split(name)
except ValueError: # unpack list of wrong size.
continue
# This is just for the prettiness of the configuration file.
# There are no plugins that are all-lowercase, so we'll at
# least attempt to capitalize them.
if name == name.lower():
name = name.capitalize()
conf.registerPlugin(name)
if name.startswith('supybot.commands.defaultPlugins'):
try:
(_, _, _, name) = registry.split(name)
except ValueError: # unpack list of wrong size.
continue
registerDefaultPlugin(name, s)
# Setup Irc objects, connected to networks. If world.ircs is already
# populated, chances are that we're being reloaded, so don't do this.
if not world.ircs:
for network in conf.supybot.networks():
try:
self._connect(network)
except socket.error, e:
self.log.error('Could not connect to %s: %s.', network, e)
except Exception, e:
self.log.exception('Exception connecting to %s:', network)
self.log.error('Could not connect to %s: %s.', network, e)
def callPrecedence(self, irc):
return ([], [cb for cb in irc.callbacks if cb is not self])
def outFilter(self, irc, msg):
if msg.command == 'PRIVMSG' and not world.testing:
if ircutils.strEqual(msg.args[0], irc.nick):
self.log.warning('Tried to send a message to myself: %r.', msg)
return None
return msg
def isCommandMethod(self, name):
return name == 'log' or \
self.__parent.isCommandMethod(name)
def reset(self):
# This has to be done somewhere, I figure here is as good place as any.
callbacks.IrcObjectProxy._mores.clear()
self.__parent.reset()
def _connect(self, network, serverPort=None, password='', ssl=False):
try:
group = conf.supybot.networks.get(network)
(server, port) = group.servers()[0]
except (registry.NonExistentRegistryEntry, IndexError):
if serverPort is None:
raise ValueError, 'connect requires a (server, port) ' \
'if the network is not registered.'
conf.registerNetwork(network, password, ssl)
serverS = '%s:%s' % serverPort
conf.supybot.networks.get(network).servers.append(serverS)
assert conf.supybot.networks.get(network).servers(), \
'No servers are set for the %s network.' % network
self.log.info('Creating new Irc for %s.', network)
newIrc = irclib.Irc(network)
for irc in world.ircs:
if irc != newIrc:
newIrc.state.history = irc.state.history
driver = drivers.newDriver(newIrc)
self._loadPlugins(newIrc)
return newIrc
def _loadPlugins(self, irc):
self.log.info('Loading plugins (connecting to %s).', irc.network)
alwaysLoadImportant = conf.supybot.plugins.alwaysLoadImportant()
important = conf.supybot.commands.defaultPlugins.importantPlugins()
for (name, value) in conf.supybot.plugins.getValues(fullNames=False):
if irc.getCallback(name) is None:
load = value()
if not load and name in important:
if alwaysLoadImportant:
s = '%s is configured not to be loaded, but is being '\
'loaded anyway because ' \
'supybot.plugins.alwaysLoadImportant is True.'
self.log.warning(s, name)
load = True
if load:
# We don't load plugins that don't start with a capital
# letter.
if name[0].isupper() and not irc.getCallback(name):
# This is debug because each log logs its beginning.
self.log.debug('Loading %s.', name)
try:
m = plugin.loadPluginModule(name,
ignoreDeprecation=True)
plugin.loadPluginClass(irc, m)
except callbacks.Error, e:
# This is just an error message.
log.warning(str(e))
except (plugins.NoSuitableDatabase, ImportError), e:
s = 'Failed to load %s: %s' % (name, e)
if not s.endswith('.'):
s += '.'
log.warning(s)
except Exception, e:
log.exception('Failed to load %s:', name)
else:
# Let's import the module so configuration is preserved.
try:
_ = plugin.loadPluginModule(name)
except Exception, e:
log.debug('Attempted to load %s to preserve its '
'configuration, but load failed: %s',
name, e)
world.starting = False
def do376(self, irc, msg):
networkGroup = conf.supybot.networks.get(irc.network)
for channel in networkGroup.channels():
irc.queueMsg(networkGroup.channels.join(channel))
do422 = do377 = do376
def doPrivmsg(self, irc, msg):
assert self is irc.callbacks[0], \
'Owner isn\'t first callback: %r' % irc.callbacks
if ircmsgs.isCtcp(msg):
return
s = callbacks.addressed(irc.nick, msg)
if s:
ignored = ircdb.checkIgnored(msg.prefix)
if ignored:
self.log.info('Ignoring command from %s.', msg.prefix)
return
maximum = conf.supybot.abuse.flood.command.maximum()
self.commands.enqueue(msg)
if conf.supybot.abuse.flood.command() \
and self.commands.len(msg) > maximum \
and not ircdb.checkCapability(msg.prefix, 'owner'):
punishment = conf.supybot.abuse.flood.command.punishment()
banmask = ircutils.banmask(msg.prefix)
self.log.info('Ignoring %s for %s seconds due to an apparent '
'command flood.', banmask, punishment)
ircdb.ignores.add(banmask, time.time() + punishment)
irc.reply('You\'ve given me %s commands within the last '
'minute; I\'m now ignoring you for %s.' %
(maximum,
utils.timeElapsed(punishment, seconds=False)))
return
try:
tokens = callbacks.tokenize(s, channel=msg.args[0])
self.Proxy(irc, msg, tokens)
except SyntaxError, e:
irc.queueMsg(callbacks.error(msg, str(e)))
def announce(self, irc, msg, args, text):
"""<text>
Sends <text> to all channels the bot is currently on and not
lobotomized in.
"""
u = ircdb.users.getUser(msg.prefix)
text = 'Announcement from my owner (%s): %s' % (u.name, text)
for channel in irc.state.channels:
c = ircdb.channels.getChannel(channel)
if not c.lobotomized:
irc.queueMsg(ircmsgs.privmsg(channel, text))
irc.noReply()
announce = wrap(announce, ['text'])
def defaultplugin(self, irc, msg, args, optlist, command, plugin):
"""[--remove] <command> [<plugin>]
Sets the default plugin for <command> to <plugin>. If --remove is
given, removes the current default plugin for <command>. If no plugin
is given, returns the current default plugin set for <command>. See
also, supybot.commands.defaultPlugins.importantPlugins.
"""
remove = False
for (option, arg) in optlist:
if option == 'remove':
remove = True
(_, cbs) = irc.findCallbacksForArgs([command])
if remove:
try:
conf.supybot.commands.defaultPlugins.unregister(command)
irc.replySuccess()
except registry.NonExistentRegistryEntry:
s = 'I don\'t have a default plugin set for that command.'
irc.error(s)
elif not cbs:
irc.errorInvalid('command', command)
elif plugin:
if not plugin.isCommand(command):
irc.errorInvalid('command in the %s plugin' % plugin, command)
registerDefaultPlugin(command, plugin.name())
irc.replySuccess()
else:
try:
irc.reply(conf.supybot.commands.defaultPlugins.get(command)())
except registry.NonExistentRegistryEntry:
s = 'I don\'t have a default plugin set for that command.'
irc.error(s)
defaultplugin = wrap(defaultplugin, [getopts({'remove': ''}),
'commandName',
additional('plugin')])
def ircquote(self, irc, msg, args, s):
"""<string to be sent to the server>
Sends the raw string given to the server.
"""
try:
m = ircmsgs.IrcMsg(s)
except Exception, e:
irc.error(utils.exnToString(e))
else:
irc.queueMsg(m)
irc.noReply()
ircquote = wrap(ircquote, ['text'])
def quit(self, irc, msg, args, text):
"""[<text>]
Exits the bot with the QUIT message <text>. If <text> is not given,
the default quit message (supybot.plugins.Owner.quitMsg) will be used.
If there is no default quitMsg set, your nick will be used.
"""
text = text or self.registryValue('quitMsg') or msg.nick
irc.noReply()
m = ircmsgs.quit(text)
world.upkeep()
for irc in world.ircs[:]:
irc.queueMsg(m)
irc.die()
quit = wrap(quit, [additional('text')])
def flush(self, irc, msg, args):
"""takes no arguments
Runs all the periodic flushers in world.flushers. This includes
flushing all logs and all configuration changes to disk.
"""
world.flush()
irc.replySuccess()
flush = wrap(flush)
def upkeep(self, irc, msg, args, level):
"""[<level>]
Runs the standard upkeep stuff (flushes and gc.collects()). If given
a level, runs that level of upkeep (currently, the only supported
level is "high", which causes the bot to flush a lot of caches as well
        as do normal upkeep stuff).
"""
L = []
if level == 'high':
L.append(format('Regexp cache flushed: %n cleared.',
(len(sre._cache), 'regexp')))
sre.purge()
L.append(format('Pattern cache flushed: %n cleared.',
(len(ircutils._patternCache), 'compiled pattern')))
ircutils._patternCache.clear()
L.append(format('hostmaskPatternEqual cache flushed: %n cleared.',
(len(ircutils._hostmaskPatternEqualCache),
'result')))
ircutils._hostmaskPatternEqualCache.clear()
L.append(format('ircdb username cache flushed: %n cleared.',
(len(ircdb.users._nameCache),
'username to id mapping')))
ircdb.users._nameCache.clear()
L.append(format('ircdb hostmask cache flushed: %n cleared.',
(len(ircdb.users._hostmaskCache),
'hostmask to id mapping')))
ircdb.users._hostmaskCache.clear()
L.append(format('linecache line cache flushed: %n cleared.',
(len(linecache.cache), 'line')))
linecache.clearcache()
sys.exc_clear()
collected = world.upkeep()
if gc.garbage:
L.append('Garbage! %r.' % gc.garbage)
L.append(format('%n collected.', (collected, 'object')))
irc.reply(' '.join(L))
upkeep = wrap(upkeep, [additional(('literal', ['high']))])
def load(self, irc, msg, args, optlist, name):
"""[--deprecated] <plugin>
Loads the plugin <plugin> from any of the directories in
conf.supybot.directories.plugins; usually this includes the main
installed directory and 'plugins' in the current directory.
--deprecated is necessary if you wish to load deprecated plugins.
"""
ignoreDeprecation = False
for (option, argument) in optlist:
if option == 'deprecated':
ignoreDeprecation = True
if name.endswith('.py'):
name = name[:-3]
if irc.getCallback(name):
irc.error('%s is already loaded.' % name.capitalize())
return
try:
module = plugin.loadPluginModule(name, ignoreDeprecation)
except plugin.Deprecated:
irc.error('%s is deprecated. Use --deprecated '
'to force it to load.' % name.capitalize())
return
except ImportError, e:
if name in str(e):
irc.error('No plugin named %s exists.' % utils.str.dqrepr(name))
else:
irc.error(str(e))
return
cb = plugin.loadPluginClass(irc, module)
name = cb.name() # Let's normalize this.
conf.registerPlugin(name, True)
irc.replySuccess()
load = wrap(load, [getopts({'deprecated': ''}), 'something'])
def reload(self, irc, msg, args, name):
"""<plugin>
Unloads and subsequently reloads the plugin by name; use the 'list'
command to see a list of the currently loaded plugins.
"""
callbacks = irc.removeCallback(name)
if callbacks:
module = sys.modules[callbacks[0].__module__]
if hasattr(module, 'reload'):
x = module.reload()
try:
module = plugin.loadPluginModule(name)
if hasattr(module, 'reload'):
module.reload(x)
for callback in callbacks:
callback.die()
del callback
gc.collect() # This makes sure the callback is collected.
callback = plugin.loadPluginClass(irc, module)
irc.replySuccess()
except ImportError:
for callback in callbacks:
irc.addCallback(callback)
irc.error('No plugin %s exists.' % name)
else:
irc.error('There was no plugin %s.' % name)
reload = wrap(reload, ['something'])
def unload(self, irc, msg, args, name):
"""<plugin>
Unloads the callback by name; use the 'list' command to see a list
of the currently loaded callbacks. Obviously, the Owner plugin can't
be unloaded.
"""
if ircutils.strEqual(name, self.name()):
irc.error('You can\'t unload the %s plugin.' % name)
return
# Let's do this so even if the plugin isn't currently loaded, it doesn't
        # keep trying to load.
conf.registerPlugin(name, False)
callbacks = irc.removeCallback(name)
if callbacks:
for callback in callbacks:
callback.die()
del callback
gc.collect()
irc.replySuccess()
else:
irc.error('There was no plugin %s.' % name)
unload = wrap(unload, ['something'])
def defaultcapability(self, irc, msg, args, action, capability):
"""{add|remove} <capability>
Adds or removes (according to the first argument) <capability> from the
default capabilities given to users (the configuration variable
supybot.capabilities stores these).
"""
if action == 'add':
conf.supybot.capabilities().add(capability)
irc.replySuccess()
elif action == 'remove':
try:
conf.supybot.capabilities().remove(capability)
irc.replySuccess()
except KeyError:
if ircdb.isAntiCapability(capability):
irc.error('That capability wasn\'t in '
'supybot.capabilities.')
else:
anticap = ircdb.makeAntiCapability(capability)
conf.supybot.capabilities().add(anticap)
irc.replySuccess()
defaultcapability = wrap(defaultcapability,
[('literal', ['add','remove']), 'capability'])
def disable(self, irc, msg, args, plugin, command):
"""[<plugin>] <command>
Disables the command <command> for all users (including the owners).
If <plugin> is given, only disables the <command> from <plugin>. If
you want to disable a command for most users but not for yourself, set
a default capability of -plugin.command or -command (if you want to
disable the command in all plugins).
"""
if command in ('enable', 'identify'):
irc.error('You can\'t disable %s.' % command)
return
if plugin:
if plugin.isCommand(command):
pluginCommand = '%s.%s' % (plugin.name(), command)
conf.supybot.commands.disabled().add(pluginCommand)
else:
irc.error('%s is not a command in the %s plugin.' %
(command, plugin.name()))
return
self._disabled.add(pluginCommand, plugin.name())
else:
conf.supybot.commands.disabled().add(command)
self._disabled.add(command)
irc.replySuccess()
disable = wrap(disable, [optional('plugin'), 'commandName'])
def enable(self, irc, msg, args, plugin, command):
"""[<plugin>] <command>
Enables the command <command> for all users. If <plugin>
        is given, only enables the <command> from <plugin>. This command is
the inverse of disable.
"""
try:
if plugin:
command = '%s.%s' % (plugin.name(), command)
self._disabled.remove(command, plugin.name())
else:
self._disabled.remove(command)
conf.supybot.commands.disabled().remove(command)
irc.replySuccess()
except KeyError:
irc.error('That command wasn\'t disabled.')
enable = wrap(enable, [optional('plugin'), 'commandName'])
def rename(self, irc, msg, args, plugin, command, newName):
"""<plugin> <command> <new name>
Renames <command> in <plugin> to the <new name>.
"""
if not plugin.isCommand(command):
what = 'command in the %s plugin' % plugin.name()
irc.errorInvalid(what, command)
if hasattr(plugin, newName):
irc.error('The %s plugin already has an attribute named %s.' %
(plugin, newName))
return
registerRename(plugin.name(), command, newName)
renameCommand(plugin, command, newName)
irc.replySuccess()
rename = wrap(rename, ['plugin', 'commandName', 'commandName'])
def unrename(self, irc, msg, args, plugin):
"""<plugin>
Removes all renames in <plugin>. The plugin will be reloaded after
this command is run.
"""
try:
conf.supybot.commands.renames.unregister(plugin.name())
except registry.NonExistentRegistryEntry:
irc.errorInvalid('plugin', plugin.name())
self.reload(irc, msg, [plugin.name()]) # This makes the replySuccess.
unrename = wrap(unrename, ['plugin'])
Class = Owner
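# Hedged illustration (not part of the original plugin): registerRename() above
# records a command rename in the registry, the same mechanism the `rename`
# command relies on; the plugin and command names here are invented.
#
# registerRename('Misc', 'last', 'lastmsg')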
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
bsd-3-clause
|
sdg32/flask-celery3-boilerplate
|
migrations/env.py
|
557
|
2883
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import logging
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.readthedocs.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if script.upgrade_ops.is_empty():
directives[:] = []
logger.info('No changes in schema detected.')
engine = engine_from_config(config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(connection=connection,
target_metadata=target_metadata,
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
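# Hedged usage note (not part of the original script): with this env.py,
# Alembic's --sql flag selects offline mode, e.g.
#     alembic upgrade head --sql    # emits SQL via run_migrations_offline()
#     alembic upgrade head          # applies changes via run_migrations_online()
# When driven through Flask-Migrate, the equivalent is typically a
# `db upgrade --sql` / `db upgrade` call on the project's manage script.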
|
mit
|
buguelos/odoo
|
addons/product/pricelist.py
|
7
|
26429
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from itertools import chain
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import except_orm
import openerp.addons.decimal_precision as dp
class price_type(osv.osv):
"""
    The price type is used to indicate which field in the product form
    is a price and in which currency this price is expressed.
    When a field is a price, you can use it in pricelists to base
    sale and purchase prices on some fields of the product.
"""
def _price_field_get(self, cr, uid, context=None):
mf = self.pool.get('ir.model.fields')
ids = mf.search(cr, uid, [('model','in', (('product.product'),('product.template'))), ('ttype','=','float')], context=context)
res = []
for field in mf.browse(cr, uid, ids, context=context):
res.append((field.name, field.field_description))
return res
def _get_field_currency(self, cr, uid, fname, ctx):
ids = self.search(cr, uid, [('field','=',fname)], context=ctx)
return self.browse(cr, uid, ids, context=ctx)[0].currency_id
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr,uid,uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_name = "product.price.type"
_description = "Price Type"
_columns = {
"name" : fields.char("Price Name", required=True, translate=True, help="Name of this kind of price."),
"active" : fields.boolean("Active"),
"field" : fields.selection(_price_field_get, "Product Field", size=32, required=True, help="Associated field in the product form."),
"currency_id" : fields.many2one('res.currency', "Currency", required=True, help="The currency the field is expressed in."),
}
_defaults = {
"active": lambda *args: True,
"currency_id": _get_currency
}
#----------------------------------------------------------
# Price lists
#----------------------------------------------------------
class product_pricelist_type(osv.osv):
_name = "product.pricelist.type"
_description = "Pricelist Type"
_columns = {
'name': fields.char('Name', required=True, translate=True),
'key': fields.char('Key', required=True, help="Used in the code to select specific prices based on the context. Keep unchanged."),
}
class product_pricelist(osv.osv):
def _pricelist_type_get(self, cr, uid, context=None):
pricelist_type_obj = self.pool.get('product.pricelist.type')
pricelist_type_ids = pricelist_type_obj.search(cr, uid, [], order='name')
pricelist_types = pricelist_type_obj.read(cr, uid, pricelist_type_ids, ['key','name'], context=context)
res = []
for type in pricelist_types:
res.append((type['key'],type['name']))
return res
_name = "product.pricelist"
_description = "Pricelist"
_order = 'name'
_columns = {
'name': fields.char('Pricelist Name', required=True, translate=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the pricelist without removing it."),
'type': fields.selection(_pricelist_type_get, 'Pricelist Type', required=True),
'version_id': fields.one2many('product.pricelist.version', 'pricelist_id', 'Pricelist Versions', copy=True),
'currency_id': fields.many2one('res.currency', 'Currency', required=True),
'company_id': fields.many2one('res.company', 'Company'),
}
def name_get(self, cr, uid, ids, context=None):
result= []
if not all(ids):
return result
for pl in self.browse(cr, uid, ids, context=context):
name = pl.name + ' ('+ pl.currency_id.name + ')'
result.append((pl.id,name))
return result
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if name and operator == '=' and not args:
# search on the name of the pricelist and its currency, opposite of name_get(),
# Used by the magic context filter in the product search view.
query_args = {'name': name, 'limit': limit, 'lang': (context or {}).get('lang') or 'en_US'}
query = """SELECT p.id
FROM ((
SELECT pr.id, pr.name
FROM product_pricelist pr JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE pr.name || ' (' || cur.name || ')' = %(name)s
)
UNION (
SELECT tr.res_id as id, tr.value as name
FROM ir_translation tr JOIN
product_pricelist pr ON (
pr.id = tr.res_id AND
tr.type = 'model' AND
tr.name = 'product.pricelist,name' AND
tr.lang = %(lang)s
) JOIN
res_currency cur ON
(pr.currency_id = cur.id)
WHERE tr.value || ' (' || cur.name || ')' = %(name)s
)
) p
ORDER BY p.name"""
if limit:
query += " LIMIT %(limit)s"
cr.execute(query, query_args)
ids = [r[0] for r in cr.fetchall()]
# regular search() to apply ACLs - may limit results below limit in some cases
ids = self.search(cr, uid, [('id', 'in', ids)], limit=limit, context=context)
if ids:
return self.name_get(cr, uid, ids, context)
return super(product_pricelist, self).name_search(
cr, uid, name, args, operator=operator, context=context, limit=limit)
def _get_currency(self, cr, uid, ctx):
comp = self.pool.get('res.users').browse(cr, uid, uid).company_id
if not comp:
comp_id = self.pool.get('res.company').search(cr, uid, [])[0]
comp = self.pool.get('res.company').browse(cr, uid, comp_id)
return comp.currency_id.id
_defaults = {
'active': lambda *a: 1,
"currency_id": _get_currency
}
def price_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
return dict((key, dict((key, price[0]) for key, price in value.items())) for key, value in self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner, context=context).items())
def price_rule_get_multi(self, cr, uid, ids, products_by_qty_by_partner, context=None):
"""multi products 'price_get'.
@param ids:
@param products_by_qty:
@param partner:
@param context: {
'date': Date of the pricelist (%Y-%m-%d),}
@return: a dict of dict with product_id as key and a dict 'price by pricelist' as value
"""
if not ids:
ids = self.pool.get('product.pricelist').search(cr, uid, [], context=context)
results = {}
for pricelist in self.browse(cr, uid, ids, context=context):
subres = self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context)
for product_id,price in subres.items():
results.setdefault(product_id, {})
results[product_id][pricelist.id] = price
return results
def _price_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
return dict((key, price[0]) for key, price in self._price_rule_get_multi(cr, uid, pricelist, products_by_qty_by_partner, context=context).items())
def _price_rule_get_multi(self, cr, uid, pricelist, products_by_qty_by_partner, context=None):
context = context or {}
date = context.get('date') or time.strftime('%Y-%m-%d')
date = date[0:10]
products = map(lambda x: x[0], products_by_qty_by_partner)
currency_obj = self.pool.get('res.currency')
product_obj = self.pool.get('product.template')
product_uom_obj = self.pool.get('product.uom')
price_type_obj = self.pool.get('product.price.type')
if not products:
return {}
version = False
for v in pricelist.version_id:
if ((v.date_start is False) or (v.date_start <= date)) and ((v.date_end is False) or (v.date_end >= date)):
version = v
break
if not version:
raise osv.except_osv(_('Warning!'), _("At least one pricelist has no active version !\nPlease create or activate one."))
categ_ids = {}
for p in products:
categ = p.categ_id
while categ:
categ_ids[categ.id] = True
categ = categ.parent_id
categ_ids = categ_ids.keys()
is_product_template = products[0]._name == "product.template"
if is_product_template:
prod_tmpl_ids = [tmpl.id for tmpl in products]
# all variants of all products
prod_ids = [p.id for p in
list(chain.from_iterable([t.product_variant_ids for t in products]))]
else:
prod_ids = [product.id for product in products]
prod_tmpl_ids = [product.product_tmpl_id.id for product in products]
# Load all rules
cr.execute(
'SELECT i.id '
'FROM product_pricelist_item AS i '
'WHERE (product_tmpl_id IS NULL OR product_tmpl_id = any(%s)) '
'AND (product_id IS NULL OR (product_id = any(%s))) '
'AND ((categ_id IS NULL) OR (categ_id = any(%s))) '
'AND (price_version_id = %s) '
'ORDER BY sequence, min_quantity desc',
(prod_tmpl_ids, prod_ids, categ_ids, version.id))
item_ids = [x[0] for x in cr.fetchall()]
items = self.pool.get('product.pricelist.item').browse(cr, uid, item_ids, context=context)
price_types = {}
results = {}
for product, qty, partner in products_by_qty_by_partner:
results[product.id] = 0.0
rule_id = False
price = False
# Final unit price is computed according to `qty` in the `qty_uom_id` UoM.
# An intermediary unit price may be computed according to a different UoM, in
# which case the price_uom_id contains that UoM.
# The final price will be converted to match `qty_uom_id`.
qty_uom_id = context.get('uom') or product.uom_id.id
price_uom_id = product.uom_id.id
qty_in_product_uom = qty
if qty_uom_id != product.uom_id.id:
try:
qty_in_product_uom = product_uom_obj._compute_qty(
cr, uid, context['uom'], qty, product.uom_id.id or product.uos_id.id)
except except_orm:
# Ignored - incompatible UoM in context, use default product UoM
pass
for rule in items:
if rule.min_quantity and qty_in_product_uom < rule.min_quantity:
continue
if is_product_template:
if rule.product_tmpl_id and product.id != rule.product_tmpl_id.id:
continue
if rule.product_id and \
(product.product_variant_count > 1 or product.product_variant_ids[0].id != rule.product_id.id):
# product rule acceptable on template if has only one variant
continue
else:
if rule.product_tmpl_id and product.product_tmpl_id.id != rule.product_tmpl_id.id:
continue
if rule.product_id and product.id != rule.product_id.id:
continue
if rule.categ_id:
cat = product.categ_id
while cat:
if cat.id == rule.categ_id.id:
break
cat = cat.parent_id
if not cat:
continue
if rule.base == -1:
if rule.base_pricelist_id:
price_tmp = self._price_get_multi(cr, uid,
rule.base_pricelist_id, [(product,
qty, partner)], context=context)[product.id]
ptype_src = rule.base_pricelist_id.currency_id.id
price_uom_id = qty_uom_id
price = currency_obj.compute(cr, uid,
ptype_src, pricelist.currency_id.id,
price_tmp, round=False,
context=context)
elif rule.base == -2:
seller = False
for seller_id in product.seller_ids:
if (not partner) or (seller_id.name.id != partner):
continue
seller = seller_id
if not seller and product.seller_ids:
seller = product.seller_ids[0]
if seller:
qty_in_seller_uom = qty
seller_uom = seller.product_uom.id
if qty_uom_id != seller_uom:
qty_in_seller_uom = product_uom_obj._compute_qty(cr, uid, qty_uom_id, qty, to_uom_id=seller_uom)
price_uom_id = seller_uom
for line in seller.pricelist_ids:
if line.min_quantity <= qty_in_seller_uom:
price = line.price
else:
if rule.base not in price_types:
price_types[rule.base] = price_type_obj.browse(cr, uid, int(rule.base))
price_type = price_types[rule.base]
# price_get returns the price in the context UoM, i.e. qty_uom_id
price_uom_id = qty_uom_id
price = currency_obj.compute(
cr, uid,
price_type.currency_id.id, pricelist.currency_id.id,
product_obj._price_get(cr, uid, [product], price_type.field, context=context)[product.id],
round=False, context=context)
if price is not False:
price_limit = price
price = price * (1.0+(rule.price_discount or 0.0))
if rule.price_round:
price = tools.float_round(price, precision_rounding=rule.price_round)
convert_to_price_uom = (lambda price: product_uom_obj._compute_price(
cr, uid, product.uom_id.id,
price, price_uom_id))
if rule.price_surcharge:
price_surcharge = convert_to_price_uom(rule.price_surcharge)
price += price_surcharge
if rule.price_min_margin:
price_min_margin = convert_to_price_uom(rule.price_min_margin)
price = max(price, price_limit + price_min_margin)
if rule.price_max_margin:
price_max_margin = convert_to_price_uom(rule.price_max_margin)
price = min(price, price_limit + price_max_margin)
rule_id = rule.id
break
# Final price conversion to target UoM
price = product_uom_obj._compute_price(cr, uid, price_uom_id, price, qty_uom_id)
results[product.id] = (price, rule_id)
return results
def price_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
return dict((key, price[0]) for key, price in self.price_rule_get(cr, uid, ids, prod_id, qty, partner=partner, context=context).items())
def price_rule_get(self, cr, uid, ids, prod_id, qty, partner=None, context=None):
product = self.pool.get('product.product').browse(cr, uid, prod_id, context=context)
res_multi = self.price_rule_get_multi(cr, uid, ids, products_by_qty_by_partner=[(product, qty, partner)], context=context)
res = res_multi[prod_id]
return res
class product_pricelist_version(osv.osv):
_name = "product.pricelist.version"
_description = "Pricelist Version"
_columns = {
'pricelist_id': fields.many2one('product.pricelist', 'Price List',
required=True, select=True, ondelete='cascade'),
'name': fields.char('Name', required=True, translate=True),
'active': fields.boolean('Active',
help="When a version is duplicated it is set to non active, so that the " \
"dates do not overlaps with original version. You should change the dates " \
"and reactivate the pricelist"),
'items_id': fields.one2many('product.pricelist.item',
'price_version_id', 'Price List Items', required=True, copy=True),
'date_start': fields.date('Start Date', help="First valid date for the version."),
'date_end': fields.date('End Date', help="Last valid date for the version."),
'company_id': fields.related('pricelist_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_defaults = {
'active': lambda *a: 1,
}
def _check_date(self, cursor, user, ids, context=None):
for pricelist_version in self.browse(cursor, user, ids, context=context):
if not pricelist_version.active:
continue
where = []
if pricelist_version.date_start:
where.append("((date_end>='%s') or (date_end is null))" % (pricelist_version.date_start,))
if pricelist_version.date_end:
where.append("((date_start<='%s') or (date_start is null))" % (pricelist_version.date_end,))
cursor.execute('SELECT id ' \
'FROM product_pricelist_version ' \
'WHERE '+' and '.join(where) + (where and ' and ' or '')+
'pricelist_id = %s ' \
'AND active ' \
'AND id <> %s', (
pricelist_version.pricelist_id.id,
pricelist_version.id))
if cursor.fetchall():
return False
return True
_constraints = [
(_check_date, 'You cannot have 2 pricelist versions that overlap!',
['date_start', 'date_end'])
]
def copy(self, cr, uid, id, default=None, context=None):
# set active False to prevent overlapping active pricelist
# versions
if not default:
default = {}
default['active'] = False
return super(product_pricelist_version, self).copy(cr, uid, id, default, context=context)
class product_pricelist_item(osv.osv):
def _price_field_get(self, cr, uid, context=None):
pt = self.pool.get('product.price.type')
ids = pt.search(cr, uid, [], context=context)
result = []
for line in pt.browse(cr, uid, ids, context=context):
result.append((line.id, line.name))
result.append((-1, _('Other Pricelist')))
result.append((-2, _('Supplier Prices on the product form')))
return result
# Added default function to fetch the Price type Based on Pricelist type.
def _get_default_base(self, cr, uid, fields, context=None):
product_price_type_obj = self.pool.get('product.price.type')
if fields.get('type') == 'purchase':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field', '=', 'standard_price')], context=context)
elif fields.get('type') == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
else:
return -1
if not product_price_type_ids:
return False
else:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
return pricetype.id
_name = "product.pricelist.item"
_description = "Pricelist item"
_order = "sequence, min_quantity desc"
_defaults = {
'base': _get_default_base,
'min_quantity': lambda *a: 0,
'sequence': lambda *a: 5,
'price_discount': lambda *a: 0,
}
def _check_recursion(self, cr, uid, ids, context=None):
for obj_list in self.browse(cr, uid, ids, context=context):
if obj_list.base == -1:
main_pricelist = obj_list.price_version_id.pricelist_id.id
other_pricelist = obj_list.base_pricelist_id.id
if main_pricelist == other_pricelist:
return False
return True
def _check_margin(self, cr, uid, ids, context=None):
for item in self.browse(cr, uid, ids, context=context):
if item.price_max_margin and item.price_min_margin and (item.price_min_margin > item.price_max_margin):
return False
return True
_columns = {
'name': fields.char('Rule Name', help="Explicit rule name for this pricelist line."),
'price_version_id': fields.many2one('product.pricelist.version', 'Price List Version', required=True, select=True, ondelete='cascade'),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', ondelete='cascade', help="Specify a template if this rule only applies to one product template. Keep empty otherwise."),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Specify a product if this rule only applies to one product. Keep empty otherwise."),
'categ_id': fields.many2one('product.category', 'Product Category', ondelete='cascade', help="Specify a product category if this rule only applies to products belonging to this category or its children categories. Keep empty otherwise."),
'min_quantity': fields.integer('Min. Quantity', required=True,
help="For the rule to apply, bought/sold quantity must be greater "
"than or equal to the minimum quantity specified in this field.\n"
"Expressed in the default UoM of the product."
),
'sequence': fields.integer('Sequence', required=True, help="Gives the order in which the pricelist items will be checked. The evaluation gives highest priority to lowest sequence and stops as soon as a matching item is found."),
'base': fields.selection(_price_field_get, 'Based on', required=True, size=-1, help="Base price for computation."),
'base_pricelist_id': fields.many2one('product.pricelist', 'Other Pricelist'),
'price_surcharge': fields.float('Price Surcharge',
            digits_compute= dp.get_precision('Product Price'), help='Specify the fixed amount to add or subtract (if negative) to the amount calculated with the discount.'),
'price_discount': fields.float('Price Discount', digits=(16,4)),
'price_round': fields.float('Price Rounding',
digits_compute= dp.get_precision('Product Price'),
help="Sets the price so that it is a multiple of this value.\n" \
"Rounding is applied after the discount and before the surcharge.\n" \
"To have prices that end in 9.99, set rounding 10, surcharge -0.01" \
),
'price_min_margin': fields.float('Min. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the minimum amount of margin over the base price.'),
'price_max_margin': fields.float('Max. Price Margin',
digits_compute= dp.get_precision('Product Price'), help='Specify the maximum amount of margin over the base price.'),
'company_id': fields.related('price_version_id','company_id',type='many2one',
readonly=True, relation='res.company', string='Company', store=True)
}
_constraints = [
(_check_recursion, 'Error! You cannot assign the Main Pricelist as Other Pricelist in PriceList Item!', ['base_pricelist_id']),
(_check_margin, 'Error! The minimum margin should be lower than the maximum margin.', ['price_min_margin', 'price_max_margin'])
]
def product_id_change(self, cr, uid, ids, product_id, context=None):
if not product_id:
return {}
prod = self.pool.get('product.product').read(cr, uid, [product_id], ['code','name'])
if prod[0]['code']:
return {'value': {'name': prod[0]['code']}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
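# Hedged usage sketch (not part of the original module): price_get() above is
# the usual entry point from other OpenERP models; the ids and context below
# are invented for illustration.
#
# pricelist_obj = self.pool.get('product.pricelist')
# prices = pricelist_obj.price_get(cr, uid, [pricelist_id], product_id, 5.0,
#                                  partner=partner_id,
#                                  context={'date': '2015-01-01'})
# # `prices` maps each pricelist id to the unit price computed for 5 units.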
|
agpl-3.0
|
trudikampfschaf/flask-microblog
|
flask/lib/python2.7/site-packages/sqlalchemy/util/deprecations.py
|
34
|
3906
|
# util/deprecations.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Helpers related to deprecation of functions, methods, classes, other
functionality."""
from sqlalchemy import exc
import warnings
import re
from langhelpers import decorator
def warn_deprecated(msg, stacklevel=3):
warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel)
def warn_pending_deprecation(msg, stacklevel=3):
warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel)
def deprecated(version, message=None, add_deprecation_to_docstring=True):
"""Decorates a function and issues a deprecation warning on use.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s %s" % \
(version, (message or ''))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exc.SADeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def pending_deprecation(version, message=None,
add_deprecation_to_docstring=True):
"""Decorates a function and issues a pending deprecation warning on use.
:param version:
An approximate future version at which point the pending deprecation
will become deprecated. Not used in messaging.
:param message:
If provided, issue message in the warning. A sensible default
is used if not provided.
:param add_deprecation_to_docstring:
Default True. If False, the wrapped function's __doc__ is left
as-is. If True, the 'message' is prepended to the docs if
provided, or sensible default if message is omitted.
"""
if add_deprecation_to_docstring:
header = ".. deprecated:: %s (pending) %s" % \
(version, (message or ''))
else:
header = None
if message is None:
message = "Call to deprecated function %(func)s"
def decorate(fn):
return _decorate_with_warning(
fn, exc.SAPendingDeprecationWarning,
message % dict(func=fn.__name__), header)
return decorate
def _sanitize_restructured_text(text):
def repl(m):
type_, name = m.group(1, 2)
if type_ in ("func", "meth"):
name += "()"
return name
return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text)
def _decorate_with_warning(func, wtype, message, docstring_header=None):
"""Wrap a function with a warnings.warn and augmented docstring."""
message = _sanitize_restructured_text(message)
@decorator
def warned(fn, *args, **kwargs):
warnings.warn(wtype(message), stacklevel=3)
return fn(*args, **kwargs)
doc = func.__doc__ is not None and func.__doc__ or ''
if docstring_header is not None:
docstring_header %= dict(func=func.__name__)
docs = doc and doc.expandtabs().split('\n') or []
indent = ''
for line in docs[1:]:
text = line.lstrip()
if text:
indent = line[0:len(line) - len(text)]
break
point = min(len(docs), 1)
docs.insert(point, '\n' + indent + docstring_header.rstrip())
doc = '\n'.join(docs)
decorated = warned(func)
decorated.__doc__ = doc
return decorated
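# Hedged usage sketch (not part of the original module): the decorators above
# are applied like this; the function and message below are invented.
#
# @deprecated("0.7", "Use :func:`.new_helper` instead.")
# def old_helper(value):
#     return value
#
# Calling old_helper() then emits an SADeprecationWarning via warnings.warn(),
# and a ".. deprecated:: 0.7" header is added to its docstring.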
|
bsd-3-clause
|
ricardog/raster-project
|
projections/r2py/rparser.py
|
1
|
5180
|
from pyparsing import *
import re
ParserElement.enablePackrat()
from .tree import Node, Operator
import pdb
def rparser():
expr = Forward()
lparen = Literal("(").suppress()
rparen = Literal(")").suppress()
double = Word(nums + ".").setParseAction(lambda t:float(t[0]))
integer = pyparsing_common.signed_integer
number = pyparsing_common.number
ident = Word(initChars = alphas + "_", bodyChars = alphanums + "_" + ".")
string = dblQuotedString
funccall = Group(ident + lparen + Group(Optional(delimitedList(expr))) +
rparen + Optional(integer)).setResultsName("funccall")
operand = number | string | funccall | ident
expop = Literal('^')
multop = oneOf('* /')
plusop = oneOf('+ -')
introp = oneOf('| :')
expr << infixNotation(operand,
[(expop, 2, opAssoc.RIGHT),
(introp, 2, opAssoc.LEFT),
(multop, 2, opAssoc.LEFT),
(plusop, 2, opAssoc.LEFT),]).setResultsName('expr')
return expr
PARSER = rparser()
def parse(text):
def walk(l):
## ['log', [['cropland', '+', 1]]]
## ['poly', [['log', [['cropland', '+', 1]]], 3], 3]
## [[['factor', ['unSub'], 21], ':', ['poly', [['log', [['cropland', '+', 1]]], 3], 3], ':', ['poly', [['log', [['hpd', '+', 1]]], 3], 2]]]
if type(l) in (int, float):
return l
if isinstance(l, str):
if l == 'Intercept' or l == '"Intercept"':
return 1
elif l[0] == '"' and l[-1] == '"':
return l[1:-1]
else:
return l
if len(l) == 1 and type(l[0]) in (int, str, float, ParseResults):
return walk(l[0])
if l[0] == 'factor':
assert len(l) == 3, "unexpected number of arguments to factor"
assert len(l[1]) == 1, "argument to factor is an expression"
assert type(l[2]) == int, "second argument to factor is not an int"
return Node(Operator('=='), (Node(Operator('in'),
(l[1][0], 'float32[:]')), l[2]))
if l[0] == 'poly':
assert len(l) in (2, 3), "unexpected number of arguments to poly"
assert isinstance(l[1][1], int), "degree argument to poly is not an int"
inner = walk(l[1][0])
degree = l[1][1]
if len(l) == 2:
pwr = 1
else:
assert type(l[2]) == int, "power argument to poly is not an int"
pwr = l[2]
return Node(Operator('sel'), (Node(Operator('poly'), (inner, degree)),
pwr))
if l[0] == 'log':
assert len(l) == 2, "unexpected number of arguments to log"
args = walk(l[1])
return Node(Operator('log'), [args])
if l[0] == 'scale':
assert len(l[1]) in (3, 5), "unexpected number of arguments to scale"
args = walk(l[1][0])
return Node(Operator('scale'), [args] + l[1][1:])
if l[0] == 'I':
assert len(l) == 2, "unexpected number of arguments to I"
args = walk(l[1])
return Node(Operator('I'), [args])
# Only used for testing
if l[0] in ('sin', 'tan'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
args = walk(l[1])
return Node(Operator(l[0]), [args])
if l[0] in ('max', 'min', 'pow'):
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 2, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
right = walk(l[1][1])
return Node(Operator(l[0]), (left, right))
if l[0] == 'exp':
assert len(l) == 2, "unexpected number of arguments to exp"
args = walk(l[1])
return Node(Operator('exp'), [args])
if l[0] == 'clip':
assert len(l) == 2, "unexpected number of arguments to %s" % l[0]
assert len(l[1]) == 3, "unexpected number of arguments to %s" % l[0]
left = walk(l[1][0])
low = walk(l[1][1])
high = walk(l[1][2])
return Node(Operator(l[0]), (left, low, high))
if l[0] == 'inv_logit':
assert len(l) == 2, "unexpected number of arguments to inv_logit"
args = walk(l[1])
return Node(Operator('inv_logit'), [args])
## Only binary operators left
if len(l) == 1:
pdb.set_trace()
pass
assert len(l) % 2 == 1, "unexpected number of arguments for binary operator"
assert len(l) != 1, "unexpected number of arguments for binary operator"
## FIXME: this only works for associative operators. Need to either
## special-case division or include an attribute that specifies
## whether the op is associative.
left = walk(l.pop(0))
op = l.pop(0)
right = walk(l)
if type(right) != Node:
return Node(Operator(op), (left, right))
elif right.type.type == op:
return Node(Operator(op), (left, ) + right.args)
return Node(Operator(op), (left, right))
### FIXME: hack
if not isinstance(text, str):
text = str(text)
new_text = re.sub(r'newrange = c\((\d), (\d+)\)', r'\1, \2', text)
new_text = new_text.replace('rescale(', 'scale(')
nodes = PARSER.parseString(new_text, parseAll=True)
tree = walk(nodes)
if isinstance(tree, (str, int, float)):
tree = Node(Operator('I'), [tree])
return tree
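# A minimal usage sketch (hypothetical formula string; real inputs are the fitted
# R model formulas this module translates):
#   tree = parse('log(cropland + 1) + hpd')
# yields a Node tree rooted at Operator('+') whose left child is the
# Node(Operator('log'), ...) built by walk() above.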
|
apache-2.0
|
doduytrung/odoo-8.0
|
addons/website/tests/test_views.py
|
221
|
8517
|
# -*- coding: utf-8 -*-
import itertools
import unittest2
from lxml import etree as ET, html
from lxml.html import builder as h
from openerp.tests import common
def attrs(**kwargs):
return dict(('data-oe-%s' % key, str(value)) for key, value in kwargs.iteritems())
class TestViewSaving(common.TransactionCase):
def eq(self, a, b):
self.assertEqual(a.tag, b.tag)
self.assertEqual(a.attrib, b.attrib)
self.assertEqual((a.text or '').strip(), (b.text or '').strip())
self.assertEqual((a.tail or '').strip(), (b.tail or '').strip())
for ca, cb in itertools.izip_longest(a, b):
self.eq(ca, cb)
def setUp(self):
super(TestViewSaving, self).setUp()
self.arch = h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
)
self.view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'name': "Test View",
'type': 'qweb',
'arch': ET.tostring(self.arch, encoding='utf-8').decode('utf-8')
})
def test_embedded_extraction(self):
fields = self.registry('ir.ui.view').extract_embedded_fields(
self.cr, self.uid, self.arch, context=None)
expect = [
h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char')),
h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')),
]
for actual, expected in itertools.izip_longest(fields, expect):
self.eq(actual, expected)
def test_embedded_save(self):
embedded = h.SPAN("+00 00 000 00 0 000", attrs(
model='res.company', id=1, field='phone', type='char'))
self.registry('ir.ui.view').save_embedded_field(self.cr, self.uid, embedded)
company = self.registry('res.company').browse(self.cr, self.uid, 1)
self.assertEqual(company.phone, "+00 00 000 00 0 000")
@unittest2.skip("save conflict for embedded (saved by third party or previous version in page) not implemented")
def test_embedded_conflict(self):
e1 = h.SPAN("My Company", attrs(model='res.company', id=1, field='name'))
e2 = h.SPAN("Leeroy Jenkins", attrs(model='res.company', id=1, field='name'))
View = self.registry('ir.ui.view')
View.save_embedded_field(self.cr, self.uid, e1)
# FIXME: more precise exception
with self.assertRaises(Exception):
View.save_embedded_field(self.cr, self.uid, e2)
def test_embedded_to_field_ref(self):
View = self.registry('ir.ui.view')
embedded = h.SPAN("My Company", attrs(expression="bob"))
self.eq(
View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob'})
)
def test_to_field_ref_keep_attributes(self):
View = self.registry('ir.ui.view')
att = attrs(expression="bob", model="res.company", id=1, field="name")
att['id'] = "whop"
att['class'] = "foo bar"
embedded = h.SPAN("My Company", att)
self.eq(View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob', 'class': 'foo bar', 'id': 'whop'}))
def test_replace_arch(self):
replacement = h.P("Wheee")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, h.DIV("Wheee"))
def test_replace_arch_2(self):
replacement = h.DIV(h.P("Wheee"))
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, replacement)
def test_fixup_arch(self):
replacement = h.H1("I am the greatest title alive!")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div[1]/h3',
replacement)
self.eq(result, h.DIV(
h.DIV(
h.H3("I am the greatest title alive!"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
))
def test_multiple_xpath_matches(self):
with self.assertRaises(ValueError):
self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div/h3',
h.H6("Lol nope"))
def test_save(self):
Company = self.registry('res.company')
View = self.registry('ir.ui.view')
replacement = ET.tostring(h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN("Acme Corporation", attrs(model='res.company', id=1, field='name', expression="bob", type='char'))),
h.LI(h.SPAN("+12 3456789", attrs(model='res.company', id=1, field='phone', expression="edmund", type='char'))),
)
), encoding='utf-8')
View.save(self.cr, self.uid, res_id=self.view_id, value=replacement,
xpath='/div/div[2]')
company = Company.browse(self.cr, self.uid, 1)
self.assertEqual(company.name, "Acme Corporation")
self.assertEqual(company.phone, "+12 3456789")
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN({'t-field': "bob"})),
h.LI(h.SPAN({'t-field': "edmund"}))
))
)
)
def test_save_only_embedded(self):
Company = self.registry('res.company')
company_id = 1
Company.write(self.cr, self.uid, company_id, {'name': "Foo Corporation"})
node = html.tostring(h.SPAN(
"Acme Corporation",
attrs(model='res.company', id=company_id, field="name", expression='bob', type='char')))
self.registry('ir.ui.view').save(self.cr, self.uid, res_id=company_id, value=node)
company = Company.browse(self.cr, self.uid, company_id)
self.assertEqual(company.name, "Acme Corporation")
def test_field_tail(self):
View = self.registry('ir.ui.view')
replacement = ET.tostring(
h.LI(h.SPAN("+12 3456789", attrs(
model='res.company', id=1, type='char',
field='phone', expression="edmund")),
"whop whop"
), encoding="utf-8")
View.save(self.cr, self.uid, res_id = self.view_id, value=replacement,
xpath='/div/div[2]/ul/li[3]')
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN({'t-field': "edmund"}), "whop whop"),
))
)
)
|
agpl-3.0
|
shepdelacreme/ansible
|
lib/ansible/modules/cloud/amazon/aws_sgw_facts.py
|
21
|
11555
|
#!/usr/bin/python
# Copyright: (c) 2018, Loic BLOT (@nerzhul) <loic.blot@unix-experience.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This module is sponsored by E.T.A.I. (www.etai.fr)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_sgw_facts
short_description: Fetch AWS Storage Gateway facts
description:
- Fetch AWS Storage Gateway facts
version_added: "2.6"
requirements: [ boto3 ]
author: Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
options:
gather_local_disks:
description:
- Gather local disks attached to the storage gateway.
type: bool
required: false
default: true
gather_tapes:
description:
- Gather tape information for storage gateways in tape mode.
type: bool
required: false
default: true
gather_file_shares:
description:
- Gather file share information for storage gateways in s3 mode.
type: bool
required: false
default: true
gather_volumes:
description:
- Gather volume information for storage gateways in iSCSI (cached & stored) modes.
type: bool
required: false
default: true
extends_documentation_fragment:
- aws
- ec2
'''
RETURN = '''
gateways:
description: list of gateway objects
returned: always
type: complex
contains:
gateway_arn:
description: "Storage Gateway ARN"
returned: always
type: string
sample: "arn:aws:storagegateway:eu-west-1:367709993819:gateway/sgw-9999F888"
gateway_id:
description: "Storage Gateway ID"
returned: always
type: string
sample: "sgw-9999F888"
gateway_name:
description: "Storage Gateway friendly name"
returned: always
type: string
sample: "my-sgw-01"
gateway_operational_state:
description: "Storage Gateway operational state"
returned: always
type: string
sample: "ACTIVE"
gateway_type:
description: "Storage Gateway type"
returned: always
type: string
sample: "FILE_S3"
file_shares:
description: "Storage gateway file shares"
returned: when gateway_type == "FILE_S3"
type: complex
contains:
file_share_arn:
description: "File share ARN"
returned: always
type: string
sample: "arn:aws:storagegateway:eu-west-1:399805793479:share/share-AF999C88"
file_share_id:
description: "File share ID"
returned: always
type: string
sample: "share-AF999C88"
file_share_status:
description: "File share status"
returned: always
type: string
sample: "AVAILABLE"
tapes:
description: "Storage Gateway tapes"
returned: when gateway_type == "VTL"
type: complex
contains:
tape_arn:
description: "Tape ARN"
returned: always
type: string
sample: "arn:aws:storagegateway:eu-west-1:399805793479:tape/tape-AF999C88"
tape_barcode:
description: "Tape ARN"
returned: always
type: string
sample: "tape-AF999C88"
tape_size_in_bytes:
description: "Tape ARN"
returned: always
type: integer
sample: 555887569
tape_status:
description: "Tape ARN"
returned: always
type: string
sample: "AVAILABLE"
local_disks:
description: "Storage gateway local disks"
returned: always
type: complex
contains:
disk_allocation_type:
description: "Disk allocation type"
returned: always
type: string
sample: "CACHE STORAGE"
disk_id:
description: "Disk ID on the system"
returned: always
type: string
sample: "pci-0000:00:1f.0"
disk_node:
description: "Disk parent block device"
returned: always
type: string
sample: "/dev/sdb"
disk_path:
description: "Disk path used for the cache"
returned: always
type: string
sample: "/dev/nvme1n1"
disk_size_in_bytes:
description: "Disk size in bytes"
returned: always
type: integer
sample: 107374182400
disk_status:
description: "Disk status"
returned: always
type: string
sample: "present"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: "Get AWS storage gateway facts"
aws_sgw_facts:
- name: "Get AWS storage gateway facts for region eu-west-3"
aws_sgw_facts:
region: eu-west-3
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
class SGWFactsManager(object):
def __init__(self, client, module):
self.client = client
self.module = module
self.name = self.module.params.get('name')
def fetch(self):
gateways = self.list_gateways()
for gateway in gateways:
if self.module.params.get('gather_local_disks'):
self.list_local_disks(gateway)
# File share gateway
if gateway["gateway_type"] == "FILE_S3" and self.module.params.get('gather_file_shares'):
self.list_gateway_file_shares(gateway)
# Volume tape gateway
elif gateway["gateway_type"] == "VTL" and self.module.params.get('gather_tapes'):
self.list_gateway_vtl(gateway)
# iSCSI gateway
elif gateway["gateway_type"] in ["CACHED", "STORED"] and self.module.params.get('gather_volumes'):
self.list_gateway_volumes(gateway)
self.module.exit_json(gateways=gateways)
"""
List all storage gateways for the AWS endpoint.
"""
def list_gateways(self):
try:
paginator = self.client.get_paginator('list_gateways')
response = paginator.paginate(
PaginationConfig={
'PageSize': 100,
}
).build_full_result()
gateways = []
for gw in response["Gateways"]:
gateways.append(camel_dict_to_snake_dict(gw))
return gateways
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list storage gateways")
"""
Read file share objects from AWS API response.
Drop the gateway_arn attribute from the response, as it duplicates the parent object.
"""
@staticmethod
def _read_gateway_fileshare_response(fileshares, aws_response):
for share in aws_response["FileShareInfoList"]:
share_obj = camel_dict_to_snake_dict(share)
if "gateway_arn" in share_obj:
del share_obj["gateway_arn"]
fileshares.append(share_obj)
return aws_response["NextMarker"] if "NextMarker" in aws_response else None
"""
List file shares attached to AWS storage gateway when in S3 mode.
"""
def list_gateway_file_shares(self, gateway):
try:
response = self.client.list_file_shares(
GatewayARN=gateway["gateway_arn"],
Limit=100
)
gateway["file_shares"] = []
marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
while marker is not None:
response = self.client.list_file_shares(
GatewayARN=gateway["gateway_arn"],
Marker=marker,
Limit=100
)
marker = self._read_gateway_fileshare_response(gateway["file_shares"], response)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list gateway file shares")
"""
List storage gateway local disks
"""
def list_local_disks(self, gateway):
try:
gateway['local_disks'] = [camel_dict_to_snake_dict(disk) for disk in
self.client.list_local_disks(GatewayARN=gateway["gateway_arn"])['Disks']]
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list storage gateway local disks")
"""
Read tape objects from AWS API response.
Drop the gateway_arn attribute from the response, as it duplicates the parent object.
"""
@staticmethod
def _read_gateway_tape_response(tapes, aws_response):
for tape in aws_response["TapeInfos"]:
tape_obj = camel_dict_to_snake_dict(tape)
if "gateway_arn" in tape_obj:
del tape_obj["gateway_arn"]
tapes.append(tape_obj)
return aws_response["Marker"] if "Marker" in aws_response else None
"""
List VTL & VTS attached to AWS storage gateway in VTL mode
"""
def list_gateway_vtl(self, gateway):
try:
response = self.client.list_tapes(
Limit=100
)
gateway["tapes"] = []
marker = self._read_gateway_tape_response(gateway["tapes"], response)
while marker is not None:
response = self.client.list_tapes(
Marker=marker,
Limit=100
)
marker = self._read_gateway_tape_response(gateway["tapes"], response)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list storage gateway tapes")
"""
List volumes attached to AWS storage gateway in CACHED or STORED mode
"""
def list_gateway_volumes(self, gateway):
try:
paginator = self.client.get_paginator('list_volumes')
response = paginator.paginate(
GatewayARN=gateway["gateway_arn"],
PaginationConfig={
'PageSize': 100,
}
).build_full_result()
gateway["volumes"] = []
for volume in response["VolumeInfos"]:
volume_obj = camel_dict_to_snake_dict(volume)
if "gateway_arn" in volume_obj:
del volume_obj["gateway_arn"]
if "gateway_id" in volume_obj:
del volume_obj["gateway_id"]
gateway["volumes"].append(volume_obj)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e, msg="Couldn't list storage gateway volumes")
def main():
argument_spec = dict(
gather_local_disks=dict(type='bool', default=True),
gather_tapes=dict(type='bool', default=True),
gather_file_shares=dict(type='bool', default=True),
gather_volumes=dict(type='bool', default=True)
)
module = AnsibleAWSModule(argument_spec=argument_spec)
client = module.client('storagegateway')
if client is None: # this should never happen
module.fail_json(msg='Unknown error, failed to create storagegateway client, no information from boto.')
SGWFactsManager(client, module).fetch()
if __name__ == '__main__':
main()
|
gpl-3.0
|
rajrakeshdr/pychess
|
lib/pychess/Utils/lutils/egtb_gaviota.py
|
22
|
9260
|
from __future__ import absolute_import
import os
import re
import sys
import platform
from ctypes import *
from .bitboard import firstBit, clearBit
from .lmovegen import genAllMoves, genCheckEvasions
from pychess.Utils.const import *
from pychess.System import conf
from pychess.System.prefix import addDataPrefix, getDataPrefix
from pychess.System.Log import log
class TB_STATS(Structure):
_fields_ = [
( 'wdl_easy_hits' , c_ulong*2 ),
( 'wdl_hard_prob' , c_ulong*2 ),
( 'wdl_soft_prob' , c_ulong*2 ),
( 'wdl_cachesize' , c_size_t ),
( 'wdl_occupancy' , c_double ),
( 'dtm_easy_hits' , c_ulong*2 ),
( 'dtm_hard_prob' , c_ulong*2 ),
( 'dtm_soft_prob' , c_ulong*2 ),
( 'dtm_cachesize' , c_size_t ),
( 'dtm_occupancy' , c_double ),
( 'total_hits' , c_ulong*2 ),
( 'memory_hits' , c_ulong*2 ),
( 'drive_hits' , c_ulong*2 ),
( 'drive_miss' , c_ulong*2 ),
( 'bytes_read' , c_ulong*2 ),
( 'files_opened' , c_ulong ),
( 'memory_efficiency', c_double ),
]
class egtb_gaviota:
def __init__ (self):
self.libgtb = None
self.initialized = False
# Get a list of files in the tablebase folder.
configuredTbPath = conf.get("egtb_path", "")
tbPath = configuredTbPath or getDataPrefix()
try:
tbPathContents = os.listdir(tbPath)
except OSError as e:
if configuredTbPath:
log.warning("Unable to open Gaviota TB folder: %s" % repr(e))
return
# Find files named *.gtb.cp# and pick the most common "#".
# (This is the compression scheme; the library currently only uses one at a time.)
schemeCount = [0] * 10
for filename in tbPathContents:
match = re.search(r"\.gtb\.cp(\d)$", filename)
if match:
schemeCount[int(match.group(1))] += 1
compressionScheme = max(zip(schemeCount, range(10)))
if compressionScheme[0] == 0:
if configuredTbPath:
log.warning("Could not find any Gaviota TB files in %s" % configuredTbPath)
return
compressionScheme = compressionScheme[1]
# Locate and load the library.
if not self._loadLibrary():
return
self._setupFunctionPrototypes()
self.pathList = self.tbpaths_init()
self.pathList = self.tbpaths_add(self.pathList, tbPath.encode())
initInfo = self.tb_init(True, compressionScheme, self.pathList)
self.initialized = ( self.tb_is_initialized() != 0 )
if not self.initialized:
log.warning(initInfo or "Failed to initialize Gaviota EGTB library")
self.pathList = self.tbpaths_done(self.pathList)
return
elif initInfo:
log.info(initInfo)
# TODO: Set up a WDL cache area once the engine can use it.
self.initialized &= self.tbcache_init(4*1024*1024, 0)
if not self.initialized:
log.warning("Failed to initialize Gaviota EGTB cache")
self.tb_done()
self.pathList = self.tbpaths_done(self.pathList)
return
self.availability = self.tb_availability()
def _del (self):
if self.initialized:
self.tb_done()
self.pathList = self.tbpaths_done(self.pathList)
def supports (self, size):
return self.initialized and ( \
sum(size) <= 2 or \
(self.availability & (3 << (2*sum(size)-6))) != 0 )
def scoreAllMoves (self, board):
result, depth = self.scoreGame(board, False, False)
if result is None:
return []
scores = []
gen = board.isChecked() and genCheckEvasions or genAllMoves
for move in gen(board):
board.applyMove(move)
if not board.opIsChecked():
result, depth = self.scoreGame(board, False, False)
if result is None:
log.warning("An EGTB file does not have all its dependencies")
board.popMove()
return []
scores.append( (move, result, depth) )
board.popMove()
def mateScore(mrd):
if mrd[1] == DRAW:
return 0
absScore = 32767 - mrd[2]
if (board.color == WHITE) ^ (mrd[1] == WHITEWON):
return absScore
return -absScore
scores.sort(key=mateScore)
return scores
def scoreGame(self, board, omitDepth, probeSoft):
stm = board.color
epsq = board.enpassant or 64 # 64 is tb_NOSQUARE
castles = (board.castling >> 2 & 3) | (board.castling << 2 & 12)
tbinfo = c_uint()
depth = c_uint()
SqArray= c_uint * 65
PcArray= c_byte * 65
pc, sq = [], []
for color in (WHITE, BLACK):
sq.append(SqArray())
pc.append(PcArray())
i = 0
bb = board.friends[color]
while bb:
b = firstBit(bb)
bb = clearBit(bb, b)
sq[-1][i] = b
pc[-1][i] = board.arBoard[b]
i += 1
sq[-1][i] = 64 # tb_NOSQUARE, terminates the list
pc[-1][i] = 0 # tb_NOPIECE, terminates the list
if omitDepth and probeSoft:
ok = self.tb_probe_WDL_soft(stm, epsq, castles, sq[WHITE], sq[BLACK], pc[WHITE], pc[BLACK], byref(tbinfo))
elif omitDepth and not probeSoft:
ok = self.tb_probe_WDL_hard(stm, epsq, castles, sq[WHITE], sq[BLACK], pc[WHITE], pc[BLACK], byref(tbinfo))
elif not omitDepth and probeSoft:
ok = self.tb_probe_soft (stm, epsq, castles, sq[WHITE], sq[BLACK], pc[WHITE], pc[BLACK], byref(tbinfo), byref(depth))
elif not omitDepth and not probeSoft:
ok = self.tb_probe_hard (stm, epsq, castles, sq[WHITE], sq[BLACK], pc[WHITE], pc[BLACK], byref(tbinfo), byref(depth))
resultMap = [ DRAW, WHITEWON, BLACKWON ]
if not ok or not 0 <= tbinfo.value <= 2:
return None, None
result = resultMap[tbinfo.value]
if omitDepth or result == DRAW:
depth = None
else:
depth = depth.value
return result, depth
def _loadLibrary (self):
libName = "libgaviotatb.so.1.0.1"
try:
self.libgtb = CDLL(libName)
except OSError:
log.warning("Failed to load Gaviota EGTB library %s" % libName)
return None
return self.libgtb
# Prototypes from gtb-probe.h follow.
def _setupFunctionPrototypes (self):
def proto(name, returnType, *args):
argTypes = map(lambda x: x[0], args)
argNames = map(lambda x: x[1], args)
funcType = CFUNCTYPE(returnType, *argTypes)
paramFlags = tuple(zip([1] * len(args), argNames))
setattr(self, name, funcType((name, self.libgtb), paramFlags))
paths_t = POINTER(c_char_p)
uip = POINTER(c_uint)
ucp = POINTER(c_byte)
proto("tb_init" , c_char_p, (c_int, "verbosity"), (c_int, "compression_scheme"), (paths_t, "paths"))
proto("tb_restart", c_char_p, (c_int, "verbosity"), (c_int, "compression_scheme"), (paths_t, "paths"))
proto("tb_done", None)
proto("tb_probe_hard", c_int, (c_uint, "stm"), (c_uint, "epsq"), (c_uint, "castles"), (uip, "wSQ"), (uip, "bSQ"), (ucp, "wPC"), (ucp, "bPC"), (uip, "tbinfo"), (uip, "plies"))
proto("tb_probe_soft", c_int, (c_uint, "stm"), (c_uint, "epsq"), (c_uint, "castles"), (uip, "wSQ"), (uip, "bSQ"), (ucp, "wPC"), (ucp, "bPC"), (uip, "tbinfo"), (uip, "plies"))
proto("tb_probe_WDL_hard", c_int, (c_uint, "stm"), (c_uint, "epsq"), (c_uint, "castles"), (uip, "wSQ"), (uip, "bSQ"), (ucp, "wPC"), (ucp, "bPC"), (uip, "tbinfo"))
proto("tb_probe_WDL_soft", c_int, (c_uint, "stm"), (c_uint, "epsq"), (c_uint, "castles"), (uip, "wSQ"), (uip, "bSQ"), (ucp, "wPC"), (ucp, "bPC"), (uip, "tbinfo"))
proto("tb_is_initialized", c_int)
proto("tb_availability", c_uint)
proto("tb_indexmemory", c_size_t)
proto("tbcache_init" , c_int, (c_size_t, "cache_mem"), (c_int, "wdl_fraction"))
proto("tbcache_restart", c_int, (c_size_t, "cache_mem"), (c_int, "wdl_fraction"))
proto("tbcache_done", None)
proto("tbcache_is_on", c_int)
proto("tbcache_flush", None)
proto("tbstats_reset", None)
proto("tbstats_get", None, (POINTER(TB_STATS), "stats"))
proto("tbpaths_init", paths_t)
proto("tbpaths_add", paths_t, (paths_t, "ps"), (c_char_p, "newpath"))
proto("tbpaths_done", paths_t, (paths_t, "ps"))
proto("tbpaths_getmain", c_char_p)
|
gpl-3.0
|
scdoshi/djutils
|
djutils/gis.py
|
1
|
2346
|
"""
GIS: GIS related utilities.
"""
###############################################################################
## Imports
###############################################################################
import math
###############################################################################
## GIS Format Conversions
###############################################################################
def GPRMC2DegDec(lat, latDirn, lng, lngDirn):
"""Converts GPRMC formats (Decimal Minutes) to Degrees Decimal
Eg. GPRMC2DegDec('4807.038', 'N', '01131.000', 'E') -> (48.1173, 11.5167)
"""
x = float(lat[0:2]) + float(lat[2:]) / 60
y = float(lng[0:3]) + float(lng[3:]) / 60
if latDirn == 'S':
x = -x
if lngDirn == 'W':
y = -y
return x, y
def TinyGPS2DegDec(lat, lng):
"""Converts TinyGPS formats (Decimal Degrees to e-5) to Degrees Decimal
Eg. TinyGPS2DegDec('4807038', '1131000') -> (48.07038, 11.31)
"""
x = float(lat[:-5] + '.' + lat[-5:])
y = float(lng[:-5] + '.' + lng[-5:])
return x, y
###############################################################################
## Functions to convert miles to change in lat, long (approx)
###############################################################################
# Distances are measured in miles.
# Longitudes and latitudes are measured in degrees.
# Earth is assumed to be perfectly spherical.
earth_radius = 3960.0
degrees_to_radians = math.pi / 180.0
radians_to_degrees = 180.0 / math.pi
def ChangeInLatitude(miles):
"""Given a distance north, return the change in latitude."""
return (miles / earth_radius) * radians_to_degrees
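# Worked example: ChangeInLatitude(1) = (1 / 3960.0) * (180 / pi) ~= 0.0145 degrees,
# i.e. travelling one mile north shifts the latitude by roughly 0.0145 degrees.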
def ChangeInLongitude(lat, miles):
"""Given a latitude and a distance west, return the change in longitude."""
# Find the radius of a circle around the earth at given latitude.
r = earth_radius * math.cos(lat * degrees_to_radians)
return (miles / r) * radians_to_degrees
def CalculateBoundingBox(lng, lat, miles):
"""
Given a longitude, latitude and a distance in miles, calculate
the co-ordinates of the bounding box 2*miles long on each side, with the
given co-ordinates at the center.
"""
latChange = ChangeInLatitude(miles)
latSouth = lat - latChange
latNorth = lat + latChange
lngChange = ChangeInLongitude(lat, miles)
lngWest = lng + lngChange
lngEast = lng - lngChange
return (lngWest, latSouth, lngEast, latNorth)
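# A minimal usage sketch (hypothetical coordinates): a box extending 10 miles in each
# direction from lng=25.92, lat=-24.65 would be computed as
#   west, south, east, north = CalculateBoundingBox(25.92, -24.65, 10)
# matching the (lngWest, latSouth, lngEast, latNorth) return order above.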
|
bsd-3-clause
|
Z2PackDev/TBmodels
|
tests/test_hdf5.py
|
1
|
3921
|
#!/usr/bin/env python
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""Tests saving and loading to HDF5 format."""
import tempfile
import pytest
import numpy as np
import tbmodels
KWARGS = [
dict(),
dict(pos=None, dim=3),
dict(uc=3 * np.eye(3)),
dict(pos=np.zeros((2, 3)), uc=np.eye(3)),
]
@pytest.mark.parametrize("kwargs", KWARGS)
def test_hdf5_consistency_file(get_model, models_equal, kwargs):
"""
Test that a tight-binding model remains the same after saving to
HDF5 format and loading, using the Model methods.
"""
model1 = get_model(0.1, 0.2, **kwargs)
with tempfile.NamedTemporaryFile() as tmpf:
model1.to_hdf5_file(tmpf.name)
model2 = tbmodels.Model.from_hdf5_file(tmpf.name)
models_equal(model1, model2)
@pytest.mark.parametrize("kwargs", KWARGS)
def test_hdf5_consistency_freefunc(get_model, models_equal, kwargs):
"""
Test that a tight-binding model remains the same after saving to
HDF5 and loading, using the free `io` functions.
"""
model1 = get_model(0.1, 0.2, **kwargs)
with tempfile.NamedTemporaryFile() as tmpf:
tbmodels.io.save(model1, tmpf.name)
model2 = tbmodels.io.load(tmpf.name)
models_equal(model1, model2)
@pytest.fixture(params=["InAs_nosym.hdf5"])
def hdf5_sample(sample, request):
"""Fixture which provides the filename of a HDF5 tight-binding model."""
return sample(request.param)
@pytest.fixture(params=["InAs_nosym_legacy.hdf5"])
def hdf5_sample_legacy(sample, request):
"""
Fixture which provides the filename of a HDF5 tight-binding model,
in legacy format.
"""
return sample(request.param)
def test_hdf5_load_freefunc(hdf5_sample): # pylint: disable=redefined-outer-name
"""Test that a HDF5 file can be loaded with the `io.load` function."""
res = tbmodels.io.load(hdf5_sample)
assert isinstance(res, tbmodels.Model)
def test_hdf5_load_method(hdf5_sample): # pylint: disable=redefined-outer-name
"""Test that a HDF5 file can be loaded with the Model method."""
res = tbmodels.Model.from_hdf5_file(hdf5_sample)
assert isinstance(res, tbmodels.Model)
def test_hdf5_load_freefunc_legacy(
hdf5_sample_legacy,
): # pylint: disable=redefined-outer-name
"""Test that a HDF5 file in legacy format can be loaded with the `io.load` function."""
with pytest.deprecated_call():
res = tbmodels.io.load(hdf5_sample_legacy)
assert isinstance(res, tbmodels.Model)
def test_hdf5_load_method_legacy(
hdf5_sample_legacy,
): # pylint: disable=redefined-outer-name
"""Test that a HDF5 file in legacy format can be loaded with the Model method."""
with pytest.deprecated_call():
res = tbmodels.Model.from_hdf5_file(hdf5_sample_legacy)
assert isinstance(res, tbmodels.Model)
def test_hdf5_kdotp(kdotp_models_equal):
"""Test that k.p models can be saved / loaded to HDF5."""
kp_model = tbmodels.kdotp.KdotpModel(
{(1, 0): [[0.1, 0.2j], [-0.2j, 0.3]], (0, 0): np.eye(2)}
)
with tempfile.NamedTemporaryFile() as tmpf:
tbmodels.io.save(kp_model, tmpf.name)
model2 = tbmodels.io.load(tmpf.name)
kdotp_models_equal(kp_model, model2)
def test_generic_legacy_object(sample):
"""Test that a generic object in legacy format can be loaded."""
filename = sample("legacy_general_object.hdf5")
with pytest.deprecated_call():
res = tbmodels.io.load(filename)
assert res == [2, [3, 4], 2.3]
def test_invalid_hdf5_file(sample):
"""
Check that trying to load a file with invalid structure raises the
expected error.
"""
filename = sample("invalid_file_structure.hdf5")
with pytest.deprecated_call(), pytest.raises(ValueError) as excinfo:
tbmodels.io.load(filename)
assert "File structure not understood" in str(excinfo.value)
|
apache-2.0
|
manderson23/NewsBlur
|
apps/reader/migrations/0001_initial.py
|
18
|
12215
|
from south.db import db
from django.db import models
from apps.reader.models import *
class Migration:
def forwards(self, orm):
# Adding model 'UserSubscription'
db.create_table('reader_usersubscription', (
('id', orm['reader.UserSubscription:id']),
('user', orm['reader.UserSubscription:user']),
('feed', orm['reader.UserSubscription:feed']),
('last_read_date', orm['reader.UserSubscription:last_read_date']),
('mark_read_date', orm['reader.UserSubscription:mark_read_date']),
('unread_count_neutral', orm['reader.UserSubscription:unread_count_neutral']),
('unread_count_positive', orm['reader.UserSubscription:unread_count_positive']),
('unread_count_negative', orm['reader.UserSubscription:unread_count_negative']),
('unread_count_updated', orm['reader.UserSubscription:unread_count_updated']),
('needs_unread_recalc', orm['reader.UserSubscription:needs_unread_recalc']),
))
db.send_create_signal('reader', ['UserSubscription'])
# Adding model 'UserSubscriptionFolders'
db.create_table('reader_usersubscriptionfolders', (
('id', orm['reader.UserSubscriptionFolders:id']),
('user', orm['reader.UserSubscriptionFolders:user']),
('folders', orm['reader.UserSubscriptionFolders:folders']),
))
db.send_create_signal('reader', ['UserSubscriptionFolders'])
# Adding model 'UserStory'
db.create_table('reader_userstory', (
('id', orm['reader.UserStory:id']),
('user', orm['reader.UserStory:user']),
('feed', orm['reader.UserStory:feed']),
('story', orm['reader.UserStory:story']),
('read_date', orm['reader.UserStory:read_date']),
('opinion', orm['reader.UserStory:opinion']),
))
db.send_create_signal('reader', ['UserStory'])
# Creating unique_together for [user, feed] on UserSubscription.
db.create_unique('reader_usersubscription', ['user_id', 'feed_id'])
# Creating unique_together for [user, feed, story] on UserStory.
db.create_unique('reader_userstory', ['user_id', 'feed_id', 'story_id'])
def backwards(self, orm):
# Deleting unique_together for [user, feed, story] on UserStory.
db.delete_unique('reader_userstory', ['user_id', 'feed_id', 'story_id'])
# Deleting unique_together for [user, feed] on UserSubscription.
db.delete_unique('reader_usersubscription', ['user_id', 'feed_id'])
# Deleting model 'UserSubscription'
db.delete_table('reader_usersubscription')
# Deleting model 'UserSubscriptionFolders'
db.delete_table('reader_usersubscriptionfolders')
# Deleting model 'UserStory'
db.delete_table('reader_userstory')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reader.userstory': {
'Meta': {'unique_together': "(('user', 'feed', 'story'),)"},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'read_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'reader.usersubscription': {
'Meta': {'unique_together': "(('user', 'feed'),)"},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 5, 28, 16, 53, 30, 352483)'}),
'mark_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 5, 28, 16, 53, 30, 352527)'}),
'needs_unread_recalc': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'unread_count_negative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_neutral': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_positive': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2000, 1, 1, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'reader.usersubscriptionfolders': {
'folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'rss_feeds.feed': {
'Meta': {'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': '0', 'auto_now': 'True', 'blank': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'page_data': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
'stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.story': {
'Meta': {'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']"})
},
'rss_feeds.storyauthor': {
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['reader']
|
mit
|
ArcherSys/ArcherSys
|
Scripts/pildriver.py
|
1
|
15521
|
#!c:\xampp\htdocs\scripts\python.exe
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self.stack[0], 'format'): # If the top of the stack is an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
image = self.do_pop()
image.draft(mode, (xsize, ysize))
self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
filter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
mit
|
peiyuwang/pants
|
src/python/pants/backend/docgen/tasks/generate_pants_reference.py
|
16
|
4195
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.base.mustache import MustacheRenderer
from pants.goal.goal import Goal
from pants.help.build_dictionary_info_extracter import BuildDictionaryInfoExtracter
from pants.help.help_info_extracter import HelpInfoExtracter
from pants.help.scope_info_iterator import ScopeInfoIterator
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.scope import ScopeInfo
from pants.task.task import Task
from pants.util.dirutil import safe_open
class GeneratePantsReference(Task):
"""Generate Pants reference documentation.
Specifically, generates two files: a build dictionary detailing all the directives that
can appear in BUILD files, and a reference listing all available goals and options.
"""
@classmethod
def register_options(cls, register):
register('--pants-reference-template', default='reference/pants_reference.html',
help='The template for the Pants reference document. Defaults to generating '
'a standalone reference page.')
register('--build-dictionary-template', default='reference/build_dictionary.html',
help='The template for the build dictionary document. Defaults to generating '
'a standalone build dictionary page.')
def __init__(self, *args, **kwargs):
super(GeneratePantsReference, self).__init__(*args, **kwargs)
self._outdir = os.path.join(self.get_options().pants_distdir, 'reference')
def execute(self):
self._gen_reference()
self._gen_build_dictionary()
def _gen_reference(self):
def get_scope_data(scope):
ret = []
for si in ScopeInfoIterator(self.context.options.known_scope_to_info).iterate([scope]):
help_info = HelpInfoExtracter(si.scope).get_option_scope_help_info_from_parser(
self.context.options.get_parser(si.scope))
ret.append({
# We don't use _asdict(), because then .description wouldn't be available.
'scope_info': si,
# We do use _asdict() here, so our mustache library can do property expansion.
'help_info': help_info._asdict(),
})
return ret
all_global_data = get_scope_data(GLOBAL_SCOPE)
global_scope_data = all_global_data[0:1]
global_subsystem_data = all_global_data[1:]
goal_scopes = sorted([si.scope for si in self.context.options.known_scope_to_info.values()
if si.scope and '.' not in si.scope and si.category != ScopeInfo.SUBSYSTEM])
# TODO: Make goals Optionable and get their description via their ScopeInfo?
goal_data = []
for scope in goal_scopes:
goal_data.append({
'goal': scope,
'goal_description': Goal.by_name(scope).description,
'task_data': get_scope_data(scope)[1:]
})
self._do_render(self.get_options().pants_reference_template, {
'global_scope_data': global_scope_data,
'global_subsystem_data': global_subsystem_data,
'goal_data': goal_data
})
def _gen_build_dictionary(self):
buildfile_aliases = self.context.build_file_parser.registered_aliases()
extracter = BuildDictionaryInfoExtracter(buildfile_aliases)
target_type_infos = extracter.get_target_type_info()
other_infos = sorted(extracter.get_object_info() + extracter.get_object_factory_info())
self._do_render(self.get_options().build_dictionary_template, {
'target_types': {
'infos': target_type_infos
},
'other_symbols': {
'infos': other_infos
}
})
def _do_render(self, filename, args):
package_name, _, _ = __name__.rpartition('.')
renderer = MustacheRenderer(package_name=package_name)
output_path = os.path.join(self._outdir, os.path.basename(filename))
self.context.log.info('Generating {}'.format(output_path))
html = renderer.render_name(filename, args)
with safe_open(output_path, 'w') as outfile:
outfile.write(html.encode('utf8'))
|
apache-2.0
|
botswana-harvard/bais-subject
|
bais_subject/models/attitudes_towards_people.py
|
1
|
6192
|
from django.db import models
from edc_base.model_fields import OtherCharField
from edc_base.model_mixins import BaseUuidModel
from ..choices import (YES_NO, TESTING_REASONS, TB_NONDISCLOSURE,
HIV_TEST_RESULT, ARV_USAGE, ARV_TREATMENT_SOURCE,
REASONS_ARV_NOT_TAKEN, TB_REACTION)
class AttitudesTowardsPeople(BaseUuidModel):
meal_sharing = models.CharField(
verbose_name='Would you ever share a meal (from the same plate)'
' with a person you knew or suspected had HIV AND AIDS?',
max_length=35,
choices=YES_NO,
)
aids_household_care = models.CharField(
verbose_name='If a member of your family became sick with HIV AND AIDS,'
' would you be willing to care for him or her in your household?',
max_length=35,
choices=YES_NO,
)
tb_household_care = models.CharField(
verbose_name='If a member of your family became sick with TB,'
' would you be willing to care for him or her in your household?',
max_length=35,
choices=YES_NO,
)
tb_household_empathy = models.CharField(
verbose_name='If a member of your family got diagnosed with TB,'
' would you be willing to care for him or her in your household?',
max_length=35,
choices=YES_NO,
)
aids_housekeeper = models.CharField(
verbose_name='If your housekeeper, nanny or anybody looking'
' after your child has HIV but is not sick, '
'would you allow him/her to continue'
' working/assisting with babysitting in your house? ',
max_length=35,
choices=YES_NO,
)
aids_teacher = models.CharField(
verbose_name='If a teacher has HIV but is not sick,'
' should s/he be allowed to continue teaching in school?',
max_length=35,
choices=YES_NO,
)
aids_shopkeeper = models.CharField(
verbose_name='If you knew that a shopkeeper or food seller had'
' HIV or AIDS, would you buy vegetables from them?',
max_length=35,
choices=YES_NO,
)
aids_family_member = models.CharField(
verbose_name='If a member of your family got infected'
        ' with HIV, would you want it to remain a secret?',
max_length=35,
choices=YES_NO,
help_text="",
)
aids_children = models.CharField(
verbose_name='Do you think that children living with HIV '
'should attend school with children who are HIV negative?',
max_length=35,
choices=YES_NO,
)
aids_room_sharing = models.CharField(
verbose_name='Would you share a room '
'with a person you knew has been diagnosed with TB?',
max_length=35,
choices=YES_NO,
)
aids_hiv_testing = models.CharField(
verbose_name='Have you ever been tested for HIV?',
max_length=35,
choices=YES_NO,
)
aids_hiv_times_tested = models.CharField(
verbose_name='In the past 12 months how many times'
' have you been tested for HIV and received your results?',
max_length=35,
choices=YES_NO,
)
aids_hiv_test_partner = models.CharField(
verbose_name='Did you test together with your partner?',
max_length=250,
choices=YES_NO,
)
aids_hiv_test_reason = models.CharField(
verbose_name='What was the main reason for testing?',
max_length=35,
choices=TESTING_REASONS,
)
aids_hiv_test_reason_other = OtherCharField(
verbose_name='Specify Other',
max_length=35,
null=True,
blank=True,
)
aids_hiv_not_tested = models.CharField(
verbose_name='Why haven’t you tested?',
max_length=35,
choices=TESTING_REASONS,
)
aids_hiv_not_tested_other = OtherCharField(
verbose_name='Other, Specify',
max_length=35,
null=True,
blank=True,
)
aids_hiv_test_result = models.CharField(
verbose_name='What was the result of your last HIV test? ',
max_length=35,
choices=HIV_TEST_RESULT,
)
aids_hiv_test_result_disclosure = models.CharField(
        verbose_name='Did you tell anyone the result of your test?',
max_length=35,
choices=YES_NO,
)
current_arv_therapy = models.CharField(
verbose_name='Are you currently taking ARVs?',
max_length=35,
choices=ARV_USAGE,
)
current_arv_supplier = models.CharField(
verbose_name='Where are you taking your ARVs?',
max_length=35,
choices=ARV_TREATMENT_SOURCE,
)
current_arv_supplier_other = OtherCharField(
verbose_name='Other Specify',
max_length=35,
null=True,
blank=True,
)
not_on_arv_therapy = models.CharField(
verbose_name='Why aren\'t you taking your ARVs?',
max_length=35,
choices=REASONS_ARV_NOT_TAKEN,
)
not_on_arv_therapy_other = OtherCharField(
verbose_name='Other Specify',
max_length=35,
blank=True,
null=True
)
tb_reaction = models.CharField(
verbose_name='What would be your reaction'
' if you found out you had TB ?',
max_length=35,
choices=TB_REACTION,
)
tb_reaction_other = OtherCharField(
verbose_name='Other Specify',
max_length=35,
null=True,
blank=True,
)
tb_diagnosis = models.CharField(
        verbose_name='If you were diagnosed with TB, would you tell anyone?',
max_length=35,
choices=YES_NO,
)
tb_diagnosis_disclosure = models.CharField(
verbose_name='If yes, whom would you tell?',
max_length=35,
choices=YES_NO,
)
tb_diagnosis_no_disclosure = models.CharField(
        verbose_name='If no, why not?',
max_length=35,
choices=TB_NONDISCLOSURE,
)
tb_diagnosis_no_disclosure_other = OtherCharField(
verbose_name='Other, Specify',
max_length=35,
blank=True,
null=True
)
class Meta(BaseUuidModel.Meta):
app_label = 'bais_subject'
|
gpl-3.0
|
nzavagli/UnrealPy
|
UnrealPyEmbed/Source/Python/Lib/python27/distutils/command/build_py.py
|
74
|
16338
|
"""distutils.command.build_py
Implements the Distutils 'build_py' command."""
__revision__ = "$Id$"
import os
import sys
from glob import glob
from distutils.core import Command
from distutils.errors import DistutilsOptionError, DistutilsFileError
from distutils.util import convert_path
from distutils import log
class build_py(Command):
description = "\"build\" pure Python modules (copy to build directory)"
user_options = [
('build-lib=', 'd', "directory to \"build\" (copy) to"),
('compile', 'c', "compile .py to .pyc"),
('no-compile', None, "don't compile .py files [default]"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['compile', 'force']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
self.build_lib = None
self.py_modules = None
self.package = None
self.package_data = None
self.package_dir = None
self.compile = 0
self.optimize = 0
self.force = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('force', 'force'))
# Get the distribution options that are aliases for build_py
# options -- list of packages and list of modules.
self.packages = self.distribution.packages
self.py_modules = self.distribution.py_modules
self.package_data = self.distribution.package_data
self.package_dir = {}
if self.distribution.package_dir:
for name, path in self.distribution.package_dir.items():
self.package_dir[name] = convert_path(path)
self.data_files = self.get_data_files()
# Ick, copied straight from install_lib.py (fancy_getopt needs a
# type system! Hell, *everything* needs a type system!!!)
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
assert 0 <= self.optimize <= 2
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# XXX copy_file by default preserves atime and mtime. IMHO this is
# the right thing to do, but perhaps it should be an option -- in
# particular, a site administrator might want installed files to
# reflect the time of installation rather than the last
# modification time before the installed release.
# XXX copy_file by default preserves mode, which appears to be the
# wrong thing to do: if a file is read-only in the working
# directory, we want it to be installed read/write so that the next
# installation of the same module distribution can overwrite it
# without problems. (This might be a Unix-specific issue.) Thus
# we turn off 'preserve_mode' when copying to the build directory,
# since the build directory is supposed to be exactly what the
# installation will look like (ie. we preserve mode when
# installing).
# Two options control which modules will be installed: 'packages'
# and 'py_modules'. The former lets us work with whole packages, not
# specifying individual modules at all; the latter is for
# specifying modules one-at-a-time.
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.byte_compile(self.get_outputs(include_bytecode=0))
def get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
data = []
if not self.packages:
return data
for package in self.packages:
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = 0
if src_dir:
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append((package, src_dir, build_dir, filenames))
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = []
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
filelist = glob(os.path.join(src_dir, convert_path(pattern)))
# Files that match more than one pattern are only added once
files.extend([fn for fn in filelist if fn not in files
and os.path.isfile(fn)])
return files
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
self.copy_file(os.path.join(src_dir, filename), target,
preserve_mode=False)
def get_package_dir(self, package):
"""Return the directory, relative to the top of the source
distribution, where package 'package' should be found
(at least according to the 'package_dir' option, if any)."""
path = package.split('.')
if not self.package_dir:
if path:
return os.path.join(*path)
else:
return ''
else:
tail = []
while path:
try:
pdir = self.package_dir['.'.join(path)]
except KeyError:
tail.insert(0, path[-1])
del path[-1]
else:
tail.insert(0, pdir)
return os.path.join(*tail)
else:
# Oops, got all the way through 'path' without finding a
# match in package_dir. If package_dir defines a directory
# for the root (nameless) package, then fallback on it;
# otherwise, we might as well have not consulted
# package_dir at all, as we just use the directory implied
# by 'tail' (which should be the same as the original value
# of 'path' at this point).
pdir = self.package_dir.get('')
if pdir is not None:
tail.insert(0, pdir)
if tail:
return os.path.join(*tail)
else:
return ''
def check_package(self, package, package_dir):
# Empty dir name means current directory, which we can probably
# assume exists. Also, os.path.exists and isdir don't know about
# my "empty string means current dir" convention, so we have to
# circumvent them.
if package_dir != "":
if not os.path.exists(package_dir):
raise DistutilsFileError(
"package directory '%s' does not exist" % package_dir)
if not os.path.isdir(package_dir):
raise DistutilsFileError(
"supposed package directory '%s' exists, "
"but is not a directory" % package_dir)
# Require __init__.py for all but the "root package"
if package:
init_py = os.path.join(package_dir, "__init__.py")
if os.path.isfile(init_py):
return init_py
else:
log.warn(("package init file '%s' not found " +
"(or not a regular file)"), init_py)
# Either not in a package at all (__init__.py not expected), or
# __init__.py doesn't exist -- so don't return the filename.
return None
def check_module(self, module, module_file):
if not os.path.isfile(module_file):
log.warn("file %s (for module %s) not found", module_file, module)
return False
else:
return True
def find_package_modules(self, package, package_dir):
self.check_package(package, package_dir)
module_files = glob(os.path.join(package_dir, "*.py"))
modules = []
setup_script = os.path.abspath(self.distribution.script_name)
for f in module_files:
abs_f = os.path.abspath(f)
if abs_f != setup_script:
module = os.path.splitext(os.path.basename(f))[0]
modules.append((package, module, f))
else:
self.debug_print("excluding %s" % setup_script)
return modules
def find_modules(self):
"""Finds individually-specified Python modules, ie. those listed by
module name in 'self.py_modules'. Returns a list of tuples (package,
module_base, filename): 'package' is a tuple of the path through
package-space to the module; 'module_base' is the bare (no
packages, no dots) module name, and 'filename' is the path to the
".py" file (relative to the distribution root) that implements the
module.
"""
# Map package names to tuples of useful info about the package:
# (package_dir, checked)
# package_dir - the directory where we'll find source files for
# this package
# checked - true if we have checked that the package directory
# is valid (exists, contains __init__.py, ... ?)
packages = {}
# List of (package, module, filename) tuples to return
modules = []
# We treat modules-in-packages almost the same as toplevel modules,
# just the "package" for a toplevel is empty (either an empty
# string or empty list, depending on context). Differences:
# - don't check for __init__.py in directory for empty package
for module in self.py_modules:
path = module.split('.')
package = '.'.join(path[0:-1])
module_base = path[-1]
try:
(package_dir, checked) = packages[package]
except KeyError:
package_dir = self.get_package_dir(package)
checked = 0
if not checked:
init_py = self.check_package(package, package_dir)
packages[package] = (package_dir, 1)
if init_py:
modules.append((package, "__init__", init_py))
# XXX perhaps we should also check for just .pyc files
# (so greedy closed-source bastards can distribute Python
# modules too)
module_file = os.path.join(package_dir, module_base + ".py")
if not self.check_module(module, module_file):
continue
modules.append((package, module_base, module_file))
return modules
def find_all_modules(self):
"""Compute the list of all modules that will be built, whether
they are specified one-module-at-a-time ('self.py_modules') or
by whole packages ('self.packages'). Return a list of tuples
(package, module, module_file), just like 'find_modules()' and
'find_package_modules()' do."""
modules = []
if self.py_modules:
modules.extend(self.find_modules())
if self.packages:
for package in self.packages:
package_dir = self.get_package_dir(package)
m = self.find_package_modules(package, package_dir)
modules.extend(m)
return modules
def get_source_files(self):
return [module[-1] for module in self.find_all_modules()]
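    # Compute the destination .py path for 'module' of 'package' (a sequence of name parts) under 'build_dir'.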
def get_module_outfile(self, build_dir, package, module):
outfile_path = [build_dir] + list(package) + [module + ".py"]
return os.path.join(*outfile_path)
def get_outputs(self, include_bytecode=1):
modules = self.find_all_modules()
outputs = []
for (package, module, module_file) in modules:
package = package.split('.')
filename = self.get_module_outfile(self.build_lib, package, module)
outputs.append(filename)
if include_bytecode:
if self.compile:
outputs.append(filename + "c")
if self.optimize > 0:
outputs.append(filename + "o")
outputs += [
os.path.join(build_dir, filename)
for package, src_dir, build_dir, filenames in self.data_files
for filename in filenames
]
return outputs
def build_module(self, module, module_file, package):
if isinstance(package, str):
package = package.split('.')
elif not isinstance(package, (list, tuple)):
raise TypeError(
"'package' must be a string (dot-separated), list, or tuple")
# Now put the module source file into the "build" area -- this is
# easy, we just copy it somewhere under self.build_lib (the build
# directory for Python source).
outfile = self.get_module_outfile(self.build_lib, package, module)
dir = os.path.dirname(outfile)
self.mkpath(dir)
return self.copy_file(module_file, outfile, preserve_mode=0)
def build_modules(self):
modules = self.find_modules()
for (package, module, module_file) in modules:
# Now "build" the module -- ie. copy the source file to
# self.build_lib (the build directory for Python source).
# (Actually, it gets copied to the directory for this package
# under self.build_lib.)
self.build_module(module, module_file, package)
def build_packages(self):
for package in self.packages:
# Get list of (package, module, module_file) tuples based on
# scanning the package directory. 'package' is only included
# in the tuple so that 'find_modules()' and
# 'find_package_tuples()' have a consistent interface; it's
# ignored here (apart from a sanity check). Also, 'module' is
# the *unqualified* module name (ie. no dots, no package -- we
# already know its package!), and 'module_file' is the path to
# the .py file, relative to the current directory
# (ie. including 'package_dir').
package_dir = self.get_package_dir(package)
modules = self.find_package_modules(package, package_dir)
# Now loop over the modules we found, "building" each one (just
# copy it to self.build_lib).
for (package_, module, module_file) in modules:
assert package == package_
self.build_module(module, module_file, package)
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
prefix = self.build_lib
if prefix[-1] != os.sep:
prefix = prefix + os.sep
# XXX this code is essentially the same as the 'byte_compile()
# method of the "install_lib" command, except for the determination
# of the 'prefix' string. Hmmm.
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=prefix, dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=prefix, dry_run=self.dry_run)
|
mit
|
smnitro555/ESWegarden
|
raspibot/lib/python2.7/site-packages/setuptools/monkey.py
|
80
|
5255
|
"""
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
import inspect
from .py26compat import import_module
import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def get_unpatched(item):
lookup = (
get_unpatched_class if isinstance(item, six.class_types) else
get_unpatched_function if isinstance(item, types.FunctionType) else
lambda item: None
)
return lookup(item)
def get_unpatched_class(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
external_bases = (
cls
for cls in inspect.getmro(cls)
if not cls.__module__.startswith('setuptools')
)
base = next(external_bases)
if not base.__module__.startswith('distutils'):
msg = "distutils has already been patched by %r" % cls
raise AssertionError(msg)
return base
def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
has_issue_12885 = sys.version_info <= (3, 5, 3)
if has_issue_12885:
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = setuptools.findall
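    # Older interpreters carry an outdated default upload URL; point distutils at the current Warehouse endpoint.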
needs_warehouse = (
sys.version_info < (2, 7, 13)
or
(3, 0) < sys.version_info < (3, 3, 7)
or
(3, 4) < sys.version_info < (3, 4, 6)
or
(3, 5) < sys.version_info <= (3, 5, 3)
)
if needs_warehouse:
warehouse = 'https://upload.pypi.org/legacy/'
distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
_patch_distribution_metadata_write_pkg_file()
_patch_distribution_metadata_write_pkg_info()
# Install Distribution throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = setuptools.dist.Distribution
# Install the patched Extension
distutils.core.Extension = setuptools.extension.Extension
distutils.extension.Extension = setuptools.extension.Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = (
setuptools.extension.Extension
)
patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata_write_pkg_file():
"""Patch write_pkg_file to also write Requires-Python/Requires-External"""
distutils.dist.DistributionMetadata.write_pkg_file = (
setuptools.dist.write_pkg_file
)
def _patch_distribution_metadata_write_pkg_info():
"""
Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
correct this undesirable behavior.
"""
environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
if not environment_local:
return
distutils.dist.DistributionMetadata.write_pkg_info = (
setuptools.dist.write_pkg_info
)
def patch_func(replacement, target_mod, func_name):
"""
Patch func_name in target_mod with replacement
Important - original must be resolved by name to avoid
patching an already patched function.
"""
original = getattr(target_mod, func_name)
# set the 'unpatched' attribute on the replacement to
# point to the original.
vars(replacement).setdefault('unpatched', original)
# replace the function in the original module
setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
return getattr(candidate, 'unpatched')
def patch_for_msvc_specialized_compiler():
"""
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
# import late to avoid circular imports on Python < 3.5
msvc = import_module('setuptools.msvc')
if platform.system() != 'Windows':
        # Compilers are only available on Microsoft Windows
return
def patch_params(mod_name, func_name):
"""
Prepare the parameters for patch_func to patch indicated function.
"""
repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
repl_name = repl_prefix + func_name.lstrip('_')
repl = getattr(msvc, repl_name)
mod = import_module(mod_name)
if not hasattr(mod, func_name):
raise ImportError(func_name)
return repl, mod, func_name
# Python 2.7 to 3.4
msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
# Python 3.5+
msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
try:
# Patch distutils.msvc9compiler
patch_func(*msvc9('find_vcvarsall'))
patch_func(*msvc9('query_vcvarsall'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler._get_vc_env
patch_func(*msvc14('_get_vc_env'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler.gen_lib_options for Numpy
patch_func(*msvc14('gen_lib_options'))
except ImportError:
pass
|
gpl-3.0
|
Zlash65/erpnext
|
erpnext/patches/v10_0/repost_gle_for_purchase_receipts_with_rejected_items.py
|
11
|
1024
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
def execute():
for company in frappe.get_all("Company"):
if not erpnext.is_perpetual_inventory_enabled(company.name):
continue
acc_frozen_upto = frappe.db.get_value("Accounts Settings", None, "acc_frozen_upto") or "1900-01-01"
pr_with_rejected_warehouse = frappe.db.sql("""
select pr.name
from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
where pr.name = pr_item.parent
and pr.posting_date > %s
and pr.docstatus=1
and pr.company = %s
and pr_item.rejected_qty > 0
""", (acc_frozen_upto, company.name), as_dict=1)
for d in pr_with_rejected_warehouse:
doc = frappe.get_doc("Purchase Receipt", d.name)
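			# Temporarily mark the receipt as cancelled so its existing GL entries are reversed before reposting.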
doc.docstatus = 2
doc.make_gl_entries_on_cancel(repost_future_gle=False)
# update gl entries for submit state of PR
doc.docstatus = 1
doc.make_gl_entries(repost_future_gle=False)
|
gpl-3.0
|
somcom3x/tw_herc_kernel
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
5411
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
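	# Helpers to convert between trace time offsets and horizontal pixel positions at the current zoom level.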
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
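	# Map a y pixel coordinate to its row index; return -1 for clicks in the spacing between rows or outside the drawn area.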
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
gpl-2.0
|
msrb/freeipa
|
ipatests/test_xmlrpc/test_permission_plugin.py
|
2
|
152297
|
# Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test the `ipalib/plugins/permission.py` module.
"""
from __future__ import print_function
import os
import nose
from ipalib import api, errors
from ipatests.test_xmlrpc import objectclasses
from ipatests.test_xmlrpc.xmlrpc_test import Declarative
from ipapython.dn import DN
import inspect
try:
from ipaserver.plugins.ldap2 import ldap2
except ImportError:
have_ldap2 = False
else:
have_ldap2 = True
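# ldap2 may not be importable in every test environment; record its availability instead of failing at import time.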
permission1 = u'testperm'
permission1_dn = DN(('cn',permission1),
api.env.container_permission,api.env.basedn)
permission1_renamed = u'testperm1_rn'
permission1_renamed_dn = DN(('cn',permission1_renamed),
api.env.container_permission,api.env.basedn)
permission1_renamed_ucase = u'Testperm_RN'
permission1_renamed_ucase_dn = DN(('cn',permission1_renamed_ucase),
api.env.container_permission,api.env.basedn)
permission2 = u'testperm2'
permission2_dn = DN(('cn',permission2),
api.env.container_permission,api.env.basedn)
permission3 = u'testperm3'
permission3_dn = DN(('cn',permission3),
api.env.container_permission,api.env.basedn)
permission3_attributelevelrights = {
'member': u'rscwo',
'seealso': u'rscwo',
'ipapermissiontype': u'rscwo',
'cn': u'rscwo',
'businesscategory': u'rscwo',
'objectclass': u'rscwo',
'memberof': u'rscwo',
'aci': u'rscwo',
'ipapermlocation': u'rscwo',
'o': u'rscwo',
'ipapermincludedattr': u'rscwo',
'ipapermdefaultattr': u'rscwo',
'ipapermexcludedattr': u'rscwo',
'owner': u'rscwo',
'ou': u'rscwo',
'ipapermright': u'rscwo',
'nsaccountlock': u'rscwo',
'description': u'rscwo',
'ipapermtargetfilter': u'rscwo',
'ipapermtargetto': u'rscwo',
'ipapermtargetfrom': u'rscwo',
'ipapermbindruletype': u'rscwo',
'ipapermtarget': u'rscwo',
'type': u'rscwo',
'targetgroup': u'rscwo',
'attrs': u'rscwo',
}
privilege1 = u'testpriv1'
privilege1_dn = DN(('cn',privilege1),
api.env.container_privilege,api.env.basedn)
invalid_permission1 = u'bad;perm'
users_dn = DN(api.env.container_user, api.env.basedn)
groups_dn = DN(api.env.container_group, api.env.basedn)
etc_dn = DN('cn=etc', api.env.basedn)
nonexistent_dn = DN('cn=does not exist', api.env.basedn)
admin_dn = DN('uid=admin', users_dn)
group_filter = u'(|(objectclass=ipausergroup)(objectclass=posixgroup))'
def verify_permission_aci(name, dn, acistring):
"""Return test dict that verifies the ACI at the given location"""
return dict(
desc="Verify ACI of %s #(%s)" % (name, lineinfo(2)),
command=('aci_show', [name], dict(
aciprefix=u'permission', location=dn, raw=True)),
expected=dict(
result=dict(aci=acistring),
summary=None,
value=name,
),
)
def verify_permission_aci_missing(name, dn):
"""Return test dict that checks the ACI at the given location is missing"""
return dict(
desc="Verify ACI of %s is missing #(%s)" % (name, lineinfo(2)),
command=('aci_show', [name], dict(
aciprefix=u'permission', location=dn, raw=True)),
expected=errors.NotFound(
reason='ACI with name "%s" not found' % name),
)
def lineinfo(level):
"""Return "filename:lineno" for `level`-th caller"""
# Declarative tests hide tracebacks.
# Including this info in the test name makes it possible
# to locate failing tests.
frame = inspect.currentframe()
for i in range(level):
frame = frame.f_back
lineno = frame.f_lineno
filename = os.path.basename(frame.f_code.co_filename)
return '%s:%s' % (filename, lineno)
class test_permission_negative(Declarative):
"""Make sure invalid operations fail"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
tests = [
dict(
desc='Try to retrieve non-existent %r' % permission1,
command=('permission_show', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to update non-existent %r' % permission1,
command=('permission_mod', [permission1], dict(ipapermright=u'all')),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to delete non-existent %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Search for non-existent %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Try creating %r with no ipapermright' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
attrs=[u'sn'],
)
),
expected=errors.RequirementError(name='ipapermright'),
),
dict(
desc='Try creating %r with no target option' % permission1,
command=(
'permission_add', [permission1], dict(
ipapermright=u'write',
)
),
expected=errors.ValidationError(
name='target',
error='there must be at least one target entry specifier '
'(e.g. target, targetfilter, attrs)'),
),
verify_permission_aci_missing(permission1, api.env.basedn),
dict(
desc='Try to create invalid %r' % invalid_permission1,
command=('permission_add', [invalid_permission1], dict(
type=u'user',
ipapermright=u'write',
)),
expected=errors.ValidationError(name='name',
error='May only contain letters, numbers, -, _, ., and space'),
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Try creating %r with bad attribute name' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
attrs=u'bogusattr',
)
),
expected=errors.InvalidSyntax(
attr=r'targetattr "bogusattr" does not exist in schema. '
r'Please add attributeTypes "bogusattr" to '
r'schema if necessary. '
r'ACL Syntax Error(-5):'
r'(targetattr = \22bogusattr\22)'
r'(targetfilter = \22(objectclass=posixaccount)\22)'
r'(version 3.0;acl \22permission:%(name)s\22;'
r'allow (write) groupdn = \22ldap:///%(dn)s\22;)' % dict(
name=permission1,
dn=permission1_dn),
),
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Try to create permission with : in the name',
command=('permission_add', ['bad:' + permission1], dict(
type=u'user',
ipapermright=u'write',
)),
expected=errors.ValidationError(name='name',
error='May only contain letters, numbers, -, _, ., and space'),
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Try to create permission with full and extra target filter',
command=('permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
ipapermtargetfilter=u'(cn=*)',
extratargetfilter=u'(sn=*)',
)),
expected=errors.ValidationError(name='ipapermtargetfilter',
error='cannot specify full target filter and extra target '
'filter simultaneously'),
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Create %r so we can try breaking it' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=[u'write'],
attrs=[u'sn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
dict(
            desc='Try to remove ipapermright from %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermright=None,
)
),
expected=errors.RequirementError(name='ipapermright'),
),
dict(
desc='Try to remove type from %r' % permission1,
command=(
'permission_mod', [permission1], dict(
attrs=None,
type=None,
)
),
expected=errors.ValidationError(
name='target',
error='there must be at least one target entry specifier '
'(e.g. target, targetfilter, attrs)'),
),
dict(
desc='Try to "remove" empty memberof from %r' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=None,
)
),
expected=errors.EmptyModlist(),
),
dict(
desc='Try to remove targetfilter and memberof from %r' % permission1,
command=(
'permission_mod', [permission1], dict(
attrs=None,
ipapermtargetfilter=None,
)
),
expected=errors.ValidationError(
name='target',
error='there must be at least one target entry specifier '
'(e.g. target, targetfilter, attrs)'),
),
dict(
desc='Try to rename %r to invalid %r' % (
permission1, invalid_permission1),
command=('permission_mod', [permission1], dict(
rename=invalid_permission1,
)),
expected=errors.ValidationError(name='rename',
error='May only contain letters, numbers, -, _, ., and space'),
),
dict(
desc='Try setting ipapermexcludedattr on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermexcludedattr=[u'cn'],
)
),
expected=errors.ValidationError(
name='ipapermexcludedattr',
error='only available on managed permissions'),
),
dict(
            desc='Try setting both full and extra target filter on %s' % permission1,
command=('permission_mod', [permission1], dict(
ipapermtargetfilter=u'(cn=*)',
extratargetfilter=u'(sn=*)',
)),
expected=errors.ValidationError(name='ipapermtargetfilter',
error='cannot specify full target filter and extra target '
'filter simultaneously'),
),
]
class test_permission(Declarative):
"""Misc. tests for the permission plugin"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
('permission_del', [permission2], {'force': True}),
('permission_del', [permission3], {'force': True}),
('permission_del', [permission1_renamed], {'force': True}),
('permission_del', [permission1_renamed_ucase], {'force': True}),
('privilege_del', [privilege1], {}),
]
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=[u'write'],
attrs=[u'sn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Try to create duplicate %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=[u'write'],
attrs=[u'sn'],
),
),
expected=errors.DuplicateEntry(
message='permission with name "%s" already exists' % permission1),
),
dict(
desc='Create %r' % privilege1,
command=('privilege_add', [privilege1],
dict(description=u'privilege desc. 1')
),
expected=dict(
value=privilege1,
summary=u'Added privilege "%s"' % privilege1,
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
objectclass=objectclasses.privilege,
),
),
),
dict(
desc='Add permission %r to privilege %r' % (permission1, privilege1),
command=('privilege_add_permission', [privilege1],
dict(permission=permission1)
),
expected=dict(
completed=1,
failed=dict(
member=dict(
permission=[],
),
),
result={
'dn': privilege1_dn,
'cn': [privilege1],
'description': [u'privilege desc. 1'],
'memberof_permission': [permission1],
'objectclass': objectclasses.privilege,
}
),
),
dict(
desc='Retrieve %r' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
),
),
dict(
desc='Retrieve %r with --raw' % permission1,
command=('permission_show', [permission1], {'raw': True}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member': [privilege1_dn],
'ipapermincludedattr': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermright': [u'write'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
'ipapermtargetfilter': [u'(objectclass=posixaccount)'],
'aci': ['(targetattr = "sn")'
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%(name)s";'
'allow (write) groupdn = "ldap:///%(pdn)s";)' %
{'tdn': DN(('uid', '*'), users_dn),
'name': permission1,
'pdn': permission1_dn}],
},
),
),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Search for %r using --name' % permission1,
command=('permission_find', [], {'cn': permission1}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Search for non-existent permission using --name',
command=('permission_find', [], {'cn': u'notfound'}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Search for %r' % privilege1,
command=('permission_find', [privilege1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Search for %r with --raw' % permission1,
command=('permission_find', [permission1], {'raw': True}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member': [privilege1_dn],
'ipapermincludedattr': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermright': [u'write'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
'ipapermtargetfilter': [u'(objectclass=posixaccount)'],
'aci': ['(targetattr = "sn")'
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%(name)s";'
'allow (write) groupdn = "ldap:///%(pdn)s";)' %
{'tdn': DN(('uid', '*'), users_dn),
'name': permission1,
'pdn': permission1_dn}],
},
],
),
),
dict(
desc='Create %r' % permission2,
command=(
'permission_add', [permission2], dict(
type=u'user',
ipapermright=u'write',
setattr=u'owner=cn=test',
addattr=u'owner=cn=test2',
attrs=[u'cn'],
)
),
expected=dict(
value=permission2,
summary=u'Added permission "%s"' % permission2,
result=dict(
dn=permission2_dn,
cn=[permission2],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
owner=[u'cn=test', u'cn=test2'],
attrs=[u'cn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission2, users_dn,
'(targetattr = "cn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission2 +
'allow (write) groupdn = "ldap:///%s";)' % permission2_dn,
),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
{
'dn': permission2_dn,
'cn': [permission2],
'objectclass': objectclasses.permission,
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'cn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Search for %r with --pkey-only' % permission1,
command=('permission_find', [permission1], {'pkey_only' : True}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
},
{
'dn': permission2_dn,
'cn': [permission2],
},
],
),
),
dict(
desc='Search by ACI attribute with --pkey-only',
command=('permission_find', [], {'pkey_only': True,
'attrs': [u'krbminpwdlife']}),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': DN(('cn', 'System: Modify Group Password Policy'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Modify Group Password Policy'],
},
{
'dn': DN(('cn', 'System: Read Group Password Policy'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Read Group Password Policy'],
},
],
),
),
dict(
desc='Search for %r' % privilege1,
command=('privilege_find', [privilege1], {}),
expected=dict(
count=1,
truncated=False,
summary=u'1 privilege matched',
result=[
{
'dn': privilege1_dn,
'cn': [privilege1],
'description': [u'privilege desc. 1'],
'memberof_permission': [permission1],
},
],
),
),
dict(
desc='Search for %r with a limit of 1 (truncated)' % permission1,
command=('permission_find', [permission1], dict(sizelimit=1)),
expected=dict(
count=1,
truncated=True,
summary=u'1 permission matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
dict(
desc='Search for %r with a limit of 2' % permission1,
command=('permission_find', [permission1], dict(sizelimit=2)),
expected=dict(
count=2,
truncated=False,
summary=u'2 permissions matched',
result=[
{
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
'member_privilege': [privilege1],
},
{
'dn': permission2_dn,
'cn': [permission2],
'objectclass': objectclasses.permission,
'type': [u'user'],
'ipapermright': [u'write'],
'attrs': [u'cn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
],
),
),
# This tests setting truncated to True in the post_callback of
# permission_find(). The return order in LDAP is not guaranteed
# so do not check the actual entry.
dict(
desc='Search for permissions by attr with a limit of 1 (truncated)',
command=('permission_find', [u'Modify'],
dict(attrs=u'ipaenabledflag', sizelimit=1)),
expected=dict(
count=1,
truncated=True,
summary=u'1 permission matched',
result=[lambda res:
DN(res['dn']).endswith(DN(api.env.container_permission,
api.env.basedn)) and
'ipapermission' in res['objectclass']],
),
),
dict(
desc='Update %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermright=u'read',
memberof=u'ipausers',
setattr=u'owner=cn=other-test',
addattr=u'owner=cn=other-test2',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
member_privilege=[privilege1],
type=[u'user'],
ipapermright=[u'read'],
memberof=[u'ipausers'],
owner=[u'cn=other-test', u'cn=other-test2'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(memberOf=%s)' % DN('cn=ipausers', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (read) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Retrieve %r to verify update' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'read'],
'memberof': [u'ipausers'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
),
),
dict(
desc='Try to rename %r to existing permission %r' % (permission1,
permission2),
command=(
'permission_mod', [permission1], dict(rename=permission2,
ipapermright=u'all',)
),
expected=errors.DuplicateEntry(),
),
dict(
desc='Try to rename %r to empty name' % (permission1),
command=(
'permission_mod', [permission1], dict(rename=u'',
ipapermright=u'all',)
),
expected=errors.ValidationError(name='rename',
error=u'New name can not be empty'),
),
dict(
desc='Check integrity of original permission %r' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'read'],
'memberof': [u'ipausers'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
),
),
dict(
desc='Rename %r to permission %r' % (permission1,
permission1_renamed),
command=(
'permission_mod', [permission1], dict(rename=permission1_renamed,
ipapermright= u'all',)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result={
'dn': permission1_renamed_dn,
'cn': [permission1_renamed],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'all'],
'memberof': [u'ipausers'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
),
),
verify_permission_aci_missing(permission1, users_dn),
verify_permission_aci(
permission1_renamed, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(memberOf=%s)' % DN('cn=ipausers', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1_renamed +
'allow (all) groupdn = "ldap:///%s";)' % permission1_renamed_dn,
),
dict(
desc='Rename %r to permission %r' % (permission1_renamed,
permission1_renamed_ucase),
command=(
'permission_mod', [permission1_renamed], dict(rename=permission1_renamed_ucase,
ipapermright= u'write',)
),
expected=dict(
value=permission1_renamed,
summary=u'Modified permission "%s"' % permission1_renamed,
result={
'dn': permission1_renamed_ucase_dn,
'cn': [permission1_renamed_ucase],
'objectclass': objectclasses.permission,
'member_privilege': [privilege1],
'type': [u'user'],
'ipapermright': [u'write'],
'memberof': [u'ipausers'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
},
),
),
verify_permission_aci_missing(permission1_renamed, users_dn),
verify_permission_aci(
permission1_renamed_ucase, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(memberOf=%s)' % DN('cn=ipausers', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1_renamed_ucase +
'allow (write) groupdn = "ldap:///%s";)' %
permission1_renamed_ucase_dn,
),
dict(
desc='Change %r to a subtree type' % permission1_renamed_ucase,
command=(
'permission_mod', [permission1_renamed_ucase],
dict(ipapermlocation=users_dn, type=None)
),
expected=dict(
value=permission1_renamed_ucase,
summary=u'Modified permission "%s"' % permission1_renamed_ucase,
result=dict(
dn=permission1_renamed_ucase_dn,
cn=[permission1_renamed_ucase],
objectclass=objectclasses.permission,
member_privilege=[privilege1],
ipapermlocation=[users_dn],
ipapermright=[u'write'],
memberof=[u'ipausers'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
),
),
),
verify_permission_aci(
permission1_renamed_ucase, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(memberOf=%s)")' % DN('cn=ipausers', groups_dn) +
'(version 3.0;acl "permission:%s";' % permission1_renamed_ucase +
'allow (write) groupdn = "ldap:///%s";)' %
permission1_renamed_ucase_dn,
),
dict(
desc='Reset --subtree of %r' % permission2,
command=(
'permission_mod', [permission2],
dict(ipapermlocation=api.env.basedn)
),
expected=dict(
value=permission2,
summary=u'Modified permission "%s"' % permission2,
result={
'dn': permission2_dn,
'cn': [permission2],
'objectclass': objectclasses.permission,
'ipapermright': [u'write'],
'attrs': [u'cn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'extratargetfilter': [u'(objectclass=posixaccount)'],
'ipapermlocation': [api.env.basedn],
},
),
),
verify_permission_aci(
permission2, api.env.basedn,
'(targetattr = "cn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission2 +
'allow (write) groupdn = "ldap:///%s";)' % permission2_dn,
),
dict(
desc='Change subtree of %r to admin' % permission1_renamed_ucase,
command=(
'permission_mod', [permission1_renamed_ucase],
dict(ipapermlocation=admin_dn)
),
expected=dict(
value=permission1_renamed_ucase,
summary=u'Modified permission "%s"' % permission1_renamed_ucase,
result=dict(
dn=permission1_renamed_ucase_dn,
cn=[permission1_renamed_ucase],
objectclass=objectclasses.permission,
member_privilege=[privilege1],
ipapermlocation=[admin_dn],
ipapermright=[u'write'],
memberof=[u'ipausers'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
),
),
),
verify_permission_aci(
permission1_renamed_ucase, admin_dn,
'(targetattr = "sn")' +
'(targetfilter = "(memberOf=%s)")' % DN('cn=ipausers', groups_dn) +
'(version 3.0;acl "permission:%s";' % permission1_renamed_ucase +
'allow (write) groupdn = "ldap:///%s";)' %
permission1_renamed_ucase_dn,
),
dict(
desc='Search for %r using --subtree' % permission1_renamed_ucase,
command=('permission_find', [],
{'ipapermlocation': u'ldap:///%s' % admin_dn}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn':permission1_renamed_ucase_dn,
'cn':[permission1_renamed_ucase],
'objectclass': objectclasses.permission,
'member_privilege':[privilege1],
'ipapermlocation': [admin_dn],
'ipapermright':[u'write'],
'memberof':[u'ipausers'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
},
],
),
),
dict(
desc='Search using nonexistent --subtree',
command=('permission_find', [], {'ipapermlocation': u'foo'}),
expected=errors.ConversionError(
name='subtree', error='malformed RDN string = "foo"'),
),
dict(
desc='Search using --targetgroup',
command=('permission_find', [], {'targetgroup': u'ipausers'}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
{
'dn': DN(('cn', 'System: Add User to default group'),
api.env.container_permission, api.env.basedn),
'cn': [u'System: Add User to default group'],
'objectclass': objectclasses.permission,
'member_privilege': [u'User Administrators'],
'attrs': [u'member'],
'targetgroup': [u'ipausers'],
'memberindirect_role': [u'User Administrator'],
'ipapermright': [u'write'],
'ipapermbindruletype': [u'permission'],
'ipapermtarget': [DN(
'cn=ipausers', api.env.container_group,
api.env.basedn)],
'ipapermlocation': [groups_dn],
'ipapermdefaultattr': [u'member'],
'ipapermissiontype': [u'V2', u'MANAGED', u'SYSTEM'],
}
],
),
),
dict(
desc='Delete %r' % permission1_renamed_ucase,
command=('permission_del', [permission1_renamed_ucase], {}),
expected=dict(
result=dict(failed=[]),
value=[permission1_renamed_ucase],
summary=u'Deleted permission "%s"' % permission1_renamed_ucase,
)
),
verify_permission_aci_missing(permission1_renamed_ucase, users_dn),
dict(
desc='Try to delete non-existent %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to retrieve non-existent %r' % permission1,
command=('permission_show', [permission1], {}),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Try to update non-existent %r' % permission1,
command=('permission_mod', [permission1], dict(rename=u'Foo')),
expected=errors.NotFound(
reason=u'%s: permission not found' % permission1),
),
dict(
desc='Delete %r' % permission2,
command=('permission_del', [permission2], {}),
expected=dict(
result=dict(failed=[]),
value=[permission2],
summary=u'Deleted permission "%s"' % permission2,
)
),
verify_permission_aci_missing(permission2, users_dn),
dict(
desc='Search for %r' % permission1,
command=('permission_find', [permission1], {}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Delete %r' % privilege1,
command=('privilege_del', [privilege1], {}),
expected=dict(
result=dict(failed=[]),
value=[privilege1],
summary=u'Deleted privilege "%s"' % privilege1,
)
),
dict(
desc='Try to create permission %r with non-existing memberof' % permission1,
command=(
'permission_add', [permission1], dict(
memberof=u'nonexisting',
ipapermright=u'write',
attrs=[u'cn'],
)
),
expected=errors.NotFound(reason=u'nonexisting: group not found'),
),
dict(
desc='Create memberof permission %r' % permission1,
command=(
'permission_add', [permission1], dict(
memberof=u'editors',
ipapermright=u'write',
type=u'user',
attrs=[u'sn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
memberof=[u'editors'],
ipapermright=[u'write'],
type=[u'user'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&(memberOf=%s)' % DN('cn=editors', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Try to update non-existent memberof of %r' % permission1,
command=('permission_mod', [permission1], dict(
memberof=u'nonexisting')),
expected=errors.NotFound(reason=u'nonexisting: group not found'),
),
dict(
desc='Update memberof permission %r' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=u'admins',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
memberof=[u'admins'],
ipapermright=[u'write'],
type=[u'user'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Unset memberof of permission %r' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=None,
)
),
expected=dict(
summary=u'Modified permission "%s"' % permission1,
value=permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
type=[u'user'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Delete %r' % permission1,
command=('permission_del', [permission1], {}),
expected=dict(
result=dict(failed=[]),
value=[permission1],
summary=u'Deleted permission "%s"' % permission1,
)
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Create targetgroup permission %r' % permission1,
command=(
'permission_add', [permission1], dict(
targetgroup=u'editors',
ipapermright=u'write',
attrs=[u'sn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
targetgroup=[u'editors'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermtarget=[DN(('cn', 'editors'), groups_dn)],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(target = "ldap:///%s")' % DN('cn=editors', groups_dn) +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Create %r' % permission3,
command=(
'permission_add', [permission3], dict(
type=u'user',
ipapermright=u'write',
attrs=[u'cn']
)
),
expected=dict(
value=permission3,
summary=u'Added permission "%s"' % permission3,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
                attrs=[u'cn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission3, users_dn,
'(targetattr = "cn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission3 +
'allow (write) groupdn = "ldap:///%s";)' % permission3_dn,
),
dict(
desc='Retrieve %r with --all --rights' % permission3,
        command=('permission_show', [permission3], {'all': True, 'rights': True}),
expected=dict(
value=permission3,
summary=None,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=[u'user'],
attrs=[u'cn'],
ipapermincludedattr=[u'cn'],
ipapermright=[u'write'],
attributelevelrights=permission3_attributelevelrights,
ipapermbindruletype=[u'permission'],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
dict(
desc='Modify %r with --all --rights' % permission3,
command=('permission_mod', [permission3], {
'all': True, 'rights': True,
'attrs': [u'cn', u'uid']}),
expected=dict(
value=permission3,
summary=u'Modified permission "%s"' % permission3,
result=dict(
dn=permission3_dn,
cn=[permission3],
objectclass=objectclasses.permission,
type=[u'user'],
attrs=[u'cn', u'uid'],
ipapermincludedattr=[u'cn', u'uid'],
ipapermright=[u'write'],
attributelevelrights=permission3_attributelevelrights,
ipapermbindruletype=[u'permission'],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission3, users_dn,
'(targetattr = "cn || uid")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission3 +
'allow (write) groupdn = "ldap:///%s";)' % permission3_dn,
),
dict(
desc='Try to modify %r with naked targetfilter' % permission1,
command=('permission_mod', [permission1],
{'ipapermtargetfilter': u"cn=admin"}),
expected=errors.ValidationError(
name='rawfilter',
error='must be enclosed in parentheses'),
),
dict(
desc='Try to modify %r with invalid targetfilter' % permission1,
command=('permission_mod', [permission1],
{'ipapermtargetfilter': u"(ceci n'est pas un filtre)"}),
expected=errors.ValidationError(
name='ipapermtargetfilter',
error='Bad search filter'),
),
dict(
desc='Try setting nonexisting location on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermlocation=nonexistent_dn,
)
),
expected=errors.ValidationError(
name='ipapermlocation',
error='Entry %s does not exist' % nonexistent_dn)
),
dict(
desc='Search for nonexisting permission with ":" in the name',
command=(
'permission_find', ['doesnotexist:' + permission1], {}
),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
]
class test_permission_rollback(Declarative):
"""Test rolling back changes after failed update"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
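    # _verifications is re-run after the initial add and again after each
    # failed update below, to check that a rejected permission_mod leaves
    # both the permission entry and its ACI untouched (i.e. the change was
    # rolled back).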
_verifications = [
dict(
desc='Retrieve %r' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result={
'dn': permission1_dn,
'cn': [permission1],
'objectclass': objectclasses.permission,
'ipapermright': [u'write'],
'attrs': [u'sn'],
'ipapermbindruletype': [u'permission'],
'ipapermissiontype': [u'SYSTEM', u'V2'],
'ipapermlocation': [users_dn],
'ipapermtarget': [DN(('uid', 'admin'), users_dn)],
},
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(target = "ldap:///%s")' % DN(('uid', 'admin'), users_dn) +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
verify_permission_aci_missing(permission1, etc_dn)
]
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
ipapermlocation=users_dn,
ipapermtarget=DN('uid=admin', users_dn),
ipapermright=[u'write'],
attrs=[u'sn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
ipapermtarget=[DN(('uid', 'admin'), users_dn)],
),
),
),
] + _verifications + [
dict(
desc='Move %r to non-existent DN' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermlocation=DN('foo=bar'),
)
),
expected=errors.ValidationError(
name='ipapermlocation',
error='Entry foo=bar does not exist'),
),
] + _verifications + [
dict(
desc='Move %r to another DN' % permission1,
command=('permission_mod', [permission1],
dict(ipapermlocation=etc_dn)
),
expected=errors.InvalidSyntax(
attr=r'ACL Invalid Target Error(-8): '
r'Target is beyond the scope of the ACL'
r'(SCOPE:%(sdn)s) '
r'(targetattr = \22sn\22)'
r'(target = \22ldap:///%(tdn)s\22)'
r'(version 3.0;acl \22permission:testperm\22;'
r'allow (write) groupdn = \22ldap:///%(pdn)s\22;)' % dict(
sdn=etc_dn,
tdn=DN('uid=admin', users_dn),
pdn=permission1_dn)),
),
] + _verifications + [
dict(
desc='Try adding an invalid attribute on %r with --all --rights' % permission1,
command=(
'permission_mod', [permission1], dict(
attrs=[u'cn', u'bogusattributexyz'],
rights=True,
all=True,
)
),
expected=errors.InvalidSyntax(
attr=r'targetattr "bogusattributexyz" does not exist '
r'in schema. Please add attributeTypes '
r'"bogusattributexyz" to schema if necessary. ACL Syntax '
r'Error(-5):(targetattr = \22bogusattributexyz || cn\22)'
r'(target = \22ldap:///%(tdn)s\22)'
r'(version 3.0;acl \22permission:%(name)s\22;'
r'allow (write) groupdn = \22ldap:///%(dn)s\22;)' % dict(
tdn=DN('uid=admin', users_dn),
name=permission1,
dn=permission1_dn),
),
),
] + _verifications
class test_permission_sync_attributes(Declarative):
"""Test the effects of setting permission attributes"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
ipapermlocation=users_dn,
ipapermright=u'write',
attrs=u'sn',
ipapermtargetfilter=[
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount))")'
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Unset location on %r, verify type is gone' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermlocation=None,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
extratargetfilter=[
u'(objectclass=posixaccount)'],
memberof=[u'admins'],
ipapermlocation=[api.env.basedn],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(targetfilter = "(&(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount))")'
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
verify_permission_aci_missing(permission1, users_dn),
dict(
desc='Reset location on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermlocation=users_dn,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount))")'
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
verify_permission_aci_missing(permission1, api.env.basedn),
dict(
desc='Unset objectclass filter on %r, verify type is gone' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermtargetfilter=u'(memberOf=%s)' % DN(('cn', 'admins'),
groups_dn),
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(memberOf=%s)")' % DN('cn=admins', groups_dn) +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Unset targetfilter on %r, verify memberof is gone' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermtargetfilter=None,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set type of %r to group' % permission1,
command=(
'permission_mod', [permission1], dict(
type=u'group',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[groups_dn],
),
),
),
verify_permission_aci(
permission1, groups_dn,
'(targetattr = "sn")' +
'(targetfilter = "%s")' % group_filter +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set target on %r, verify targetgroup is set' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermtarget=DN('cn=editors', groups_dn),
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermtarget=[DN('cn=editors', groups_dn)],
ipapermlocation=[groups_dn],
targetgroup=[u'editors'],
),
),
),
verify_permission_aci(
permission1, groups_dn,
'(targetattr = "sn")' +
'(target = "ldap:///%s")' % DN(('cn', 'editors'), groups_dn) +
'(targetfilter = "%s")' % group_filter +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set extra targetfilter on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
extratargetfilter=u'(cn=blabla)',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermtarget=[DN('cn=editors', groups_dn)],
ipapermlocation=[groups_dn],
targetgroup=[u'editors'],
extratargetfilter=[u'(cn=blabla)'],
),
),
),
verify_permission_aci(
permission1, groups_dn,
'(targetattr = "sn")' +
'(target = "ldap:///%s")' % DN(('cn', 'editors'), groups_dn) +
'(targetfilter = "(&(cn=blabla)%s)")' % group_filter +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Retrieve %r with --all' % permission1,
command=(
'permission_show', [permission1], dict(all=True)
),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermtarget=[DN('cn=editors', groups_dn)],
ipapermlocation=[groups_dn],
targetgroup=[u'editors'],
extratargetfilter=[u'(cn=blabla)'],
ipapermtargetfilter=[u'(cn=blabla)', group_filter],
),
),
),
dict(
desc='Set type of %r back to user' % permission1,
command=(
'permission_mod', [permission1], dict(
type=u'user', ipapermtarget=None,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
extratargetfilter=[u'(cn=blabla)'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&(cn=blabla)(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
]
class test_permission_sync_nice(Declarative):
"""Test the effects of setting convenience options on permissions"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
attrs=u'sn',
memberof=u'admins',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount))")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Unset type on %r, verify target & filter are gone' % permission1,
command=(
'permission_mod', [permission1], dict(
type=None,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
memberof=[u'admins'],
ipapermlocation=[api.env.basedn],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(targetfilter = "(memberOf=%s)")' % DN('cn=admins', groups_dn) +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Unset memberof on %r, verify targetfilter is gone' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=None,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set type of %r to group' % permission1,
command=(
'permission_mod', [permission1], dict(
type=u'group',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[groups_dn],
),
),
),
verify_permission_aci(
permission1, groups_dn,
'(targetattr = "sn")' +
'(targetfilter = "%s")' % group_filter +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set targetgroup on %r, verify target is set' % permission1,
command=(
'permission_mod', [permission1], dict(
targetgroup=u'editors',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'group'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermtarget=[DN('cn=editors', groups_dn)],
ipapermlocation=[groups_dn],
targetgroup=[u'editors'],
),
),
),
verify_permission_aci(
permission1, groups_dn,
'(targetattr = "sn")' +
'(target = "ldap:///%s")' % DN(('cn', 'editors'), groups_dn) +
'(targetfilter = "%s")' % group_filter +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
]
class test_permission_targetfilter(Declarative):
"""Test the targetfilter options on permissions"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
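    # Raw ACI expected right after the permission is created below; it is
    # checked against LDAP via verify_permission_aci and also reused as the
    # expected 'aci' attribute in the --raw retrieval tests.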
_initial_aci = (
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(cn=*)' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount)' +
'(sn=*)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
)
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
attrs=u'sn',
memberof=u'admins',
extratargetfilter=[u'(cn=*)', u'(sn=*)'],
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(sn=*)'],
ipapermtargetfilter=[
u'(cn=*)', u'(sn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
verify_permission_aci(permission1, users_dn, _initial_aci),
dict(
desc='Retrieve %r' % permission1,
command=(
'permission_show', [permission1], dict()
),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(sn=*)'],
),
),
),
dict(
desc='Retrieve %r with --all' % permission1,
command=(
'permission_show', [permission1], dict(all=True)
),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(sn=*)'],
ipapermtargetfilter=[
u'(cn=*)', u'(sn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
dict(
desc='Retrieve %r with --raw' % permission1,
command=(
'permission_show', [permission1], dict(raw=True)
),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
aci=[_initial_aci],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[
u'(cn=*)', u'(sn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
dict(
desc='Retrieve %r with --all and --raw' % permission1,
command=(
'permission_show', [permission1], dict(all=True, raw=True)
),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
aci=[_initial_aci],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[
u'(cn=*)', u'(sn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
dict(
desc='Modify extratargetfilter of %r' % permission1,
command=(
'permission_mod', [permission1], dict(
extratargetfilter=[u'(cn=*)', u'(l=*)'],
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(l=*)'],
ipapermtargetfilter=[
u'(cn=*)', u'(l=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(cn=*)' +
'(l=*)' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
dict(
desc='Remove raw targetfilter of %r' % permission1,
command=(
'permission_mod', [permission1], dict(
ipapermtargetfilter=None,
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
dict(
desc='Set extra targetfilter on %r to restore' % permission1,
command=(
'permission_mod', [permission1], dict(
extratargetfilter=[
u'(cn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)'],
ipapermtargetfilter=[
u'(cn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(cn=*)' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
] + [
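        # The following find tests are generated for both --extratargetfilter
        # and --ipapermtargetfilter, each searched with an "extra" filter
        # value, a "non-extra" one, and a non-matching one (see the for
        # clauses after this block).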
dict(
desc='Search for %r using %s %s' % (permission1, value_name, option_name),
command=(
'permission_find', [permission1],
{option_name: value, 'all': True}
),
expected=dict(
summary=u'1 permission matched' if should_find else u'0 permissions matched',
truncated=False,
count=1 if should_find else 0,
result=[dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)'],
ipapermtargetfilter=[
u'(cn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)'],
)] if should_find else [],
),
)
for option_name in (
'extratargetfilter',
'ipapermtargetfilter',
)
for value_name, value, should_find in (
('"extra"', u'(cn=*)', True),
('"non-extra"', u'(objectclass=posixaccount)', True),
('non-existing', u'(sn=insert a very improbable last name)', False),
)
] + [
dict(
desc='Set extra objectclass filter on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
extratargetfilter=[u'(cn=*)', u'(objectclass=top)'],
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(objectclass=top)'],
ipapermtargetfilter=[
u'(cn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=posixaccount)',
u'(objectclass=top)'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(cn=*)' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=posixaccount)' +
'(objectclass=top)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
dict(
desc='Unset type on %r to verify extra objectclass filter stays' % permission1,
command=(
'permission_mod', [permission1], dict(
type=None,
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
memberof=[u'admins'],
extratargetfilter=[u'(cn=*)', u'(objectclass=top)'],
ipapermtargetfilter=[
u'(cn=*)',
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(objectclass=top)'],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(cn=*)' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(objectclass=top)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
dict(
desc='Set wildcard memberof filter on %r' % permission1,
command=(
'permission_mod', [permission1], dict(
extratargetfilter=u'(memberof=*)',
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
memberof=[u'admins'],
extratargetfilter=[u'(memberof=*)'],
ipapermtargetfilter=[
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn),
u'(memberof=*)'],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(targetfilter = "(&' +
'(memberOf=%s)' % DN('cn=admins', groups_dn) +
'(memberof=*)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
dict(
desc='Remove --memberof on %r to verify wildcard is still there' % permission1,
command=(
'permission_mod', [permission1], dict(
memberof=[],
all=True,
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
attrs=[u'sn'],
ipapermincludedattr=[u'sn'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
extratargetfilter=[u'(memberof=*)'],
ipapermtargetfilter=[u'(memberof=*)'],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetattr = "sn")' +
'(targetfilter = "(memberof=*)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn
),
]
def _make_permission_flag_tests(flags, expected_message):
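    """Build add/mod/del/privilege test steps for the given permission flags.

    Each returned step expects modification and deletion to fail with an
    ACIError carrying expected_message, while adding the permission to a
    privilege and force-deleting it still succeed.
    """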
return [
dict(
desc='Create %r with flags %s' % (permission1, flags),
command=(
'permission_add_noaci', [permission1], dict(
ipapermissiontype=flags,
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.system_permission,
ipapermissiontype=flags,
),
),
),
dict(
desc='Try to modify %r' % permission1,
command=('permission_mod', [permission1], {'type': u'user'}),
expected=errors.ACIError(info=expected_message),
),
dict(
desc='Try to delete %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.ACIError(info=expected_message),
),
dict(
desc='Add %r to %r' % (permission1, privilege1),
command=('privilege_add_permission', [privilege1],
{'permission': permission1}),
expected=dict(
completed=1,
failed=dict(
member=dict(
permission=[],
),
),
result={
'dn': privilege1_dn,
'cn': [privilege1],
'description': [u'privilege desc. 1'],
'memberof_permission': [permission1],
'objectclass': objectclasses.privilege,
}
),
),
dict(
desc='Delete %r with --force' % permission1,
command=('permission_del', [permission1], {'force': True}),
expected=dict(
result=dict(failed=[]),
value=[permission1],
summary=u'Deleted permission "%s"' % permission1,
),
),
]
class test_permission_flags(Declarative):
"""Test that permission flags are handled correctly"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
('privilege_del', [privilege1], {}),
]
tests = [
dict(
desc='Create %r' % privilege1,
command=('privilege_add', [privilege1],
dict(description=u'privilege desc. 1')
),
expected=dict(
value=privilege1,
summary=u'Added privilege "%s"' % privilege1,
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
objectclass=objectclasses.privilege,
),
),
),
] + (
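        # Run the same flag scenario for SYSTEM, for an unknown flag, and for
        # a combination of both; only the expected error message differs.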
_make_permission_flag_tests(
[u'SYSTEM'],
'A SYSTEM permission may not be modified or removed') +
_make_permission_flag_tests(
[u'??'],
'Permission with unknown flag ?? may not be modified or removed') +
_make_permission_flag_tests(
[u'SYSTEM', u'??'],
'Permission with unknown flag ?? may not be modified or removed'))
def check_legacy_results(results):
"""Check that the expected number of legacy permissions are in $SUFFIX"""
legacy_permissions = [p for p in results
if not p.get('ipapermissiontype')]
print(legacy_permissions)
assert len(legacy_permissions) == 8, len(legacy_permissions)
return True
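# check_legacy_results is plugged in as the expected 'result' below; the
# Declarative framework calls a callable expected value with the actual value
# and requires it to return something truthy, just like the lambdas used for
# 'count' and 'summary'.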
class test_permission_legacy(Declarative):
"""Tests for non-upgraded permissions"""
tests = [
dict(
desc='Check that some legacy permission is found in $SUFFIX',
command=('permission_find', [],
{'ipapermlocation': api.env.basedn}),
expected=dict(
count=lambda count: count,
truncated=False,
summary=lambda s: True,
result=check_legacy_results,
),
),
]
class test_permission_bindtype(Declarative):
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
('permission_del', [permission1_renamed], {'force': True}),
('privilege_del', [privilege1], {}),
]
tests = [
dict(
desc='Create anonymous %r' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
ipapermbindruletype=u'anonymous',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'anonymous'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///anyone";)',
),
dict(
desc='Create %r' % privilege1,
command=('privilege_add', [privilege1],
dict(description=u'privilege desc. 1')
),
expected=dict(
value=privilege1,
summary=u'Added privilege "%s"' % privilege1,
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
objectclass=objectclasses.privilege,
),
),
),
dict(
desc='Try to add %r to %r' % (permission1, privilege1),
command=(
'privilege_add_permission', [privilege1], dict(
permission=[permission1],
)
),
expected=errors.ValidationError(
name='permission',
error=u'cannot add permission "%s" with bindtype "%s" to a '
'privilege' % (permission1, 'anonymous')),
),
dict(
desc='Change binddn of %r to all' % permission1,
command=(
'permission_mod', [permission1], dict(
type=u'user',
ipapermbindruletype=u'all',
)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Try to add %r to %r' % (permission1, privilege1),
command=(
'privilege_add_permission', [privilege1], dict(
permission=[permission1],
)
),
expected=errors.ValidationError(
name='permission',
error=u'cannot add permission "%s" with bindtype "%s" to a '
'privilege' % (permission1, 'all')),
),
dict(
desc='Search for %r using --bindtype' % permission1,
command=('permission_find', [permission1],
{'ipapermbindruletype': u'all'}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[
dict(
dn=permission1_dn,
cn=[permission1],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
],
),
),
dict(
desc='Search for %r using bad --bindtype' % permission1,
command=('permission_find', [permission1],
{'ipapermbindruletype': u'anonymous'}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Add zero permissions to %r' % (privilege1),
command=('privilege_add_permission', [privilege1], {}),
expected=dict(
completed=0,
failed=dict(member=dict(permission=[])),
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
objectclass=objectclasses.privilege,
),
),
),
dict(
desc='Rename %r to permission %r' % (permission1,
permission1_renamed),
command=(
'permission_mod', [permission1], dict(rename=permission1_renamed)
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_renamed_dn,
cn=[permission1_renamed],
type=[u'user'],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1_renamed, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1_renamed +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Reset binddn of %r to permission' % permission1_renamed,
command=(
'permission_mod', [permission1_renamed], dict(
type=u'user',
ipapermbindruletype=u'permission',
)
),
expected=dict(
value=permission1_renamed,
summary=u'Modified permission "%s"' % permission1_renamed,
result=dict(
dn=permission1_renamed_dn,
cn=[permission1_renamed],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1_renamed, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1_renamed +
'allow (write) groupdn = "ldap:///%s";)' % permission1_renamed_dn,
),
dict(
desc='Rename %r back to %r' % (permission1_renamed, permission1),
command=(
'permission_mod', [permission1_renamed],
dict(rename=permission1)
),
expected=dict(
value=permission1_renamed,
summary=u'Modified permission "%s"' % permission1_renamed,
result=dict(
dn=permission1_dn,
cn=[permission1],
type=[u'user'],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Add %r to %r' % (permission1, privilege1),
command=(
'privilege_add_permission', [privilege1], dict(
permission=[permission1],
)
),
expected=dict(
completed=1,
failed=dict(member=dict(permission=[])),
result=dict(
dn=privilege1_dn,
cn=[privilege1],
description=[u'privilege desc. 1'],
memberof_permission=[permission1],
objectclass=objectclasses.privilege,
)
),
),
dict(
desc='Try to change binddn of %r to anonymous' % permission1,
command=(
'permission_mod', [permission1], dict(
type=u'user',
ipapermbindruletype=u'anonymous',
)
),
expected=errors.ValidationError(
name='ipapermbindruletype',
error=u'cannot set bindtype for a permission that is '
'assigned to a privilege')
),
]
class test_managed_permissions(Declarative):
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
('permission_del', [permission2], {'force': True}),
]
@classmethod
def setup_class(cls):
super(test_managed_permissions, cls).setup_class()
if not have_ldap2:
raise nose.SkipTest('server plugin not available')
def add_managed_permission(self):
"""Add a managed permission and the corresponding ACI"""
ldap = ldap2(api)
ldap.connect()
result = api.Command.permission_add(permission1, type=u'user',
ipapermright=u'write',
attrs=[u'cn'])
# TODO: This hack relies on the permission internals.
# Change as necessary.
        # Inject the default attribute list directly into the permission entry
entry = ldap.get_entry(permission1_dn)
entry['ipapermdefaultattr'] = ['l', 'o', 'cn']
ldap.update_entry(entry)
# Update the ACI via the API
result = api.Command.permission_mod(permission1,
attrs=[u'l', u'o', u'cn'])
# Set the permission type to MANAGED
entry = ldap.get_entry(permission1_dn)
entry['ipapermissiontype'].append('MANAGED')
ldap.update_entry(entry)
tests = [
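        # A bare callable in the tests list is invoked as a regular step by
        # the Declarative runner; here it creates the managed permission that
        # the following dict-based tests operate on.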
add_managed_permission,
dict(
desc='Show pre-created %r' % permission1,
command=('permission_show', [permission1], {'all': True}),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "cn || l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
] + [
# Verify that most permission attributes can't be changed
dict(
desc='Try to modify %s in %r' % (attr_name, permission1),
command=('permission_mod', [permission1],
{attr_name: value}),
expected=errors.ValidationError(
name=err_attr or attr_name,
error='not modifiable on managed permissions'),
)
for attr_name, err_attr, value in (
('ipapermlocation', None, users_dn),
('ipapermright', None, u'compare'),
('ipapermtarget', None, users_dn),
('ipapermtargetfilter', None, u'(ou=engineering)'),
('memberof', 'ipapermtargetfilter', u'admins'),
('targetgroup', 'ipapermtarget', u'admins'),
('type', 'ipapermlocation', u'group'),
('extratargetfilter', 'extratargetfilter', u'(cn=*)'),
)
] + [
dict(
desc='Try to rename %r' % permission1,
command=('permission_mod', [permission1],
{'rename': permission2}),
expected=errors.ValidationError(
name='rename',
error='cannot rename managed permissions'),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "cn || l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Modify included and excluded attrs in %r' % permission1,
command=('permission_mod', [permission1],
{'ipapermincludedattr': [u'dc'],
'ipapermexcludedattr': [u'cn'],
'all': True}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'dc'],
ipapermincludedattr=[u'dc'],
ipapermexcludedattr=[u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "dc || l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Modify included attrs in %r' % permission1,
command=('permission_mod', [permission1],
{'ipapermincludedattr': [u'cn', u'sn'],
'all': True}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn'],
ipapermincludedattr=[u'cn', u'sn'],
ipapermexcludedattr=[u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o || sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Add ineffective included attr to %r' % permission1,
command=('permission_mod', [permission1],
{'ipapermincludedattr': [u'cn', u'sn', u'o'],
'all': True}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o || sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Modify excluded attrs in %r' % permission1,
command=('permission_mod', [permission1],
{'ipapermexcludedattr': [u'cn', u'sn'],
'all': True}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn', u'sn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Modify bind rule in %r' % permission1,
command=('permission_mod', [permission1],
{'ipapermbindruletype': u'all'}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn', u'sn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Show %r with no options' % permission1,
command=('permission_show', [permission1], {}),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn', u'sn'],
),
),
),
dict(
desc='Show %r with --all' % permission1,
command=('permission_show', [permission1], {'all': True}),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn', u'sn'],
),
),
),
dict(
desc='Show %r with --raw' % permission1,
command=('permission_show', [permission1], {'raw': True}),
expected=dict(
value=permission1,
summary=None,
result=dict(
dn=permission1_dn,
cn=[permission1],
aci=['(targetattr = "l || o")'
'(targetfilter = "(objectclass=posixaccount)")'
'(version 3.0;acl "permission:%(name)s";'
'allow (write) userdn = "ldap:///all";)' %
{'name': permission1}],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermtargetfilter=[u'(objectclass=posixaccount)'],
ipapermdefaultattr=[u'l', u'o', u'cn'],
ipapermincludedattr=[u'cn', u'sn', u'o'],
ipapermexcludedattr=[u'cn', u'sn'],
),
),
),
dict(
desc='Modify attrs of %r to normalize' % permission1,
command=('permission_mod', [permission1],
{'attrs': [u'l', u'o']}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o'],
ipapermexcludedattr=[u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Modify attrs of %r to add sn' % permission1,
command=('permission_mod', [permission1],
{'attrs': [u'l', u'o', u'sn']}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn'],
ipapermincludedattr=[u'sn'],
ipapermexcludedattr=[u'cn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "l || o || sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Search for %r using all its --attrs' % permission1,
command=('permission_find', [permission1],
{'cn': permission1, 'attrs': [u'l', u'o', u'sn']}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn'],
ipapermincludedattr=[u'sn'],
ipapermexcludedattr=[u'cn'],
)],
),
),
dict(
desc='Search for %r using some --attrs' % permission1,
command=('permission_find', [permission1],
{'cn': permission1, 'attrs': [u'l', u'sn']}),
expected=dict(
count=1,
truncated=False,
summary=u'1 permission matched',
result=[dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn'],
ipapermincludedattr=[u'sn'],
ipapermexcludedattr=[u'cn'],
)],
),
),
dict(
desc='Search for %r using excluded --attrs' % permission1,
command=('permission_find', [permission1],
{'cn': permission1, 'attrs': [u'sn', u'cn']}),
expected=dict(
count=0,
truncated=False,
summary=u'0 permissions matched',
result=[],
),
),
dict(
desc='Modify attrs of %r to allow cn again' % permission1,
command=('permission_mod', [permission1],
{'attrs': [u'l', u'o', u'sn', u'cn']}),
expected=dict(
value=permission1,
summary=u'Modified permission "testperm"',
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermissiontype=[u'SYSTEM', u'V2', u'MANAGED'],
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'all'],
ipapermlocation=[users_dn],
ipapermdefaultattr=[u'l', u'o', u'cn'],
attrs=[u'l', u'o', u'sn', u'cn'],
ipapermincludedattr=[u'sn'],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetattr = "cn || l || o || sn")' +
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) userdn = "ldap:///all";)',
),
dict(
desc='Try to delete %r' % permission1,
command=('permission_del', [permission1], {}),
expected=errors.ACIError(
info='cannot delete managed permissions'),
),
dict(
desc='Delete %r with --force' % permission1,
command=('permission_del', [permission1], {'force': True}),
expected=dict(
result=dict(failed=[]),
value=[permission1],
summary=u'Deleted permission "%s"' % permission1,
),
),
]
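# Each test case below is a dict with a 'desc' label, a 'command' tuple of
# (plugin command, positional args, options) and the 'expected' output;
# verify_permission_aci(name, dn, aci) steps assert the exact ACI string the
# permission should have produced at the given base DN.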
class test_permission_filters(Declarative):
"""Test multi-valued filters, type, memberof"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
tests = [
dict(
desc='Create %r with many filters' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
memberof=u'ipausers',
ipapermright=u'write',
ipapermtargetfilter=[
u'(objectclass=top)',
u'(memberof=%s)' % DN(('cn', 'admins'), groups_dn),
]
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
memberof=[u'admins', u'ipausers'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
extratargetfilter=[
u'(objectclass=top)',
],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(&'
'(memberOf=%s)' % DN(('cn', 'ipausers'), groups_dn) +
'(memberof=%s)' % DN(('cn', 'admins'), groups_dn) +
'(objectclass=posixaccount)(objectclass=top)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Remove type from %r while setting other filters' % permission1,
command=(
'permission_mod', [permission1],
dict(
type=None,
memberof=u'ipausers',
ipapermtargetfilter=[
u'(objectclass=ipauser)',
u'(memberof=%s)' % DN(('cn', 'admins'), groups_dn),
],
),
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
memberof=[u'admins', u'ipausers'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
extratargetfilter=[
u'(objectclass=ipauser)',
],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetfilter = "(&'
'(memberOf=%s)' % DN(('cn', 'ipausers'), groups_dn) +
'(memberof=%s)' % DN(('cn', 'admins'), groups_dn) +
'(objectclass=ipauser)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Remove memberof from %r while adding a filter' % permission1,
command=(
'permission_mod', [permission1],
dict(
memberof=None,
addattr=u'ipapermtargetfilter=(cn=xyz)',
),
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
extratargetfilter=[
u'(cn=xyz)',
u'(objectclass=ipauser)',
],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetfilter = "(&'
'(cn=xyz)' +
'(objectclass=ipauser)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Set memberof, type and filter on %r at once' % permission1,
command=(
'permission_mod', [permission1],
dict(
type=u'user',
memberof=u'admins',
ipapermtargetfilter=u'(uid=abc)',
),
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
memberof=[u'admins'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
extratargetfilter=[
u'(uid=abc)',
],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(&'
u'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn) +
'(objectclass=posixaccount)' +
'(uid=abc)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Remove memberof & type from %r at once' % permission1,
command=(
'permission_mod', [permission1],
dict(
type=None,
memberof=None,
),
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
extratargetfilter=[
u'(uid=abc)',
],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetfilter = "(uid=abc)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Add multiple memberof to %r' % permission1,
command=(
'permission_mod', [permission1],
dict(
memberof=[u'admins', u'editors'],
),
),
expected=dict(
value=permission1,
summary=u'Modified permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
ipapermright=[u'write'],
memberof=[u'admins', u'editors'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[api.env.basedn],
extratargetfilter=[u'(uid=abc)'],
),
),
),
verify_permission_aci(
permission1, api.env.basedn,
'(targetfilter = "(&'
'(memberOf=%s)' % DN(('cn', 'admins'), groups_dn) +
'(memberOf=%s)' % DN(('cn', 'editors'), groups_dn) +
'(uid=abc)' +
')")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Delete %r' % permission1,
command=('permission_del', [permission1], {}),
expected=dict(
result=dict(failed=[]),
value=[permission1],
summary=u'Deleted permission "%s"' % permission1,
)
),
verify_permission_aci_missing(permission1, api.env.basedn),
dict(
desc='Create %r with empty filters [#4206]' % permission1,
command=(
'permission_add', [permission1], dict(
type=u'user',
ipapermright=u'write',
ipapermtargetfilter=u'',
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
type=[u'user'],
ipapermright=[u'write'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[users_dn],
),
),
),
verify_permission_aci(
permission1, users_dn,
'(targetfilter = "(objectclass=posixaccount)")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (write) groupdn = "ldap:///%s";)' % permission1_dn,
),
]
class test_permission_in_accounts(Declarative):
"""Test managing a permission in cn=accounts"""
tests = [
dict(
desc='Create %r in cn=accounts' % permission1,
command=(
'permission_add', [permission1], dict(
ipapermlocation=DN('cn=accounts', api.env.basedn),
ipapermright=u'add',
attrs=[u'cn'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
attrs=[u'cn'],
ipapermright=[u'add'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[DN('cn=accounts', api.env.basedn)],
),
),
),
verify_permission_aci(
permission1, DN('cn=accounts', api.env.basedn),
'(targetattr = "cn")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (add) groupdn = "ldap:///%s";)' % permission1_dn,
),
dict(
desc='Delete %r' % permission1,
command=(
'permission_del', [permission1], {}
),
expected=dict(
result=dict(failed=[]),
value=[permission1],
summary=u'Deleted permission "%s"' % permission1,
)
),
verify_permission_aci_missing(permission1, api.env.basedn),
]
class test_autoadd_operational_attrs(Declarative):
"""Test that read access to operational attributes is automatically added
"""
cleanup_commands = [
('permission_del', [permission1], {'force': True}),
]
tests = [
dict(
desc='Create %r' % permission1,
command=(
'permission_add', [permission1], dict(
ipapermlocation=DN('cn=accounts', api.env.basedn),
ipapermright=u'read',
attrs=[u'ObjectClass'],
)
),
expected=dict(
value=permission1,
summary=u'Added permission "%s"' % permission1,
result=dict(
dn=permission1_dn,
cn=[permission1],
objectclass=objectclasses.permission,
attrs=[u'ObjectClass', u'entryusn', u'createtimestamp',
u'modifytimestamp'],
ipapermright=[u'read'],
ipapermbindruletype=[u'permission'],
ipapermissiontype=[u'SYSTEM', u'V2'],
ipapermlocation=[DN('cn=accounts', api.env.basedn)],
),
),
),
verify_permission_aci(
permission1, DN('cn=accounts', api.env.basedn),
'(targetattr = "ObjectClass || createtimestamp || entryusn || ' +
'modifytimestamp")' +
'(version 3.0;acl "permission:%s";' % permission1 +
'allow (read) groupdn = "ldap:///%s";)' % permission1_dn,
),
]
|
gpl-3.0
|
XiaodunServerGroup/medicalmooc
|
common/djangoapps/xmodule_modifiers.py
|
6
|
9936
|
"""
Functions used to modify XBlock fragments for use in the LMS and Studio
"""
import datetime
import json
import logging
import static_replace
from django.conf import settings
from django.utils.timezone import UTC
from edxmako.shortcuts import render_to_string
from xblock.exceptions import InvalidScopeError
from xblock.fragment import Fragment
from xmodule.seq_module import SequenceModule
from xmodule.vertical_module import VerticalModule
from xmodule.x_module import shim_xmodule_js, XModuleDescriptor, XModule
from lms.lib.xblock.runtime import quote_slashes
log = logging.getLogger(__name__)
def wrap_fragment(fragment, new_content):
"""
    Returns a new Fragment that has `new_content` as its content, and all
    of the resources from `fragment`
"""
wrapper_frag = Fragment(content=new_content)
wrapper_frag.add_frag_resources(fragment)
return wrapper_frag
def wrap_xblock(runtime_class, block, view, frag, context, display_name_only=False, extra_data=None): # pylint: disable=unused-argument
"""
Wraps the results of rendering an XBlock view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param block: An XBlock (that may be an XModule or XModuleDescriptor)
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param display_name_only: If true, don't render the fragment content at all.
Instead, just render the `display_name` of `block`
:param extra_data: A dictionary with extra data values to be set on the wrapper
"""
if extra_data is None:
extra_data = {}
# If any mixins have been applied, then use the unmixed class
class_name = getattr(block, 'unmixed_class', block.__class__).__name__
data = {}
data.update(extra_data)
css_classes = ['xblock', 'xblock-' + view]
if isinstance(block, (XModule, XModuleDescriptor)):
if view == 'student_view':
# The block is acting as an XModule
css_classes.append('xmodule_display')
elif view == 'mobi_student_view':
# The block is acting as an XModule
css_classes.append('xmodule_display')
elif view == 'studio_view':
# The block is acting as an XModuleDescriptor
css_classes.append('xmodule_edit')
css_classes.append('xmodule_' + class_name)
data['type'] = block.js_module_name
shim_xmodule_js(frag)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = block.scope_ids.block_type
data['usage-id'] = quote_slashes(unicode(block.scope_ids.usage_id).encode('utf-8'))
template_context = {
'content': block.display_name if display_name_only else frag.content,
'classes': css_classes,
'display_name': block.display_name_with_default,
'data_attributes': ' '.join(u'data-{}="{}"'.format(key, value) for key, value in data.items()),
}
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
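# Illustrative usage sketch (names and values here are assumptions, not taken
# from this module): callers typically curry this wrapper with functools.partial
# and register it on the module system so every rendered fragment gets wrapped:
#
#     from functools import partial
#     wrappers = [partial(wrap_xblock, 'LmsRuntime',
#                         extra_data={'course-id': 'org/num/run'})]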
def replace_jump_to_id_urls(course_id, jump_to_id_base_url, block, view, frag, context): # pylint: disable=unused-argument
"""
This will replace a link between courseware in the format
/jump_to/<id> with a URL for a page that will correctly redirect
This is similar to replace_course_urls, but much more flexible and
    durable for Studio authored courses. See more comments in static_replace.replace_jump_to_id_urls
course_id: The course_id in which this rewrite happens
jump_to_id_base_url:
        An app-tier (e.g. LMS) absolute path to the base of the handler that will perform the
redirect. e.g. /courses/<org>/<course>/<run>/jump_to_id. NOTE the <id> will be appended to
the end of this URL at re-write time
output: a new :class:`~xblock.fragment.Fragment` that modifies `frag` with
        content that has been updated with /jump_to links replaced
"""
return wrap_fragment(frag, static_replace.replace_jump_to_id_urls(frag.content, course_id, jump_to_id_base_url))
def replace_course_urls(course_id, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /course/...
with urls that are /courses/<course_id>/...
"""
return wrap_fragment(frag, static_replace.replace_course_urls(frag.content, course_id))
def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /static/...
with urls that are /static/<prefix>/...
"""
return wrap_fragment(frag, static_replace.replace_static_urls(
frag.content,
data_dir,
course_id,
static_asset_path=static_asset_path
))
def grade_histogram(module_id):
'''
Print out a histogram of grades on a given problem in staff member debug info.
Warning: If a student has just looked at an xmodule and not attempted
it, their grade is None. Since there will always be at least one such student
this function almost always returns [].
'''
from django.db import connection
cursor = connection.cursor()
q = """SELECT courseware_studentmodule.grade,
COUNT(courseware_studentmodule.student_id)
FROM courseware_studentmodule
WHERE courseware_studentmodule.module_id=%s
GROUP BY courseware_studentmodule.grade"""
# Passing module_id this way prevents sql-injection.
cursor.execute(q, [module_id])
grades = list(cursor.fetchall())
grades.sort(key=lambda x: x[0]) # Add ORDER BY to sql query?
if len(grades) >= 1 and grades[0][0] is None:
return []
return grades
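# Example with hypothetical data: if the recorded grades for a module are
# 0.0 (12 students) and 1.0 (30 students), grade_histogram(module_id) returns
# [(0.0, 12), (1.0, 30)] -- (grade, count) pairs sorted by grade.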
def add_staff_debug_info(user, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the output of the old get_html function with additional information
for admin users only, including a histogram of student answers and the
definition of the xmodule
Does nothing if module is a SequenceModule or a VerticalModule.
"""
# TODO: make this more general, eg use an XModule attribute instead
if isinstance(block, (SequenceModule, VerticalModule)):
return frag
block_id = block.id
if block.has_score and settings.FEATURES.get('DISPLAY_HISTOGRAMS_TO_STAFF'):
histogram = grade_histogram(block_id)
render_histogram = len(histogram) > 0
else:
histogram = None
render_histogram = False
if settings.FEATURES.get('ENABLE_LMS_MIGRATION') and hasattr(block.runtime, 'filestore'):
[filepath, filename] = getattr(block, 'xml_attributes', {}).get('filename', ['', None])
osfs = block.runtime.filestore
if filename is not None and osfs.exists(filename):
# if original, unmangled filename exists then use it (github
# doesn't like symlinks)
filepath = filename
data_dir = block.static_asset_path or osfs.root_path.rsplit('/')[-1]
giturl = block.giturl or 'https://github.com/MITx'
edit_link = "%s/%s/tree/master/%s" % (giturl, data_dir, filepath)
else:
edit_link = False
# Need to define all the variables that are about to be used
giturl = ""
data_dir = ""
source_file = block.source_file # source used to generate the problem XML, eg latex or word
# useful to indicate to staff if problem has been released or not
# TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access, instead of now>mstart comparison here
now = datetime.datetime.now(UTC())
is_released = "unknown"
mstart = block.start
if mstart is not None:
is_released = "<font color='red'>Yes!</font>" if (now > mstart) else "<font color='green'>Not yet</font>"
field_contents = []
for name, field in block.fields.items():
try:
field_contents.append((name, field.read_from(block)))
except InvalidScopeError:
log.warning("Unable to read field in Staff Debug information", exc_info=True)
field_contents.append((name, "WARNING: Unable to read field"))
staff_context = {'fields': field_contents,
'xml_attributes': getattr(block, 'xml_attributes', {}),
'location': block.location,
'xqa_key': block.xqa_key,
'source_file': source_file,
'source_url': '%s/%s/tree/master/%s' % (giturl, data_dir, source_file),
'category': str(block.__class__.__name__),
# Template uses element_id in js function names, so can't allow dashes
'element_id': block.location.html_id().replace('-', '_'),
'edit_link': edit_link,
'user': user,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
'histogram': json.dumps(histogram),
'render_histogram': render_histogram,
'block_content': frag.content,
'is_released': is_released,
}
return wrap_fragment(frag, render_to_string("staff_problem_info.html", staff_context))
|
agpl-3.0
|
hynnet/openwrt-mt7620
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/distutils/command/bdist_dumb.py
|
151
|
5195
|
"""distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from sysconfig import get_python_version
from distutils.util import get_platform
from distutils.core import Command
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import DistutilsPlatformError
from distutils import log
class bdist_dumb (Command):
description = 'create a "dumb" built distribution'
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = None
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
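# Typical invocation (illustrative): "python setup.py bdist_dumb --format=gztar"
# builds the project, installs it into a temporary tree under the bdist base
# directory and archives that tree into dist/<name>-<version>.<platform>.tar.gz.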
|
gpl-2.0
|
openEduConnect/eduextractor
|
docs/conf.py
|
1
|
9538
|
# -*- coding: utf-8 -*-
#
# eduextractor documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 10 17:16:14 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'eduextractor'
copyright = u'2015, Hunter Owens'
author = u'Hunter Owens'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'eduextractordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'eduextractor.tex', u'eduextractor Documentation',
u'Hunter Owens', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'eduextractor', u'eduextractor Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'eduextractor', u'eduextractor Documentation',
author, 'eduextractor', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
mit
|
linebp/pandas
|
pandas/io/packers.py
|
4
|
27509
|
"""
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas._libs.tslib import NaTType
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer, _stringify_path
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
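# Minimal round-trip sketch (illustrative; assumes an existing DataFrame df):
#
#     to_msgpack('frame.msg', df, compress='zlib')   # write, optionally compressed
#     df2 = read_msgpack('frame.msg')                # a single stored object is returned as-is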
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
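# Example: dtype_for(21) and dtype_for(u'datetime64[ns]') both resolve to
# np.dtype('M8[ns]'); anything not in dtype_dict falls back to np.typeDict
# (or is returned unchanged).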
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
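# Example: c2f('1.0', '2.0', 'complex128') yields np.complex128(1+2j); the real
# and imaginary strings are parsed with the matching float type from c2f_dict.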
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64, NaTType)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'freq': freq,
u'tz': tz}
if isinstance(obj, NaTType):
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']]._from_ordinals(data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=pd_dtype,
name=obj[u'name'])
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = _safe_reshape(unconvert(
b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']), b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
except:
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
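# Round-trip sketch (illustrative): pack() returns bytes while unpack() expects
# a file-like object, so wrap the bytes in a buffer:
#
#     packed = pack({'a': 1})
#     objs = list(unpack(compat.BytesIO(packed)))  # packed lists come back as tuples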
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
|
bsd-3-clause
|
grave-w-grave/zulip
|
analytics/management/commands/update_analytics_counts.py
|
18
|
1988
|
from __future__ import absolute_import
from __future__ import print_function
from argparse import ArgumentParser
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from analytics.models import RealmCount, UserCount
from analytics.lib.counts import COUNT_STATS, CountStat, process_count_stat
from zerver.lib.timestamp import datetime_to_string, is_timezone_aware
from zerver.models import UserProfile, Message
from typing import Any
class Command(BaseCommand):
help = """Fills Analytics tables.
Run as a cron job that runs every hour."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--time', '-t',
type=str,
help='Update stat tables from current state to --time. Defaults to the current time.',
default=datetime_to_string(timezone.now()))
parser.add_argument('--utc',
type=bool,
help="Interpret --time in UTC.",
default=False)
parser.add_argument('--stat', '-s',
type=str,
help="CountStat to process. If omitted, all stats are processed.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
fill_to_time = parse_datetime(options['time'])
if options['utc']:
fill_to_time = fill_to_time.replace(tzinfo=timezone.utc)
if not (is_timezone_aware(fill_to_time)):
raise ValueError("--time must be timezone aware. Maybe you meant to use the --utc option?")
if options['stat'] is not None:
process_count_stat(COUNT_STATS[options['stat']], fill_to_time)
else:
for stat in COUNT_STATS.values():
process_count_stat(stat, fill_to_time)
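# Example invocation (illustrative): "./manage.py update_analytics_counts
# --time '2016-11-03 17:00:00' --utc True" fills the count tables up to that
# UTC instant; omitting --time fills up to the current time.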
|
apache-2.0
|
SVoxel/R7800
|
git_home/samba.git/third_party/waf/wafadmin/Constants.py
|
32
|
1307
|
#!/usr/bin/env python
# encoding: utf-8
# Yinon dot me gmail 2008
"""
these constants are somewhat public; try not to mess with them
maintainer: the version number is updated from the top-level wscript file
"""
# do not touch these three lines, they are updated automatically
HEXVERSION=0x105019
WAFVERSION="1.5.19"
WAFREVISION = "9709M"
ABI = 7
# permissions
O644 = 420
O755 = 493
MAXJOBS = 99999999
CACHE_DIR = 'c4che'
CACHE_SUFFIX = '.cache.py'
DBFILE = '.wafpickle-%d' % ABI
WSCRIPT_FILE = 'wscript'
WSCRIPT_BUILD_FILE = 'wscript_build'
WAF_CONFIG_LOG = 'config.log'
WAF_CONFIG_H = 'config.h'
SIG_NIL = 'iluvcuteoverload'
VARIANT = '_VARIANT_'
DEFAULT = 'default'
SRCDIR = 'srcdir'
BLDDIR = 'blddir'
APPNAME = 'APPNAME'
VERSION = 'VERSION'
DEFINES = 'defines'
UNDEFINED = ()
BREAK = "break"
CONTINUE = "continue"
# task scheduler options
JOBCONTROL = "JOBCONTROL"
MAXPARALLEL = "MAXPARALLEL"
NORMAL = "NORMAL"
# task state
NOT_RUN = 0
MISSING = 1
CRASHED = 2
EXCEPTION = 3
SKIPPED = 8
SUCCESS = 9
ASK_LATER = -1
SKIP_ME = -2
RUN_ME = -3
LOG_FORMAT = "%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
HOUR_FORMAT = "%H:%M:%S"
TEST_OK = True
CFG_FILES = 'cfg_files'
# positive '->' install
# negative '<-' uninstall
INSTALL = 1337
UNINSTALL = -1337
|
gpl-2.0
|
woodrow-shen/ycmd
|
ycmd/completers/go/gocode_completer.py
|
14
|
6808
|
#!/usr/bin/env python
#
# Copyright (C) 2015 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import subprocess
from ycmd import responses
from ycmd import utils
from ycmd.completers.completer import Completer
GO_FILETYPES = set( [ 'go' ] )
BINARY_NOT_FOUND_MESSAGE = ( 'Gocode binary not found. Did you build it? ' +
'You can do so by running ' +
'"./install.sh --gocode-completer".' )
COMPLETION_ERROR_MESSAGE = 'Gocode shell call failed.'
PARSE_ERROR_MESSAGE = 'Gocode returned invalid JSON response.'
NO_COMPLETIONS_MESSAGE = 'Gocode returned empty JSON response.'
GOCODE_PANIC_MESSAGE = ( 'Gocode panicked trying to find completions, ' +
'you likely have a syntax error.' )
PATH_TO_GOCODE_BINARY = os.path.join(
os.path.abspath( os.path.dirname( __file__ ) ),
'..', '..', '..', 'third_party', 'gocode',
'gocode' + ( '.exe' if utils.OnWindows() else '' ) )
_logger = logging.getLogger( __name__ )
class GoCodeCompleter( Completer ):
subcommands = {
'StartServer': ( lambda self, request_data: self._StartServer() ),
'StopServer': ( lambda self, request_data: self._StopServer() ),
}
def __init__( self, user_options ):
super( GoCodeCompleter, self ).__init__( user_options )
self._popener = utils.SafePopen # Overridden in test.
self._binary = self.FindGoCodeBinary( user_options )
if not self._binary:
_logger.error( BINARY_NOT_FOUND_MESSAGE )
raise RuntimeError( BINARY_NOT_FOUND_MESSAGE )
_logger.info( 'Enabling go completion using %s binary', self._binary )
def SupportedFiletypes( self ):
return GO_FILETYPES
def ComputeCandidatesInner( self, request_data ):
filename = request_data[ 'filepath' ]
_logger.info( "gocode completion request %s" % filename )
if not filename:
return
contents = utils.ToUtf8IfNeeded(
request_data[ 'file_data' ][ filename ][ 'contents' ] )
offset = _ComputeOffset( contents, request_data[ 'line_num' ],
request_data[ 'column_num' ] )
stdoutdata = self._ExecuteGoCodeBinary( '-f=json', 'autocomplete',
filename, str(offset),
contents = contents )
try:
resultdata = json.loads( stdoutdata )
except ValueError:
_logger.error( PARSE_ERROR_MESSAGE )
raise RuntimeError( PARSE_ERROR_MESSAGE )
if len(resultdata) != 2:
_logger.error( NO_COMPLETIONS_MESSAGE )
raise RuntimeError( NO_COMPLETIONS_MESSAGE )
for result in resultdata[1]:
if result.get( 'class' ) == "PANIC":
raise RuntimeError( GOCODE_PANIC_MESSAGE )
return [ _ConvertCompletionData( x ) for x in resultdata[1] ]
def FindGoCodeBinary( self, user_options ):
""" Find the path to the gocode binary.
If 'gocode_binary_path' in the options is blank,
    use the version installed with YCM if it exists,
    otherwise fall back to the first gocode found on the PATH.
If the 'gocode_binary_path' is specified, use it
as an absolute path.
If the resolved binary exists, return the path,
otherwise return None. """
if user_options.get( 'gocode_binary_path' ):
# The user has explicitly specified a path.
if os.path.isfile( user_options[ 'gocode_binary_path' ] ):
return user_options[ 'gocode_binary_path' ]
else:
return None
# Try to use the bundled binary or one on the path.
if os.path.isfile( PATH_TO_GOCODE_BINARY ):
return PATH_TO_GOCODE_BINARY
return utils.PathToFirstExistingExecutable( [ 'gocode' ] )
def DefinedSubcommands( self ):
return GoCodeCompleter.subcommands.keys()
def OnFileReadyToParse( self, request_data ):
self._StartServer()
def OnUserCommand( self, arguments, request_data ):
if not arguments:
raise ValueError( self.UserCommandsHelpMessage() )
command = arguments[ 0 ]
if command in GoCodeCompleter.subcommands:
command_lamba = GoCodeCompleter.subcommands[ command ]
return command_lamba( self, request_data )
else:
raise ValueError( self.UserCommandsHelpMessage() )
def Shutdown( self ):
self._StopServer()
def _StartServer( self ):
""" Start the GoCode server """
self._ExecuteGoCodeBinary()
def _StopServer( self ):
""" Stop the GoCode server """
_logger.info( 'Stopping GoCode server' )
self._ExecuteGoCodeBinary( 'close' )
def _ExecuteGoCodeBinary( self, *args, **kwargs ):
""" Execute the GoCode binary with given arguments. Use the contents
argument to send data to GoCode. Return the standard output. """
proc = self._popener(
[ self._binary ] + list(args), stdin = subprocess.PIPE,
stdout = subprocess.PIPE, stderr = subprocess.PIPE )
contents = kwargs[ 'contents' ] if 'contents' in kwargs else None
stdoutdata, stderrdata = proc.communicate( contents )
if proc.returncode:
_logger.error( COMPLETION_ERROR_MESSAGE + " code %i stderr: %s",
proc.returncode, stderrdata)
raise RuntimeError( COMPLETION_ERROR_MESSAGE )
return stdoutdata
# Compute the byte offset in the file given the line and column.
# TODO(ekfriis): If this is slow, consider moving this to C++ ycm_core,
# perhaps in RequestWrap.
def _ComputeOffset( contents, line, col ):
curline = 1
curcol = 1
for i, byte in enumerate( contents ):
if curline == line and curcol == col:
return i
curcol += 1
if byte == '\n':
curline += 1
curcol = 1
_logger.error( 'GoCode completer - could not compute byte offset ' +
'corresponding to L%i C%i', line, col )
return -1
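# A quick worked example (illustrative comment, not in the original file):
# for the buffer "ab\ncd" with 1-based inputs, line 2 / column 1 refers to
# the 'c' character, which sits at byte index 3, so
# _ComputeOffset( "ab\ncd", 2, 1 ) returns 3.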
def _ConvertCompletionData( completion_data ):
return responses.BuildCompletionData(
insertion_text = completion_data[ 'name' ],
menu_text = completion_data[ 'name' ],
extra_menu_info = completion_data[ 'type' ],
kind = completion_data[ 'class' ],
detailed_info = ' '.join( [
completion_data[ 'name' ],
completion_data[ 'type' ],
completion_data[ 'class' ] ] ) )
|
gpl-3.0
|
masmullin2000/kernel_tp_ts_bkl
|
tools/perf/scripts/python/check-perf-trace.py
|
1997
|
2539
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
atruberg/django-custom
|
django/contrib/gis/geos/point.py
|
224
|
4351
|
from ctypes import c_uint
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos import prototypes as capi
from django.utils import six
from django.utils.six.moves import xrange
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, six.integer_types + (float,)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, next(i))
capi.cs_sety(cs, 0, next(i))
if ndim == 3: capi.cs_setz(cs, 0, next(i))
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty: return 0
if self.hasz: return 3
else: return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
|
bsd-3-clause
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/dask/array/tests/test_percentiles.py
|
4
|
2323
|
import pytest
pytest.importorskip('numpy')
import numpy as np
import dask.array as da
from dask.array.utils import assert_eq, same_keys
def test_percentile():
d = da.ones((16,), chunks=(4,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([1, 1, 1], dtype=d.dtype))
x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
d = da.from_array(x, chunks=(3,))
result = da.percentile(d, [0, 50, 100])
assert_eq(da.percentile(d, [0, 50, 100]),
np.array([0, 5, 20], dtype=result.dtype))
assert same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50, 100]))
assert not same_keys(da.percentile(d, [0, 50, 100]),
da.percentile(d, [0, 50]))
x = np.array(['a', 'a', 'd', 'd', 'd', 'e'])
d = da.from_array(x, chunks=(3,))
assert_eq(da.percentile(d, [0, 50, 100]),
np.array(['a', 'd', 'e'], dtype=x.dtype))
@pytest.mark.skip
def test_percentile_with_categoricals():
try:
import pandas as pd
except ImportError:
return
x0 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
x1 = pd.Categorical(['Alice', 'Bob', 'Charlie', 'Dennis', 'Alice', 'Alice'])
dsk = {('x', 0): x0, ('x', 1): x1}
x = da.Array(dsk, 'x', chunks=((6, 6),))
p = da.percentile(x, [50])
assert (p.compute().categories == x0.categories).all()
assert (p.compute().codes == [0]).all()
assert same_keys(da.percentile(x, [50]),
da.percentile(x, [50]))
def test_percentiles_with_empty_arrays():
x = da.ones(10, chunks=((5, 0, 5),))
assert_eq(da.percentile(x, [10, 50, 90]), np.array([1, 1, 1], dtype=x.dtype))
@pytest.mark.parametrize('q', [5, 5.0, np.int64(5), np.float64(5)])
def test_percentiles_with_scaler_percentile(q):
# Regression test to ensure da.percentile works with scalar percentiles
# See #3020
d = da.ones((16,), chunks=(4,))
assert_eq(da.percentile(d, q), np.array([1], dtype=d.dtype))
def test_unknown_chunk_sizes():
x = da.random.random(1000, chunks=(100,))
x._chunks = ((np.nan,) * 10,)
result = da.percentile(x, 50).compute()
assert 0.1 < result < 0.9
a, b = da.percentile(x, [40, 60]).compute()
assert 0.1 < a < 0.9
assert 0.1 < b < 0.9
assert a < b
|
gpl-3.0
|
fako/datascope
|
src/sources/models/google/text.py
|
1
|
2087
|
from datagrowth.exceptions import DGInvalidResource
from sources.models.google.query import GoogleQuery
class GoogleText(GoogleQuery):
URI_TEMPLATE = 'https://www.googleapis.com/customsearch/v1?q={}'
GET_SCHEMA = {
"args": {
"type": "array",
"items": [
{
"type": "string", # the query string
},
{
"type": "integer", # amount of desired images
},
],
"additionalItems": False,
"minItems": 1
},
"kwargs": None
}
def variables(self, *args):
args = args or self.request.get("args")
return {
"url": (args[0],),
"quantity": args[2] if len(args) > 2 else 0,
}
def auth_parameters(self):
params = super(GoogleText, self).auth_parameters()
params.update({
"cx": self.config.cx
})
return params
@property
def content(self):
content_type, data = super(GoogleText, self).content
try:
if data is not None:
data["queries"]["request"][0]["searchTerms"] = data["queries"]["request"][0]["searchTerms"][1:-1]
except (KeyError, IndexError):
            raise DGInvalidResource("Google Text resource does not specify searchTerms", self)
return content_type, data
def next_parameters(self):
if self.request["quantity"] <= 0:
return {}
content_type, data = super(GoogleText, self).content
missing_quantity = self.request["quantity"] - 10
try:
nextData = data["queries"]["nextPage"][0]
except KeyError:
return {}
return {
"start": nextData["startIndex"],
"quantity": missing_quantity
}
def _create_request(self, method, *args, **kwargs):
request = super(GoogleText, self)._create_request(method, *args, **kwargs)
request["quantity"] = self.variables(*args)["quantity"]
return request
|
gpl-3.0
|
SymbiFlow/prjxray
|
fuzzers/005-tilegrid/pcie/top.py
|
1
|
1574
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
def gen_sites():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site_name, site_type in gridinfo.sites.items():
if site_type in ['PCIE_2_1']:
yield tile_name, site_name
def write_params(params):
pinstr = 'tile,val,site\n'
for tile, (site, val) in sorted(params.items()):
pinstr += '%s,%s,%s\n' % (tile, val, site)
open('params.csv', 'w').write(pinstr)
def run():
print('''
module top(input wire in, output wire out);
''')
params = {}
sites = list(gen_sites())
for (tile_name, site_name), isone in zip(sites,
util.gen_fuzz_states(len(sites))):
params[tile_name] = (site_name, isone)
attr = "FALSE" if isone else "TRUE"
print(
'''
(* KEEP, DONT_TOUCH*)
PCIE_2_1 #(
.AER_CAP_PERMIT_ROOTERR_UPDATE("{}")
) pcie ();'''.format(attr))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
|
isc
|
prescottprue/PiOpenLighting
|
python/ola/OlaClient.py
|
3
|
35025
|
# This program is free software; you can redistribute it and/or modify
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# OlaClient.py
# Copyright (C) 2005-2009 Simon Newton
"""The client used to communicate with the Ola Server."""
__author__ = 'nomis52@gmail.com (Simon Newton)'
import array
import logging
import socket
import struct
from ola.rpc.StreamRpcChannel import StreamRpcChannel
from ola.rpc.SimpleRpcController import SimpleRpcController
from ola import Ola_pb2
from ola.UID import UID
"""The port that the OLA server listens on."""
OLA_PORT = 9010
class Error(Exception):
"""The base error class."""
class OLADNotRunningException(Error):
"""Thrown if we try to connect and olad isn't running."""
class Plugin(object):
"""Represents a plugin.
Attributes:
id: the id of this plugin
name: the name of this plugin
"""
def __init__(self, plugin_id, name):
self._id = plugin_id
self._name = name
@property
def id(self):
return self._id
@property
def name(self):
return self._name
def __cmp__(self, other):
return cmp(self._id, other._id)
def __str__(self):
    return 'Plugin %d (%s)' % (self._id, self._name)
# Populate the Plugin class attributes from the protobuf
for value in Ola_pb2._PLUGINIDS.values:
setattr(Plugin, value.name, value.number)
class Device(object):
"""Represents a device.
Attributes:
id: the unique id of this device
alias: the integer alias for this device
name: the name of this device
plugin_id: the plugin that this device belongs to
input_ports: a list of Input Port objects
output_ports: a list of Output Port objects
"""
def __init__(self, device_id, alias, name, plugin_id, input_ports,
output_ports):
self._id = device_id
self._alias = alias
self._name = name
self._plugin_id = plugin_id
self._input_ports = sorted(input_ports)
self._output_ports = sorted(output_ports)
@property
def id(self):
return self._id
@property
def alias(self):
return self._alias
@property
def name(self):
return self._name
@property
def plugin_id(self):
return self._plugin_id
@property
def input_ports(self):
return self._input_ports
@property
def output_ports(self):
return self._output_ports
def __cmp__(self, other):
return cmp(self._alias, other._alias)
class Port(object):
"""Represents a port.
Attributes:
id: the unique id of this port
universe: the universe that this port belongs to
active: True if this port is active
description: the description of the port
supports_rdm: if the port supports RDM
"""
def __init__(self, port_id, universe, active, description, supports_rdm):
self._id = port_id
self._universe = universe
self._active = active
self._description = description
self._supports_rdm = supports_rdm
@property
def id(self):
return self._id
@property
def universe(self):
return self._universe
@property
def active(self):
return self._active
@property
def description(self):
return self._description
@property
def supports_rdm(self):
return self._supports_rdm
def __cmp__(self, other):
return cmp(self._id, other._id)
class Universe(object):
"""Represents a universe.
Attributes:
id: the integer universe id
name: the name of this universe
merge_mode: the merge mode this universe is using
"""
LTP = Ola_pb2.LTP
HTP = Ola_pb2.HTP
def __init__(self, universe_id, name, merge_mode):
self._id = universe_id
self._name = name
self._merge_mode = merge_mode
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def merge_mode(self):
return self._merge_mode
def __cmp__(self, other):
return cmp(self._id, other._id)
class RequestStatus(object):
"""Represents the status of an reqeust.
Attributes:
state: the state of the operation
message: an error message if it failed
"""
SUCCESS, FAILED, CANCELLED = range(3)
def __init__(self, controller):
if controller.Failed():
self._state = self.FAILED
self._message = controller.ErrorText()
elif controller.IsCanceled():
self._state = self.CANCELLED
self._message = controller.ErrorText()
else:
self._state = self.SUCCESS
self._message = None
def Succeeded(self):
"""Returns true if this request succeeded."""
return self._state == self.SUCCESS
@property
def state(self):
return self._state
@property
def message(self):
return self._message
class RDMNack(object):
NACK_SYMBOLS_TO_VALUES = {
'NR_UNKNOWN_PID': (0, 'Unknown PID'),
'NR_FORMAT_ERROR': (1, 'Format Error'),
'NR_HARDWARE_FAULT': (2, 'Hardware fault'),
'NR_PROXY_REJECT': (3, 'Proxy reject'),
'NR_WRITE_PROTECT': (4, 'Write protect'),
'NR_UNSUPPORTED_COMMAND_CLASS': (5, 'Unsupported command class'),
'NR_DATA_OUT_OF_RANGE': (6, 'Data out of range'),
'NR_BUFFER_FULL': (7, 'Buffer full'),
'NR_PACKET_SIZE_UNSUPPORTED': (8, 'Packet size unsupported'),
'NR_SUB_DEVICE_OUT_OF_RANGE': (9, 'Sub device out of range'),
'NR_PROXY_BUFFER_FULL': (10, 'Proxy buffer full'),
}
# this is populated below
_CODE_TO_OBJECT = {}
def __init__(self, nack_value, description):
self._value = nack_value
self._description = description
@property
def value(self):
return self._value
def __str__(self):
return self._description
def __cmp__(self, other):
return cmp(self.value, other.value)
@classmethod
def LookupCode(cls, code):
obj = cls._CODE_TO_OBJECT.get(code, None)
if not obj:
obj = RDMNack(code, 'Unknown')
return obj
for symbol, (value, description) in RDMNack.NACK_SYMBOLS_TO_VALUES.iteritems():
nack = RDMNack(value, description)
setattr(RDMNack, symbol, nack)
RDMNack._CODE_TO_OBJECT[value] = nack
class RDMResponse(object):
"""Represents a RDM Response.
Failures can occur at many layers, the recommended way for dealing with
responses is:
Check .status.Succeeded(), if not true this indicates a rpc or server
error.
Check .response_code, if not RDM_COMPLETED_OK, it indicates a problem with
the RDM transport layer or malformed response.
If .response_code is RDM_COMPLETED_OK, .sub_device, .command_class, .pid,
.queued_messages hold the properties of the response.
Then check .response_type:
if .response_type is ACK:
.data holds the param data of the response.
If .response_type is ACK_TIMER:
.ack_timer: holds the number of ms before the response should be
available.
If .response_type is NACK_REASON:
.nack_reason holds the reason for nack'ing
Attributes:
status: The RequestStatus object for this request / response
response_code: The response code for the RDM request
response_type: The response type (ACK, ACK_TIMER, NACK_REASON) for
the request.
sub_device: The sub device that sent the response
    command_class: The RDM command class of the response
    pid: The param ID of the response
    data: The param data of the response
    queued_messages: The number of queued messages that remain.
nack_reason: If the response type was NACK_REASON, this is the reason for
the NACK.
ack_timer: If the response type was ACK_TIMER, this is the number of ms to
wait before checking for queued messages.
"""
RESPONSE_CODES_TO_STRING = {
Ola_pb2.RDM_COMPLETED_OK: 'Ok',
Ola_pb2.RDM_WAS_BROADCAST: 'Request was broadcast',
Ola_pb2.RDM_FAILED_TO_SEND: 'Failed to send request',
Ola_pb2.RDM_TIMEOUT: 'Response Timeout',
Ola_pb2.RDM_INVALID_RESPONSE: 'Invalid Response',
Ola_pb2.RDM_UNKNOWN_UID: 'Unknown UID',
Ola_pb2.RDM_CHECKSUM_INCORRECT: 'Incorrect Checksum',
Ola_pb2.RDM_TRANSACTION_MISMATCH: 'Transaction number mismatch',
Ola_pb2.RDM_SUB_DEVICE_MISMATCH: 'Sub device mismatch',
Ola_pb2.RDM_SRC_UID_MISMATCH: 'Source UID in response doesn\'t match',
Ola_pb2.RDM_DEST_UID_MISMATCH: (
'Destination UID in response doesn\'t match'),
Ola_pb2.RDM_WRONG_SUB_START_CODE: 'Incorrect sub start code',
Ola_pb2.RDM_PACKET_TOO_SHORT: (
        'RDM response was smaller than the minimum size'),
Ola_pb2.RDM_PACKET_LENGTH_MISMATCH: (
'The length field of packet didn\'t match length received'),
Ola_pb2.RDM_PARAM_LENGTH_MISMATCH: (
'The parameter length exceeds the remaining packet size'),
Ola_pb2.RDM_INVALID_COMMAND_CLASS: (
'The command class was not one of GET_RESPONSE or SET_RESPONSE'),
Ola_pb2.RDM_COMMAND_CLASS_MISMATCH: (
'The command class didn\'t match the request'),
Ola_pb2.RDM_INVALID_RESPONSE_TYPE: (
'The response type was not ACK, ACK_OVERFLOW, ACK_TIMER or NACK'),
Ola_pb2.RDM_PLUGIN_DISCOVERY_NOT_SUPPORTED: (
'The DISCOVERY Command Class is not supported by this controller'),
Ola_pb2.RDM_DUB_RESPONSE: (
'Discovery Unique Branch response')
}
def __init__(self, controller, response):
self.status = RequestStatus(controller)
self._response_code = response.response_code
self._response_type = response.response_type
self._queued_messages = response.message_count
self.sub_device = response.sub_device
self.command_class = response.command_class
self.pid = response.param_id
self.data = response.data
self._raw_responses = response.raw_response
# we populate these below if required
self._nack_reason = None
self._ack_timer = None
if (self.status.Succeeded() and
self._response_code == Ola_pb2.RDM_COMPLETED_OK):
# check for ack timer or nack
if self._response_type == Ola_pb2.RDM_NACK_REASON:
nack_value = self._get_short_from_data(response.data)
if nack_value is None:
self._response_code = Ola_pb2.RDM_INVALID_RESPONSE
else:
self._nack_reason = RDMNack.LookupCode(nack_value)
elif self._response_type == Ola_pb2.RDM_ACK_TIMER:
self._ack_timer = self._get_short_from_data(response.data)
if self._ack_timer is None:
self._response_code = Ola_pb2.RDM_INVALID_RESPONSE
@property
def response_code(self):
return self._response_code
def ResponseCodeAsString(self):
return self.RESPONSE_CODES_TO_STRING.get(self._response_code, 'Unknown')
@property
def response_type(self):
return self._response_type
@property
def queued_messages(self):
return self._queued_messages
@property
def nack_reason(self):
return self._nack_reason
@property
def raw_response(self):
"""The list of byte strings in the response packets."""
return self._raw_responses
def WasAcked(self):
"""Returns true if this RDM request returned a ACK response."""
return (self.status.Succeeded() and
self.response_code == OlaClient.RDM_COMPLETED_OK and
self.response_type == OlaClient.RDM_ACK)
@property
def ack_timer(self):
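    # The RDM ACK_TIMER value is expressed in units of 100 ms; convert to ms.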
return 100 * self._ack_timer
def __str__(self):
if self.response_code != Ola_pb2.RDM_COMPLETED_OK:
return 'RDMResponse: %s' % self.ResponseCodeAsString()
if self.response_type == OlaClient.RDM_ACK:
return 'RDMResponse: %s ACK' % self._command_class()
elif self.response_type == OlaClient.RDM_ACK_TIMER:
return 'RDMResponse: %s ACK TIMER, %d ms' % (
self._command_class(), self.ack_timer)
else:
      return 'RDMResponse: %s NACK %s' % (
self._command_class(), self.nack_reason)
def _get_short_from_data(self, data):
"""Try to unpack the binary data into a short.
Args:
data: the binary data
Returns:
value: None if the unpacking failed
"""
try:
return struct.unpack('!h', data)[0]
except struct.error:
return None
def _command_class(self):
if self.command_class == OlaClient.RDM_GET_RESPONSE:
return 'Get'
elif self.command_class == OlaClient.RDM_SET_RESPONSE :
return 'Set'
elif self.command_class == OlaClient.RDM_DISCOVERY_RESPONSE:
return 'Discovery'
else:
return "UNKNOWN_CC"
class OlaClient(Ola_pb2.OlaClientService):
"""The client used to communicate with olad."""
def __init__(self, our_socket = None, close_callback = None):
"""Create a new client.
Args:
      our_socket: the socket to use for communications; if not provided one is
created.
close_callback: A callable to run if the socket is closed
"""
self._socket = our_socket
if self._socket is None:
self._socket = socket.socket()
try:
        self._socket.connect(('localhost', OLA_PORT))
except socket.error:
raise OLADNotRunningException('Failed to connect to olad')
self._close_callback = close_callback
self._channel = StreamRpcChannel(self._socket, self, self._SocketClosed)
self._stub = Ola_pb2.OlaServerService_Stub(self._channel)
self._universe_callbacks = {}
def GetSocket(self):
"""Returns the socket used to communicate with the server."""
return self._socket
def SocketReady(self):
"""Called when the socket has new data."""
self._channel.SocketReady()
def _SocketClosed(self):
"""Called by the RPCChannel if the socket is closed."""
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._socket.close()
self._socket = None
if self._close_callback:
self._close_callback()
def FetchPlugins(self, callback):
"""Fetch the list of plugins.
Args:
callback: the function to call once complete, takes two arguments, a
RequestStatus object and a list of Plugin objects
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.PluginListRequest()
done = lambda x, y: self._GetPluginsComplete(callback, x, y)
try:
self._stub.GetPlugins(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def PluginDescription(self, callback, plugin_id):
"""Fetch the list of plugins.
Args:
      callback: the function to call once complete, takes two arguments, a
        RequestStatus object and the plugin description string
plugin_id: the id of the plugin
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.PluginDescriptionRequest()
request.plugin_id = plugin_id
done = lambda x, y: self._PluginDescriptionComplete(callback, x, y)
try:
self._stub.GetPluginDescription(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def FetchDevices(self, callback, plugin_filter=Plugin.OLA_PLUGIN_ALL):
"""Fetch a list of devices from the server.
Args:
callback: The function to call once complete, takes two arguments, a
RequestStatus object and a list of Device objects.
      plugin_filter: a plugin id to filter by
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.DeviceInfoRequest()
request.plugin_id = plugin_filter
done = lambda x, y: self._DeviceInfoComplete(callback, x, y)
try:
self._stub.GetDeviceInfo(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def FetchUniverses(self, callback):
"""Fetch a list of universes from the server
Args:
callback: The function to call once complete, takes two arguments, a
RequestStatus object and a list of Universe objects.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.OptionalUniverseRequest()
done = lambda x, y: self._UniverseInfoComplete(callback, x, y)
try:
self._stub.GetUniverseInfo(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def FetchDmx(self, universe, callback):
"""Fetch a list of universes from the server
Args:
universe: the universe to fetch the data for
callback: The function to call once complete, takes three arguments, a
RequestStatus object, a universe number and a list of dmx data.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.UniverseRequest()
request.universe = universe
done = lambda x, y: self._GetDmxComplete(callback, x, y)
try:
self._stub.GetDmx(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def SendDmx(self, universe, data, callback=None):
"""Send DMX data to the server
Args:
      universe: the universe to send the data to
data: An array object with the DMX data
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.DmxData()
request.universe = universe
request.data = data.tostring()
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.UpdateDmxData(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def SetUniverseName(self, universe, name, callback=None):
"""Set the name of a universe.
Args:
universe: the universe to set the name of
name: the new name for the universe
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.UniverseNameRequest()
request.universe = universe
request.name = name
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.SetUniverseName(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def SetUniverseMergeMode(self, universe, merge_mode, callback=None):
"""Set the merge_mode of a universe.
Args:
      universe: the universe to set the merge mode of
merge_mode: either Universe.HTP or Universe.LTP
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.MergeModeRequest()
request.universe = universe
request.merge_mode = merge_mode
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.SetMergeMode(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def RegisterUniverse(self, universe, action, data_callback, callback=None):
"""Register to receive dmx updates for a universe.
Args:
      universe: the universe to register or unregister for
action: OlaClient.REGISTER or OlaClient.UNREGISTER
data_callback: the function to be called when there is new data, passed
a single argument of type array.
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.RegisterDmxRequest()
request.universe = universe
request.action = action
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.RegisterForDmx(controller, request, done)
except socket.error:
raise OLADNotRunningException()
    if action == self.REGISTER:
self._universe_callbacks[universe] = data_callback
elif universe in self._universe_callbacks:
del self._universe_callbacks[universe]
return True
def PatchPort(self, device_alias, port, is_output, action, universe,
callback=None):
"""Patch a port to a universe.
Args:
device_alias: the alias of the device to configure
port: the id of the port
is_output: select the input or output port
      action: OlaClient.PATCH or OlaClient.UNPATCH
      universe: the universe to patch the port to
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.PatchPortRequest()
request.device_alias = device_alias
request.port_id = port
request.action = action
request.is_output = is_output
request.universe = universe
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.PatchPort(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def ConfigureDevice(self, device_alias, request_data, callback):
"""Send a device config request.
Args:
device_alias: the alias of the device to configure
request_data: the request to send to the device
callback: The function to call once complete, takes two arguments, a
RequestStatus object and a response.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.DeviceConfigRequest()
request.device_alias = device_alias
request.data = request_data
done = lambda x, y: self._ConfigureDeviceComplete(callback, x, y)
try:
self._stub.ConfigureDevice(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def SendTimeCode(self,
time_code_type,
hours,
minutes,
seconds,
frames,
callback=None):
"""Send Time Code Data.
Args:
time_code_type: One of OlaClient.TIMECODE_FILM, OlaClient.TIMECODE_EBU,
OlaClient.TIMECODE_DF or OlaClient.TIMECODE_SMPTE
hours: the hours
minutes: the minutes
seconds: the seconds
frames: the frame count
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.TimeCode()
request.type = time_code_type
request.hours = hours
request.minutes = minutes
request.seconds = seconds
request.frames = frames
done = lambda x, y: self._AckMessageComplete(callback, x, y)
try:
self._stub.SendTimeCode(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def UpdateDmxData(self, controller, request, callback):
"""Called when we receive new DMX data.
Args:
controller: An RpcController object
      request: A DmxData message
callback: The callback to run once complete
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
if request.universe in self._universe_callbacks:
data = array.array('B')
data.fromstring(request.data)
self._universe_callbacks[request.universe](data)
response = Ola_pb2.Ack()
callback(response)
return True
def FetchUIDList(self, universe, callback):
"""Used to get a list of UIDs for a particular universe.
Args:
universe: The universe to get the UID list for.
callback: The function to call once complete, takes two arguments, a
RequestStatus object and a iterable of UIDs.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.UniverseRequest()
request.universe = universe
done = lambda x, y: self._FetchUIDsComplete(callback, x, y)
try:
self._stub.GetUIDs(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def RunRDMDiscovery(self, universe, full, callback):
"""Triggers RDM discovery for a universe.
Args:
universe: The universe to run discovery for.
full: true to use full discovery, false for incremental (if supported)
callback: The function to call once complete, takes one argument, a
RequestStatus object.
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.DiscoveryRequest()
request.universe = universe
request.full = full
done = lambda x, y: self._FetchUIDsComplete(callback, x, y)
try:
self._stub.ForceDiscovery(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def RDMGet(self, universe, uid, sub_device, param_id, callback, data = ''):
"""Send an RDM get command.
Args:
      universe: The universe to send the RDM request on.
uid: A UID object
sub_device: The sub device index
param_id: the param ID
callback: The function to call once complete, takes a RDMResponse object
data: the data to send
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
return self._RDMMessage(universe, uid, sub_device, param_id, callback,
data);
def RDMSet(self, universe, uid, sub_device, param_id, callback, data = ''):
"""Send an RDM set command.
Args:
      universe: The universe to send the RDM request on.
uid: A UID object
sub_device: The sub device index
param_id: the param ID
callback: The function to call once complete, takes a RDMResponse object
data: the data to send
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
return self._RDMMessage(universe, uid, sub_device, param_id, callback,
data, set = True);
def SendRawRDMDiscovery(self,
universe,
uid,
sub_device,
param_id,
callback,
data = ''):
"""Send an RDM Discovery command. Unless you're writing RDM tests you
shouldn't need to use this.
Args:
      universe: The universe to send the RDM request on.
uid: A UID object
sub_device: The sub device index
param_id: the param ID
callback: The function to call once complete, takes a RDMResponse object
data: the data to send
Returns:
True if the request was sent, False otherwise.
"""
if self._socket is None:
return False
controller = SimpleRpcController()
request = Ola_pb2.RDMDiscoveryRequest()
request.universe = universe
request.uid.esta_id = uid.manufacturer_id
request.uid.device_id = uid.device_id
request.sub_device = sub_device
request.param_id = param_id
request.data = data
request.include_raw_response = True
done = lambda x, y: self._RDMCommandComplete(callback, x, y)
try:
self._stub.RDMDiscoveryCommand(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def _RDMMessage(self, universe, uid, sub_device, param_id, callback, data,
set = False):
controller = SimpleRpcController()
request = Ola_pb2.RDMRequest()
request.universe = universe
request.uid.esta_id = uid.manufacturer_id
request.uid.device_id = uid.device_id
request.sub_device = sub_device
request.param_id = param_id
request.data = data
request.is_set = set
done = lambda x, y: self._RDMCommandComplete(callback, x, y)
try:
self._stub.RDMCommand(controller, request, done)
except socket.error:
raise OLADNotRunningException()
return True
def _GetPluginsComplete(self, callback, controller, response):
"""Called when the list of plugins is returned.
Args:
callback: the callback to run
controller: an RpcController
response: a PluginInfoReply message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
plugins = [Plugin(p.plugin_id, p.name) for p in response.plugin]
plugins.sort(key=lambda x: x.id)
callback(status, plugins)
def _PluginDescriptionComplete(self, callback, controller, response):
"""Called when the plugin description is returned.
Args:
callback: the callback to run
controller: an RpcController
      response: a PluginDescriptionReply message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
callback(status, response.description)
def _DeviceInfoComplete(self, callback, controller, response):
"""Called when the Device info request returns.
Args:
callback: the callback to run
controller: an RpcController
response: a DeviceInfoReply message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
devices = []
for device in response.device:
input_ports = []
output_ports = []
for port in device.input_port:
input_ports.append(Port(port.port_id,
port.universe,
port.active,
port.description,
port.supports_rdm))
for port in device.output_port:
output_ports.append(Port(port.port_id,
port.universe,
port.active,
port.description,
port.supports_rdm))
devices.append(Device(device.device_id,
device.device_alias,
device.device_name,
device.plugin_id,
input_ports,
output_ports))
callback(status, devices)
def _UniverseInfoComplete(self, callback, controller, response):
"""Called when the Universe info request returns.
Args:
callback: the callback to run
controller: an RpcController
response: a UniverseInfoReply message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
universes = [Universe(u.universe, u.name, u.merge_mode) for u in
response.universe]
callback(status, universes)
def _GetDmxComplete(self, callback, controller, response):
"""Called when the Universe info request returns.
Args:
callback: the callback to run
controller: an RpcController
      response: a DmxData message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
data = array.array('B')
data.fromstring(response.data)
callback(status, response.universe, data)
def _AckMessageComplete(self, callback, controller, response):
"""Called when an rpc that returns an Ack completes.
Args:
callback: the callback to run
controller: an RpcController
response: an Ack message.
"""
if not callback:
return
status = RequestStatus(controller)
callback(status)
def _ConfigureDeviceComplete(self, callback, controller, response):
"""Called when a ConfigureDevice request completes.
Args:
callback: the callback to run
controller: an RpcController
      response: a DeviceConfigReply message.
"""
if not callback:
return
status = RequestStatus(controller)
if not status.Succeeded():
return
callback(status, response.data)
def _FetchUIDsComplete(self, callback, controller, response):
"""Called when a FetchUIDList request completes.
Args:
callback: the callback to run
controller: an RpcController
      response: a UIDListReply message.
"""
if not callback:
return
status = RequestStatus(controller)
uids = []
if response:
for uid in response.uid:
uids.append(UID(uid.esta_id, uid.device_id))
uids.sort()
callback(status, uids)
def _RDMCommandComplete(self, callback, controller, response):
"""Called when a RDM request completes.
Args:
callback: the callback to run
controller: an RpcController
      response: an RDMResponse message.
"""
if not callback:
return
callback(RDMResponse(controller, response))
# Populate the patch & register actions
for value in Ola_pb2._PATCHACTION.values:
setattr(OlaClient, value.name, value.number)
for value in Ola_pb2._REGISTERACTION.values:
setattr(OlaClient, value.name, value.number)
# populate time code enums
for value in Ola_pb2._TIMECODETYPE.values:
setattr(OlaClient, value.name, value.number)
# populate the RDM response codes & types
for value in Ola_pb2._RDMRESPONSECODE.values:
setattr(OlaClient, value.name, value.number)
for value in Ola_pb2._RDMRESPONSETYPE.values:
setattr(OlaClient, value.name, value.number)
for value in Ola_pb2._RDMCOMMANDCLASS.values:
setattr(OlaClient, value.name, value.number)
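# Illustrative usage sketch (not part of the original module). The client is
# callback driven and needs its socket pumped by the caller; a minimal
# blocking program, assuming olad is running locally, might look like this:
#
#   import select
#   from ola.OlaClient import OlaClient
#
#   def on_plugins(status, plugins):
#     if status.Succeeded():
#       for plugin in plugins:
#         print plugin
#
#   client = OlaClient()
#   client.FetchPlugins(on_plugins)
#   while True:  # a real program would fold this into its own event loop
#     readable, _, _ = select.select([client.GetSocket()], [], [])
#     if readable:
#       client.SocketReady()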
|
lgpl-2.1
|
tachang/uwsgi
|
tests/websockets_chat.py
|
6
|
2621
|
#!./uwsgi --http-socket :9090 --gevent 100 --module tests.websocket_chat --gevent-monkey-patch
import uwsgi
import time
import gevent.select
import redis
def application(env, sr):
ws_scheme = 'ws'
if 'HTTPS' in env or env['wsgi.url_scheme'] == 'https':
ws_scheme = 'wss'
if env['PATH_INFO'] == '/':
sr('200 OK', [('Content-Type','text/html')])
return """
<html>
<head>
<script language="Javascript">
var s = new WebSocket("%s://%s/foobar/");
s.onopen = function() {
alert("connected !!!");
s.send("ciao");
};
s.onmessage = function(e) {
var bb = document.getElementById('blackboard')
var html = bb.innerHTML;
bb.innerHTML = html + '<br/>' + e.data;
};
s.onerror = function(e) {
alert(e);
}
s.onclose = function(e) {
alert("connection closed");
}
function invia() {
var value = document.getElementById('testo').value;
s.send(value);
}
</script>
</head>
<body>
<h1>WebSocket</h1>
<input type="text" id="testo"/>
<input type="button" value="invia" onClick="invia();"/>
<div id="blackboard" style="width:640px;height:480px;background-color:black;color:white;border: solid 2px red;overflow:auto">
</div>
</body>
</html>
""" % (ws_scheme, env['HTTP_HOST'])
elif env['PATH_INFO'] == '/favicon.ico':
return ""
elif env['PATH_INFO'] == '/foobar/':
uwsgi.websocket_handshake(env['HTTP_SEC_WEBSOCKET_KEY'], env.get('HTTP_ORIGIN', ''))
print "websockets..."
r = redis.StrictRedis(host='localhost', port=6379, db=0)
channel = r.pubsub()
channel.subscribe('foobar')
websocket_fd = uwsgi.connection_fd()
redis_fd = channel.connection._sock.fileno()
while True:
# wait max 4 seconds to allow ping to be sent
ready = gevent.select.select([websocket_fd, redis_fd], [], [], 4.0)
# send ping on timeout
if not ready[0]:
uwsgi.websocket_recv_nb()
for fd in ready[0]:
if fd == websocket_fd:
msg = uwsgi.websocket_recv_nb()
if msg:
r.publish('foobar', msg)
elif fd == redis_fd:
msg = channel.parse_response()
# only interested in user messages
if msg[0] == 'message':
uwsgi.websocket_send("[%s] %s" % (time.time(), msg))
|
gpl-2.0
|
pjg101/SickRage
|
lib/sqlalchemy/dialects/firebird/fdb.py
|
79
|
4365
|
# firebird/fdb.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: firebird+fdb
:name: fdb
    :dbapi: fdb
:connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
:url: http://pypi.python.org/pypi/fdb/
fdb is a kinterbasdb compatible DBAPI for Firebird.
.. versionadded:: 0.8 - Support for the fdb Firebird driver.
.. versionchanged:: 0.9 - The fdb dialect is now the default dialect
under the ``firebird://`` URL space, as ``fdb`` is now the official
Python driver for Firebird.
Arguments
----------
The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb`
dialect; however, it does not accept every argument that Kinterbasdb does.
* ``enable_rowcount`` - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the ``enable_rowcount`` option with
:meth:`.Connection.execution_options`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print r.rowcount
* ``retaining`` - False by default. Setting this to True will pass the
``retaining=True`` keyword argument to the ``.commit()`` and ``.rollback()``
methods of the DBAPI connection, which can improve performance in some
situations, but apparently with significant caveats.
Please read the fdb and/or kinterbasdb DBAPI documentation in order to
understand the implications of this flag.
.. versionadded:: 0.8.2 - ``retaining`` keyword argument specifying
transaction retaining behavior - in 0.8 it defaults to ``True``
for backwards compatibility.
.. versionchanged:: 0.9.0 - the ``retaining`` flag defaults to ``False``.
In 0.8 it defaulted to ``True``.
.. seealso::
http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information
on the "retaining" flag.
"""
from .kinterbasdb import FBDialect_kinterbasdb
from ... import util
class FBDialect_fdb(FBDialect_kinterbasdb):
def __init__(self, enable_rowcount=True,
retaining=False, **kwargs):
super(FBDialect_fdb, self).__init__(
enable_rowcount=enable_rowcount,
retaining=retaining, **kwargs)
@classmethod
def dbapi(cls):
return __import__('fdb')
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if opts.get('port'):
opts['host'] = "%s/%s" % (opts['host'], opts['port'])
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
isc_info_firebird_version = 103
fbconn = connection.connection
version = fbconn.db_info(isc_info_firebird_version)
return self._parse_version_info(version)
dialect = FBDialect_fdb
|
gpl-3.0
|
perkinslr/pypyjs
|
addedLibraries/nevow/wsgi.py
|
2
|
21703
|
# TODO:
# 1. make exception renderer work (currently the code is in appserver.py)
# - srid
import warnings
warnings.warn("nevow.wsgi is deprecated.", category=DeprecationWarning)
import sys, socket, math, time
import cgi # for FieldStorage
import types
from urllib import unquote, quote
from zope.interface import implements
from twisted.web.http import stringToDatetime, weekdayname, monthname
from nevow import context, flat, inevow, util
from nevow import __version__ as nevowversion
def log(msg):
print >>sys.stderr, "WSGI: {%s}" % str(msg)
errorMarker = object()
class NevowWSGISite(object):
def __init__(self, request, resource):
self.request = request
self.resource = resource
self.context = context.SiteContext()
def remember(self, obj, inter=None):
self.context.remember(obj, inter)
def getPageContextForRequestContext(self, ctx):
"""Retrieve a resource from this site for a particular request. The
resource will be wrapped in a PageContext which keeps track
of how the resource was located.
"""
path = inevow.IRemainingSegments(ctx)
res = inevow.IResource(self.resource)
pageContext = context.PageContext(tag=res, parent=ctx)
return self.handleSegment(
res.locateChild(pageContext, path),
ctx.tag, path, pageContext)
def handleSegment(self, result, request, path, pageContext):
if result is errorMarker:
return errorMarker
newres, newpath = result
# If the child resource is None then display a 404 page
if newres is None:
from nevow.rend import FourOhFour
return context.PageContext(tag=FourOhFour(), parent=pageContext)
# If we got a deferred then we need to call back later, once the
# child is actually available.
#if isinstance(newres, defer.Deferred):
# return newres.addCallback(
# lambda actualRes: self.handleSegment(
# (actualRes, newpath), request, path, pageContext))
newres = inevow.IResource(newres, persist=True)
if newres is pageContext.tag:
assert not newpath is path, "URL traversal cycle detected when attempting to locateChild %r from resource %r." % (path, pageContext.tag)
assert len(newpath) < len(path), "Infinite loop impending..."
## We found a Resource... update the request.prepath and postpath
for x in xrange(len(path) - len(newpath)):
request.prepath.append(request.postpath.pop(0))
## Create a context object to represent this new resource
ctx = context.PageContext(tag=newres, parent=pageContext)
ctx.remember(tuple(request.prepath), inevow.ICurrentSegments)
ctx.remember(tuple(request.postpath), inevow.IRemainingSegments)
res = newres
path = newpath
if not path:
return ctx
return self.handleSegment(
res.locateChild(ctx, path),
request, path, ctx)
def createWSGIApplication(page, rootURL=None):
"""Given a Page instance, return a WSGI callable.
`rootURL` - URL to be remembered as root
"""
page.flattenFactory = flat.iterflatten
siteCtx = context.SiteContext(tag=None)
def application(environ, start_response):
request = WSGIRequest(environ, start_response)
prefixURL = rootURL
if prefixURL is None:
# Try to guess
proto = request.isSecure() and 'https://' or 'http://'
server = environ['SERVER_NAME']
prefixURL = proto + server + environ.get('SCRIPT_NAME', '/')
request.rememberRootURL(prefixURL)
site = NevowWSGISite(request, page)
request.site = site
result = request.process()
if not request.headersSent:
request.write('') # send headers now
if isinstance(result, str):
yield str(result) # work around wsgiref using StringType
elif isinstance(result, util.Deferred):
## So we can use the wsgi module if twisted is installed
## TODO use render synchronously instead maybe? I'm pretty
## sure after the application callable returns, the request
## is "closed". Investigate with the latest wsgi spec and
## some implementations.
#raise 'PH' + str(dir(result)) + '{{%s}}' % str(result.result)
yield result.result
else:
for x in result:
yield x
return application
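# Illustrative only (not in the original module): serving a nevow Page through
# this adapter with the stdlib reference server might look roughly like the
# following, where MyPage stands in for any resource adaptable to IResource.
#
#   from wsgiref.simple_server import make_server
#   application = createWSGIApplication(MyPage())
#   make_server('', 8080, application).serve_forever()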
# TODO: convert interface comments
class WSGIRequest(object):
    """An HTTP request.
    Subclasses should override the process() method to determine how
    the request will be processed.
    @ivar method: The HTTP method that was used.
    @ivar uri: The full URI that was requested (includes arguments).
    @ivar path: The path only (arguments not included).
    @ivar args: All of the arguments, including URL and POST arguments.
    @type args: A mapping of strings (the argument names) to lists of values.
    @ivar received_headers: All received headers.
    """
    implements(inevow.IRequest)
def __init__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
self.outgoingHeaders = []
self.received_headers = {}
self.lastModified = None
self.etag = None
self.method = environ.get('REQUEST_METHOD', 'GET')
self.args = self._parseQuery(environ.get('QUERY_STRING', ''))
try:
self.host = (self.environ['REMOTE_ADDR'], int(self.environ['REMOTE_PORT']))
except KeyError:
pass # TODO
for k,v in environ.items():
if k.startswith('HTTP_'):
self.received_headers[k[5:].lower()] = v
self.setResponseCode("200")
self.headersSent = False
self.appRootURL = None
self.deferred = util.Deferred()
def process(self):
"""When a form is POSTed,
we create a cgi.FieldStorage instance using the data posted,
and set it as the request.fields attribute. This way, we can
get at information about filenames and mime-types of
files that were posted."""
if self.method == 'POST':
self.fields = cgi.FieldStorage(
self.environ['wsgi.input'],
self.received_headers,
environ={'REQUEST_METHOD': 'POST'})
# set various default headers
self.setHeader('server', nevowversion)
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
# HTTP date string format
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
weekdayname[wd],
day, monthname[month], year,
hh, mm, ss)
self.setHeader('date', s)
self.setHeader('content-type', 'text/html; charset=UTF-8')
# Resource Identification
self.prepath = []
self.postpath = map(unquote, self.path[1:].split('/'))
self.sitepath = []
requestContext = context.RequestContext(
parent=self.site.context, tag=self)
requestContext.remember( (), inevow.ICurrentSegments)
requestContext.remember(tuple(self.postpath), inevow.IRemainingSegments)
pageContext = self.site.getPageContextForRequestContext(requestContext)
return self.gotPageContext(pageContext)
def gotPageContext(self, pageContext):
if pageContext is errorMarker:
return None
html = pageContext.tag.renderHTTP(pageContext)
if isinstance(html, util.Deferred):
# This is a deferred object
# Let us return it synchronously
# (wsgi has nothing to do with sync, async)
# XXX: Is this correct?
html = html.result
        # FIXME: It is unclear how best to handle a generator here;
        # it may also yield values that are not str.
if type(html) is types.GeneratorType:
html = ''.join(list(html))
if html is errorMarker:
## Error webpage has already been rendered and finish called
pass
elif isinstance(html, str):
return html
else:
res = inevow.IResource(html, None)
if res is not None:
pageContext = context.PageContext(tag=res, parent=pageContext)
return self.gotPageContext(pageContext)
else:
# import traceback; traceback.print_stack()
print >>sys.stderr, "html is not a string: %s on %s" % (str(html), pageContext.tag)
return html
def _parseQuery(self, qs):
d = {}
items = [s2 for s1 in qs.split("&") for s2 in s1.split(";")]
for item in items:
try:
k, v = item.split("=", 1)
except ValueError:
# no strict parsing
continue
k = unquote(k.replace("+", " "))
v = unquote(v.replace("+", " "))
if k in d:
d[k].append(v)
else:
d[k] = [v]
return d
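    # Hedged illustration of the parser above:
    #     _parseQuery("a=1&a=2;b=hello+world") -> {'a': ['1', '2'], 'b': ['hello world']}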
def _getPath(self):
pth = self.environ.get('PATH_INFO', '')
if not pth: pth = '/'
return pth
path = property(_getPath)
def _getURI(self):
query = self.environ.get('QUERY_STRING', '')
if query:
query = '?' + query
uri = self.path + query
log('URL:'+uri)
return uri
uri = property(_getURI)
# Methods for received request
def getHeader(self, key):
"""Get a header that was sent from the network.
"""
return self.received_headers.get(key.lower())
def getCookie(self, key):
"""Get a cookie that was sent from the network.
"""
def getAllHeaders(self):
"""Return dictionary of all headers the request received."""
return self.received_headers
def getRequestHostname(self):
"""Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
"""
return (self.getHeader('host') or
socket.gethostbyaddr(self.getHost()[1])[0]
).split(':')[0]
def getHost(self):
"""Get my originally requesting transport's host.
Don't rely on the 'transport' attribute, since Request objects may be
copied remotely. For information on this method's return value, see
twisted.internet.tcp.Port.
"""
def getClientIP(self):
return self.environ.get('REMOTE_ADDR', None)
def getClient(self):
pass
def getUser(self):
pass
def getPassword(self):
pass
def isSecure(self):
return self.environ['wsgi.url_scheme'] == 'https'
def getSession(self, sessionInterface = None):
pass
def URLPath(self):
from nevow import url
return url.URL.fromString(self.appRootURL+self.uri)
def prePathURL(self):
if self.isSecure():
default = 443
else:
default = 80
# TODO: use getHost().port after getHost is implemented
port = default
if port == default:
hostport = ''
else:
hostport = ':%d' % port
        # FIXME: This is a hack until the url module is fixed to support
        # root URLs. Or is this the right way to do it?
if self.appRootURL:
if self.appRootURL[-1] == '/':
rootURL = self.appRootURL
else:
rootURL = self.appRootURL + '/' # yuck!
return quote('%s%s' % (rootURL,
'/'.join(self.prepath)),
'/:')
return quote('http%s://%s%s/%s' % (
self.isSecure() and 's' or '',
self.getRequestHostname(),
hostport,
'/'.join(self.prepath)), '/:')
def rememberRootURL(self, url=None):
# result = p.renderHTTP(pctx)
"""
Remember the currently-processed part of the URL for later
recalling.
"""
if url is None:
raise NotImplementedError
self.appRootURL = url
def getRootURL(self):
"""
Get a previously-remembered URL.
"""
return self.appRootURL
# Methods for outgoing request
def write(self, data):
"""
Write some data as a result of an HTTP request. The first
time this is called, it writes out response data.
"""
if self.headersSent:
self._write(data)
return
headerkeys = [k for k,v in self.outgoingHeaders]
self._write = self.start_response(
self.responseCode, self.outgoingHeaders, None)
self.headersSent = True
if data:
self._write(data)
def addCookie(self, k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
twisted.web.server.Request.getSession and the
twisted.web.server.Session class for details.
"""
def setResponseCode(self, code, message=None):
"""Set the HTTP response code.
"""
self.responseCode = '%s %s' % (code, RESPONSES[int(str(code))])
def setHeader(self, header, value):
"""Set an outgoing HTTP header.
"""
self.outgoingHeaders.append((header.lower(), value))
def redirect(self, url):
"""Utility function that does a redirect.
The request should have finish() called after this.
"""
log('REDIRECT to ' + str(url))
self.setResponseCode(str(302))
self.setHeader('location', url)
def setLastModified(self, when):
"""Set the X{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set
Last-Modified earlier, only replacing the Last-Modified time
if it is to a later value.
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was
modified, in seconds since the epoch.
@type when: number
@return: If I am a X{If-Modified-Since} conditional request and
the time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no
body. Otherwise, I return a false value.
"""
# time.time() may be a float, but the HTTP-date strings are
# only good for whole seconds.
when = long(math.ceil(when))
if (not self.lastModified) or (self.lastModified < when):
self.lastModified = when
modified_since = self.getHeader('if-modified-since')
if modified_since:
modified_since = stringToDatetime(modified_since)
if modified_since >= when:
self.setResponseCode(NOT_MODIFIED)
return '' # TODO: return http.CACHED (requires Twisted)
return None
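    # Hedged usage sketch (illustration only): a resource's renderHTTP() might
    # call request.setLastModified(os.path.getmtime(path)) and, when the return
    # value is '' (this port's stand-in for http.CACHED), skip writing a body.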
def setETag(self, etag):
"""Set an X{entity tag} for the outgoing response.
That's \"entity tag\" as in the HTTP/1.1 X{ETag} header, \"used
for comparing two or more entities from the same requested
resource.\"
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED<twisted.protocols.http.NOT_MODIFIED>} or
L{PRECONDITION_FAILED<twisted.protocols.http.PRECONDITION_FAILED>},
if appropriate for the tag given.
@param etag: The entity tag for the resource being returned.
@type etag: string
@return: If I am a X{If-None-Match} conditional request and
the tag matches one in the request, I return
L{CACHED<twisted.protocols.http.CACHED>} to indicate that
you should write no body. Otherwise, I return a false
value.
"""
if etag:
self.etag = etag
tags = self.getHeader("if-none-match")
if tags:
tags = tags.split()
if (etag in tags) or ('*' in tags):
self.setResponseCode(((self.method in ("HEAD", "GET"))
and NOT_MODIFIED)
or PRECONDITION_FAILED)
return '' # TODO: return http.CACHED (requires Twisted)
return None
def setHost(self, host, port, ssl=0):
"""Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g.
both Squid and Apache's mod_proxy can do this), when the address
the HTTP client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and then
forwarding requests to http://localhost:8080, but we don't want HTML produced
by Twisted to say 'http://localhost:8080', they should say 'https://www.example.com',
so we do:
>>> request.setHost('www.example.com', 443, ssl=1)
This method is experimental.
"""
# Methods not part of IRequest
#
producer = None
def registerProducer(self, other, _):
assert self.producer is None
self.producer = other
while self.producer is not None:
self.producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
    def finish(self):
        """We are finished writing data."""
        self.deferred.callback('')
# FIXME: copied from twisted.web.http
_CONTINUE = 100
SWITCHING = 101
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510
RESPONSES = {
# 100
_CONTINUE: "Continue",
SWITCHING: "Switching Protocols",
# 200
OK: "OK",
CREATED: "Created",
ACCEPTED: "Accepted",
NON_AUTHORITATIVE_INFORMATION: "Non-Authoritative Information",
NO_CONTENT: "No Content",
RESET_CONTENT: "Reset Content.",
PARTIAL_CONTENT: "Partial Content",
MULTI_STATUS: "Multi-Status",
# 300
MULTIPLE_CHOICE: "Multiple Choices",
MOVED_PERMANENTLY: "Moved Permanently",
FOUND: "Found",
SEE_OTHER: "See Other",
NOT_MODIFIED: "Not Modified",
USE_PROXY: "Use Proxy",
# 306 not defined??
TEMPORARY_REDIRECT: "Temporary Redirect",
# 400
BAD_REQUEST: "Bad Request",
UNAUTHORIZED: "Unauthorized",
PAYMENT_REQUIRED: "Payment Required",
FORBIDDEN: "Forbidden",
NOT_FOUND: "Not Found",
NOT_ALLOWED: "Method Not Allowed",
NOT_ACCEPTABLE: "Not Acceptable",
PROXY_AUTH_REQUIRED: "Proxy Authentication Required",
REQUEST_TIMEOUT: "Request Time-out",
CONFLICT: "Conflict",
GONE: "Gone",
LENGTH_REQUIRED: "Length Required",
PRECONDITION_FAILED: "Precondition Failed",
REQUEST_ENTITY_TOO_LARGE: "Request Entity Too Large",
REQUEST_URI_TOO_LONG: "Request-URI Too Long",
UNSUPPORTED_MEDIA_TYPE: "Unsupported Media Type",
REQUESTED_RANGE_NOT_SATISFIABLE: "Requested Range not satisfiable",
EXPECTATION_FAILED: "Expectation Failed",
# 500
INTERNAL_SERVER_ERROR: "Internal Server Error",
NOT_IMPLEMENTED: "Not Implemented",
BAD_GATEWAY: "Bad Gateway",
SERVICE_UNAVAILABLE: "Service Unavailable",
GATEWAY_TIMEOUT: "Gateway Time-out",
HTTP_VERSION_NOT_SUPPORTED: "HTTP Version not supported",
INSUFFICIENT_STORAGE_SPACE: "Insufficient Storage Space",
NOT_EXTENDED: "Not Extended"
}
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
|
mit
|
js0701/chromium-crosswalk
|
tools/perf/benchmarks/blink_style.py
|
17
|
1383
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from measurements import blink_style
import page_sets
from telemetry import benchmark
@benchmark.Disabled('reference', 'win8')
class BlinkStyleTop25(perf_benchmark.PerfBenchmark):
"""Measures performance of Blink's style engine (CSS Parsing, Style Recalc,
etc.) on the top 25 pages.
"""
test = blink_style.BlinkStyle
page_set = page_sets.Top25PageSet
@classmethod
def Name(cls):
return 'blink_style.top_25'
@benchmark.Disabled('reference')
@benchmark.Enabled('android')
class BlinkStyleKeyMobileSites(perf_benchmark.PerfBenchmark):
"""Measures performance of Blink's style engine (CSS Parsing, Style Recalc,
etc.) on key mobile sites.
"""
test = blink_style.BlinkStyle
page_set = page_sets.KeyMobileSitesPageSet
@classmethod
def Name(cls):
return 'blink_style.key_mobile_sites'
@benchmark.Disabled('reference')
@benchmark.Enabled('android')
class BlinkStylePolymer(perf_benchmark.PerfBenchmark):
"""Measures performance of Blink's style engine (CSS Parsing, Style Recalc,
etc.) for Polymer cases.
"""
test = blink_style.BlinkStyle
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'blink_style.polymer'
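# Hedged usage note (not part of the benchmark definitions): benchmarks defined
# here are normally discovered by name and run through Chromium's Telemetry
# runner, e.g. from a checkout root something like:
#     tools/perf/run_benchmark blink_style.top_25 --browser=release
# The exact flags depend on the local build and device configuration.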
|
bsd-3-clause
|
nvoron23/statsmodels
|
statsmodels/sandbox/examples/example_maxent.py
|
33
|
1286
|
"""
This is an example of using scipy.maxentropy to solve Jaynes' dice problem
See Golan, Judge, and Miller Section 2.3
"""
from scipy import maxentropy
import numpy as np
samplespace = [1., 2., 3., 4., 5., 6.]
def sump(x):
return x in samplespace
def meanp(x):
return np.mean(x)
# Set the constraints
# 1) We have a proper probability
# 2) The mean is equal to...
F = [sump, meanp]
model = maxentropy.model(F, samplespace)
# set the desired feature expectations
K = np.ones((5,2))
K[:,1] = [2.,3.,3.5,4.,5.]
model.verbose = False
for i in range(K.shape[0]):
model.fit(K[i])
# Output the distribution
print("\nFitted model parameters are:\n" + str(model.params))
print("\nFitted distribution is:")
p = model.probdist()
for j in range(len(model.samplespace)):
x = model.samplespace[j]
print("y = %-15s\tx = %-15s" %(str(K[i,1])+":",str(x) + ":") + \
" p(x) = "+str(p[j]))
# Now show how well the constraints are satisfied:
print()
print("Desired constraints:")
print("\tsum_{i}p_{i}= 1")
print("\tE[X] = %-15s" % str(K[i,1]))
print()
print("Actual expectations under the fitted model:")
print("\tsum_{i}p_{i} =", np.sum(p))
print("\tE[X] = " + str(np.sum(p*np.arange(1,7))))
|
bsd-3-clause
|
itsnotmyfault1/kimcopter2
|
crazyflie-pc-client/lib/cflib/crazyflie/__init__.py
|
1
|
13576
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2011-2013 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
The Crazyflie module is used to easily connect/send/receive data
from a Crazyflie.
Each function in the Crazyflie has a class in the module that can be used
to access that functionality. The same design is then used in the Crazyflie
firmware which makes the mapping 1:1 in most cases.
"""
__author__ = 'Bitcraze AB'
__all__ = ['Crazyflie']
import logging
logger = logging.getLogger(__name__)
import time
from threading import Thread
from threading import Timer
from .commander import Commander
from .console import Console
from .param import Param
from .log import Log
from .toccache import TocCache
import cflib.crtp
from cflib.utils.callbacks import Caller
class State:
"""Stat of the connection procedure"""
DISCONNECTED = 0
INITIALIZED = 1
CONNECTED = 2
SETUP_FINISHED = 3
class Crazyflie():
"""The Crazyflie class"""
# Callback callers
disconnected = Caller()
connectionLost = Caller()
connected = Caller()
connectionInitiated = Caller()
connectSetupFinished = Caller()
connectionFailed = Caller()
receivedPacket = Caller()
linkQuality = Caller()
state = State.DISCONNECTED
def __init__(self, link=None, ro_cache=None, rw_cache=None):
"""
Create the objects from this module and register callbacks.
ro_cache -- Path to read-only cache (string)
rw_cache -- Path to read-write cache (string)
"""
self.link = link
self._toc_cache = TocCache(ro_cache=ro_cache,
rw_cache=rw_cache)
self.incoming = _IncomingPacketHandler(self)
self.incoming.setDaemon(True)
self.incoming.start()
self.commander = Commander(self)
self.log = Log(self)
self.console = Console(self)
self.param = Param(self)
self._log_toc_updated = False
self._param_toc_updated = False
self.link_uri = ""
# Used for retry when no reply was sent back
self.receivedPacket.add_callback(self._check_for_initial_packet_cb)
self.receivedPacket.add_callback(self._check_for_answers)
self.answer_timers = {}
# Connect callbacks to logger
self.disconnected.add_callback(
lambda uri: logger.info("Callback->Disconnected from [%s]", uri))
self.connected.add_callback(
lambda uri: logger.info("Callback->Connected to [%s]", uri))
        self.connectionLost.add_callback(
            lambda uri, errmsg: logger.info("Callback->Connection lost to"
                                            " [%s]: %s", uri, errmsg))
        self.connectionFailed.add_callback(
            lambda uri, errmsg: logger.info("Callback->Connection failed to"
                                            " [%s]: %s", uri, errmsg))
        self.connectionInitiated.add_callback(
            lambda uri: logger.info("Callback->Connection initialized [%s]",
                                    uri))
self.connectSetupFinished.add_callback(
lambda uri: logger.info("Callback->Connection setup finished [%s]",
uri))
def _start_connection_setup(self):
"""Start the connection setup by refreshing the TOCs"""
logger.info("We are connected[%s], request connection setup",
self.link_uri)
self.log.refresh_toc(self._log_toc_updated_cb, self._toc_cache)
def _param_toc_updated_cb(self):
"""Called when the param TOC has been fully updated"""
logger.info("Param TOC finished updating")
self._param_toc_updated = True
if (self._log_toc_updated is True and self._param_toc_updated is True):
self.connectSetupFinished.call(self.link_uri)
def _log_toc_updated_cb(self):
"""Called when the log TOC has been fully updated"""
logger.info("Log TOC finished updating")
self._log_toc_updated = True
self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)
if (self._log_toc_updated and self._param_toc_updated):
logger.info("All TOCs finished updating")
self.connectSetupFinished.call(self.link_uri)
def _link_error_cb(self, errmsg):
"""Called from the link driver when there's an error"""
logger.warning("Got link error callback [%s] in state [%s]",
errmsg, self.state)
if (self.link is not None):
self.link.close()
self.link = None
if (self.state == State.INITIALIZED):
self.connectionFailed.call(self.link_uri, errmsg)
if (self.state == State.CONNECTED or
self.state == State.SETUP_FINISHED):
self.disconnected.call(self.link_uri)
self.connectionLost.call(self.link_uri, errmsg)
self.state = State.DISCONNECTED
def _link_quality_cb(self, percentage):
"""Called from link driver to report link quality"""
self.linkQuality.call(percentage)
def _check_for_initial_packet_cb(self, data):
"""
Called when first packet arrives from Crazyflie.
This is used to determine if we are connected to something that is
answering.
"""
self.state = State.CONNECTED
self.connected.call(self.link_uri)
self.receivedPacket.remove_callback(self._check_for_initial_packet_cb)
def open_link(self, link_uri):
"""
Open the communication link to a copter at the given URI and setup the
connection (download log/parameter TOC).
"""
self.connectionInitiated.call(link_uri)
self.state = State.INITIALIZED
self.link_uri = link_uri
self._log_toc_updated = False
self._param_toc_updated = False
try:
self.link = cflib.crtp.get_link_driver(link_uri,
self._link_quality_cb,
self._link_error_cb)
            # Add a callback so we can check that any data is coming
# back from the copter
self.receivedPacket.add_callback(self._check_for_initial_packet_cb)
self._start_connection_setup()
except Exception as ex: # pylint: disable=W0703
# We want to catch every possible exception here and show
# it in the user interface
import traceback
logger.error("Couldn't load link driver: %s\n\n%s",
ex, traceback.format_exc())
exception_text = "Couldn't load link driver: %s\n\n%s" % (
ex, traceback.format_exc())
if self.link:
self.link.close()
self.link = None
self.connectionFailed.call(link_uri, exception_text)
def close_link(self):
"""Close the communication link."""
logger.info("Closing link")
if (self.link is not None):
self.commander.send_setpoint(0, 0, 0, 0, False)
if (self.link is not None):
self.link.close()
self.link = None
self.disconnected.call(self.link_uri)
def add_port_callback(self, port, cb):
"""Add a callback to cb on port"""
self.incoming.add_port_callback(port, cb)
def remove_port_callback(self, port, cb):
"""Remove the callback cb on port"""
self.incoming.remove_port_callback(port, cb)
def _no_answer_do_retry(self, pk):
"""Resend packets that we have not gotten answers to"""
logger.debug("ExpectAnswer: No answer on [%d], do retry", pk.port)
# Cancel timer before calling for retry to help bug hunting
old_timer = self.answer_timers[pk.port]
if (old_timer is not None):
old_timer.cancel()
self.send_packet(pk, True)
else:
logger.warning("ExpectAnswer: ERROR! Was doing retry but"
"timer was None")
def _check_for_answers(self, pk):
"""
Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer.
"""
try:
timer = self.answer_timers[pk.port]
if (timer is not None):
logger.debug("ExpectAnswer: Got answer back on port [%d]"
", cancelling timer", pk.port)
timer.cancel()
self.answer_timers[pk.port] = None
except KeyError:
# We are not waiting for any answer on this port, ignore..
pass
def send_packet(self, pk, expect_answer=False):
"""
Send a packet through the link interface.
pk -- Packet to send
expect_answer -- True if a packet from the Crazyflie is expected to
be sent back, otherwise false
"""
if (self.link is not None):
self.link.send_packet(pk)
if (expect_answer):
logger.debug("ExpectAnswer: Will expect answer on port [%d]",
pk.port)
new_timer = Timer(0.2, lambda: self._no_answer_do_retry(pk))
try:
old_timer = self.answer_timers[pk.port]
if (old_timer is not None):
old_timer.cancel()
# If we get here a second call has been made to send
# packet on this port before we have gotten the first
# one back. This is an error and might cause loss of
# packets!!
logger.warning("ExpectAnswer: ERROR! Older timer whas"
" running while scheduling new one on "
"[%d]", pk.port)
except KeyError:
pass
self.answer_timers[pk.port] = new_timer
new_timer.start()
class _IncomingPacketHandler(Thread):
"""Handles incoming packets and sends the data to the correct receivers"""
def __init__(self, cf):
Thread.__init__(self)
self.cf = cf
self.cb = []
def add_port_callback(self, port, cb):
"""Add a callback for data that comes on a specific port"""
logger.debug("Adding callback on port [%d] to [%s]", port, cb)
self.add_header_callback(cb, port, 0, 0xff, 0x0)
def remove_port_callback(self, port, cb):
"""Remove a callback for data that comes on a specific port"""
logger.debug("Removing callback on port [%d] to [%s]", port, cb)
for port_callback in self.cb:
if (port_callback[0] == port and port_callback[4] == cb):
self.cb.remove(port_callback)
def add_header_callback(self, cb, port, channel, port_mask=0xFF,
channel_mask=0xFF):
"""
        Add a callback for a specific port/channel combination. A mask can be
        supplied for the port and the channel so that several headers trigger
        the same callback.
"""
self.cb.append([port, port_mask, channel, channel_mask, cb])
def run(self):
while(True):
if self.cf.link is None:
time.sleep(1)
continue
pk = self.cf.link.receive_packet(1)
if pk is None:
continue
#All-packet callbacks
self.cf.receivedPacket.call(pk)
found = False
for cb in self.cb:
if (cb[0] == pk.port & cb[1] and
cb[2] == pk.channel & cb[3]):
try:
cb[4](pk)
except Exception: # pylint: disable=W0703
# Disregard pylint warning since we want to catch all
# exceptions and we can't know what will happen in
# the callbacks.
import traceback
logger.warning("Exception while doing callback on port"
" [%d]\n\n%s", pk.port,
traceback.format_exc())
if (cb[0] != 0xFF):
found = True
if not found:
logger.warning("Got packet on header (%d,%d) but no callback "
"to handle it", pk.port, pk.channel)
|
gpl-2.0
|
XiaodunServerGroup/ddyedx
|
common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
|
7
|
62964
|
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
OpenEndedChild
OpenEndedModule
"""
import json
import logging
import unittest
from datetime import datetime
from lxml import etree
from mock import Mock, MagicMock, ANY, patch
from pytz import UTC
from webob.multidict import MultiDict
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.open_ended_grading_classes.grading_service_module import GradingServiceError
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
from xmodule.modulestore import Location
from xmodule.tests import get_test_system, test_util_open_ended
from xmodule.progress import Progress
from xmodule.tests.test_util_open_ended import (
DummyModulestore, TEST_STATE_SA_IN,
MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
INSTANCE_INCONSISTENT_STATE5
)
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
import capa.xqueue_interface as xqueue_interface
log = logging.getLogger(__name__)
ORG = 'edX'
COURSE = 'open_ended' # name of directory with course data
class OpenEndedChildTest(unittest.TestCase):
"""
Test the open ended child class
"""
location = Location(["i4x", "edX", "sa_test", "selfassessment",
"SampleQuestion"])
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': "",
'open_ended_grading_interface': {},
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
definition = Mock()
descriptor = Mock()
def setUp(self):
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.openendedchild = OpenEndedChild(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, new_answer)
new_answer = "Newer Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(new_answer, answer)
def test_record_latest_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
new_score = 3
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 3)
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], 0)
self.assertEqual(score['total'], self.static_data['max_score'])
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
"""
Test the open ended module class
"""
location = Location(["i4x", "edX", "sa_test", "selfassessment",
"SampleQuestion"])
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = etree.XML('''<rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
</category>
</rubric>''')
max_score = 4
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
definition = {'oeparam': oeparam}
descriptor = Mock()
feedback = {
"success": True,
"feedback": "Grader Feedback"
}
single_score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(rubric)
}
multiple_score_msg = {
'correct': True,
'score': [0, 1],
'msg': 'Grader Message',
'feedback': [json.dumps(feedback), json.dumps(feedback)],
'grader_type': 'PE',
'grader_id': ['1', '2'],
'submission_id': '1',
'success': True,
'rubric_scores': [[0], [0]],
'rubric_scores_complete': [True, True],
'rubric_xml': [etree.tostring(rubric), etree.tostring(rubric)]
}
def setUp(self):
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.test_system.location = self.location
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (0, "Queued")
def constructed_callback(dispatch="score_update"):
return dispatch
self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback,
'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
"""Test message_post() sends feedback to xqueue."""
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertTrue(result['success'])
# make sure it's actually sending something we want to the queue
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback'])
self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id']))
self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id']))
self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score'])
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.DONE)
def test_message_post_fail(self):
"""Test message_post() if unable to send feedback to xqueue."""
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertFalse(result['success'])
state = json.loads(self.openendedmodule.get_instance_state())
self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)
def test_send_to_grader(self):
student_response = "This is a student submission"
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
self.assertEqual(mock_send_to_queue_body_arg['max_score'], self.max_score)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_send_to_grader_fail(self):
"""Test send_to_grader() if unable to send submission to xqueue."""
student_response = "This is a student submission"
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertFalse(result)
def test_save_answer_fail(self):
"""Test save_answer() if unable to send submission to grader."""
submission = "This is a student submission"
self.openendedmodule.send_to_grader = Mock(return_value=(False, "Failed"))
response = self.openendedmodule.save_answer(
{"student_answer": submission},
get_test_system()
)
self.assertFalse(response['success'])
self.assertNotEqual(self.openendedmodule.latest_answer(), submission)
self.assertEqual(self.openendedmodule.stored_answer, submission)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.INITIAL)
self.assertEqual(state['stored_answer'], submission)
def update_score_single(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.single_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def update_score_multiple(self):
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
self.update_score_single()
assessment = self.openendedmodule.latest_post_assessment(self.test_system)
self.assertFalse(assessment == '')
# check for errors
self.assertFalse('errors' in assessment)
def test_update_score_single(self):
self.update_score_single()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 4)
def test_update_score_multiple(self):
"""
Tests that a score of [0, 1] gets aggregated to 1. A change in behavior added by @jbau
"""
self.update_score_multiple()
score = self.openendedmodule.latest_score()
self.assertEquals(score, 1)
@patch('xmodule.open_ended_grading_classes.open_ended_module.log.error')
def test_update_score_nohistory(self, error_logger):
"""
Tests error handling when there is no child_history
"""
# NOTE that we are not creating any history items
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
error_msg = ("Trying to update score without existing studentmodule child_history:\n"
" location: i4x://edX/sa_test/selfassessment/SampleQuestion\n"
" score: 1\n"
" grader_ids: [u'1', u'2']\n"
" submission_ids: [u'1', u'1']")
self.openendedmodule.update_score(get, self.test_system)
(msg,), _ = error_logger.call_args
self.assertTrue(error_logger.called)
self.assertEqual(msg, error_msg)
def test_open_ended_display(self):
"""
Test storing answer with the open ended module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Mock out the send_to_grader function so it doesn't try to connect to the xqueue.
test_module.send_to_grader = Mock(return_value=(True, "Success"))
# Submit a student response to the question.
test_module.handle_ajax(
"save_answer",
{"student_answer": submitted_response},
get_test_system()
)
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
def test_parse_score_msg(self):
"""
Test _parse_score_msg with empty dict.
"""
assessment = self.openendedmodule._parse_score_msg("{}", self.test_system)
self.assertEqual(assessment.get("valid"), False)
class CombinedOpenEndedModuleTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule
"""
location = Location(["i4x", "edX", "open_ended", "combinedopenended",
"SampleQuestion"])
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
    Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=ScopeIds(None, None, None, None),
)
def setUp(self):
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
def test_get_tag_name(self):
"""
Test to see if the xml tag name is correct
"""
name = self.combinedoe.get_tag_name("<t>Tag</t>")
self.assertEqual(name, "t")
def test_get_last_response(self):
"""
See if we can parse the last response
"""
response_dict = self.combinedoe.get_last_response(0)
self.assertEqual(response_dict['type'], "selfassessment")
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
def test_create_task(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
self.assertIsInstance(second_task, OpenEndedModule)
def test_get_task_number(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.get_task_number(0)
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.get_task_number(1)
self.assertIsInstance(second_task, OpenEndedModule)
third_task = combinedoe.get_task_number(2)
self.assertIsNone(third_task)
def test_update_task_states(self):
"""
See if we can update the task states properly
"""
changed = self.combinedoe.update_task_states()
self.assertFalse(changed)
current_task = self.combinedoe.current_task
current_task.change_state(CombinedOpenEndedV1Module.DONE)
changed = self.combinedoe.update_task_states()
self.assertTrue(changed)
def test_get_max_score(self):
"""
Try to get the max score of the problem
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
max_score = self.combinedoe.max_score()
self.assertEqual(max_score, 1)
def test_container_get_max_score(self):
"""
See if we can get the max score from the actual xmodule
"""
# The progress view requires that this function be exposed
max_score = self.combinedoe_container.max_score()
self.assertEqual(max_score, None)
def test_container_get_progress(self):
"""
See if we can get the progress from the actual xmodule
"""
progress = self.combinedoe_container.max_score()
self.assertEqual(progress, None)
def test_get_progress(self):
"""
Test if we can get the correct progress from the combined open ended class
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
progress = self.combinedoe.get_progress()
self.assertIsInstance(progress, Progress)
# progress._a is the score of the xmodule, which is 0 right now.
self.assertEqual(progress._a, 0)
# progress._b is the max_score (which is 1), divided by the weight (which is 1).
self.assertEqual(progress._b, 1)
def test_container_weight(self):
"""
Check the problem weight in the container
"""
weight = self.combinedoe_container.weight
self.assertEqual(weight, 1)
def test_container_child_weight(self):
"""
Test the class to see if it picks up the right weight
"""
weight = self.combinedoe_container.child_module.weight
self.assertEqual(weight, 1)
def test_get_score(self):
"""
See if scoring works
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 0)
self.assertEqual(score_dict['total'], 1)
def test_alternate_orderings(self):
"""
Try multiple ordering of definitions to see if the problem renders different steps correctly.
"""
t1 = self.task_xml1
t2 = self.task_xml2
xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
for xml in xml_to_test:
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
changed = combinedoe.update_task_states()
self.assertFalse(changed)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA_IN})
def test_get_score_realistic(self):
"""
Try to parse the correct score from a json instance state
"""
instance_state = json.loads(MOCK_INSTANCE_STATE)
rubric = """
<rubric>
<rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option>
<option>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option>
<option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
</category>
</rubric>
</rubric>
"""
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric),
'task_xml': [self.task_xml1, self.task_xml2]}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 15.0)
def generate_oe_module(self, task_state, task_number, task_xml):
"""
Return a combined open ended module with the specified parameters
"""
definition = {
'prompt': etree.XML(self.prompt),
'rubric': etree.XML(self.rubric),
'task_xml': task_xml
}
descriptor = Mock(data=definition)
module = Mock(scope_ids=Mock(usage_id='dummy-usage-id'))
instance_state = {'task_states': task_state, 'graded': True}
if task_number is not None:
instance_state.update({'current_task_number': task_number})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
return combinedoe
def ai_state_reset(self, task_state, task_number=None):
"""
See if state is properly reset
"""
combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2])
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
if combinedoe.is_scored:
self.assertEqual(score['score'], 0)
else:
self.assertEqual(score['score'], None)
def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None):
"""
See if state stays the same
"""
if tasks is None:
tasks = [self.task_xml1, self.task_xml2]
combinedoe = self.generate_oe_module(task_state, task_number, tasks)
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
self.assertEqual(int(score['score']), iscore)
def test_ai_state_reset(self):
self.ai_state_reset(TEST_STATE_AI)
def test_ai_state2_reset(self):
self.ai_state_reset(TEST_STATE_AI2)
def test_ai_invalid_state(self):
self.ai_state_reset(TEST_STATE_AI2_INVALID)
def test_ai_state_rest_task_number(self):
self.ai_state_reset(TEST_STATE_AI, task_number=2)
self.ai_state_reset(TEST_STATE_AI, task_number=5)
self.ai_state_reset(TEST_STATE_AI, task_number=1)
self.ai_state_reset(TEST_STATE_AI, task_number=0)
def test_ai_state_success(self):
self.ai_state_success(TEST_STATE_AI)
def test_state_single(self):
self.ai_state_success(TEST_STATE_SINGLE, iscore=12)
def test_state_pe_single(self):
self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule rubric scores consistency.
"""
# location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
# All these variables are used to construct the xmodule descriptor.
location = Location(["i4x", "edX", "open_ended", "combinedopenended",
"SampleQuestion"])
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 10
metadata = {'attempts': '10', 'max_score': max_score}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
    Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="10">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=ScopeIds(None, None, None, None),
)
def setUp(self):
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
def test_get_score(self):
"""
        If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_pe_grader(self):
"""
        If the grader type is PE, the score should not be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
score_dict = combinedoe.get_score()
self.assertNotEqual(score_dict['score'], 15.0)
def test_get_score_with_different_score_value_in_rubric(self):
"""
        If the grader type is ML, the score should be updated from the rubric scores. Aggregate rubric scores = sum([5])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 25.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_old_task_states(self):
"""
        If the grader type is ML and old_task_states are present in the inconsistent instance state, the score
        should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_score_missing(self):
"""
        If the grader type is ML and the score field is missing from the inconsistent instance state, the score
        should be updated from the rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
"""
Test the student flow in the combined open ended xmodule
"""
problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion"])
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
def construct_callback(dispatch="score_update"):
return dispatch
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
test_system.xqueue['construct_callback'] = construct_callback
return test_system
def setUp(self):
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location, COURSE)
def test_open_ended_load_and_save(self):
"""
See if we can load the module and save an answer
@return:
"""
# Try saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer)
def test_open_ended_flow_reset(self):
"""
Test the flow of the module if we complete the self assessment step and then reset
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("get_html", {})
self._handle_ajax("save_answer", {"student_answer": self.answer})
self._handle_ajax("get_html", {})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
self._handle_ajax("get_combined_rubric", {})
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render('student_view').content
self.assertIsInstance(html, basestring)
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "assessing")
self._handle_ajax("reset", {})
self.assertEqual(self._module().current_task_number, 0)
def test_open_ended_flow_with_xqueue_failure(self):
"""
Test a two step problem where the student first goes through the self assessment step, and then the
open ended step with the xqueue failing in the first step.
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
mock_xqueue_interface = Mock(
send_to_queue=Mock(return_value=(1, "Not Queued"))
)
# Call handle_ajax on the module with xqueue down
module = self._module()
with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}):
module.handle_ajax("save_assessment", assessment_dict)
self.assertEqual(module.current_task_number, 1)
self.assertTrue((module.child_module.get_task_number(1).child_created))
module.save()
# Check that next time the OpenEndedModule is loaded it calls send_to_grader
with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader:
mock_send_to_grader.return_value = (False, "Not Queued")
module = self._module().child_module.get_score()
self.assertTrue(mock_send_to_grader.called)
self.assertTrue((self._module().child_module.get_task_number(1).child_created))
# Loading it this time should send submission to xqueue correctly
self.assertFalse((self._module().child_module.get_task_number(1).child_created))
self.assertEqual(self._module().current_task_number, 1)
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render('student_view')
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render('student_view')
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
def test_open_ended_flow_correct(self):
"""
Test a two step problem where the student first goes through the self assessment step, and then the
open ended step.
@return:
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render('student_view')
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render('student_view')
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to reset the problem
"""
problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestion1Attempt"])
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
return self.get_module_from_location(self.problem_location, COURSE)
def test_reset_fail(self):
"""
Test the flow of the module if we complete the self assessment step and then reset
        Since the problem only allows one attempt, the reset should fail.
@return:
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render('student_view').content
self.assertIsInstance(html, basestring)
# Module should now be done
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "done")
# Try to reset, should fail because only 1 attempt is allowed
reset_data = json.loads(self._handle_ajax("reset", {}))
self.assertEqual(reset_data['success'], False)
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to upload images properly.
"""
problem_location = Location(["i4x", "edX", "open_ended", "combinedopenended", "SampleQuestionImageUpload"])
answer_text = "Hello, this is my amazing answer."
file_text = "Hello, this is my amazing file."
file_name = "Student file 1"
answer_link = "http://www.edx.org"
autolink_tag = "<a href="
def get_module_system(self, descriptor):
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.s3_interface = test_util_open_ended.S3_INTERFACE
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
self.setup_modulestore(COURSE)
def test_file_upload_fail(self):
"""
Test to see if a student submission without a file attached fails.
"""
module = self.get_module_from_location(self.problem_location, COURSE)
# Simulate a student saving an answer
response = module.handle_ajax("save_answer", {"student_answer": self.answer_text})
response = json.loads(response)
self.assertFalse(response['success'])
self.assertIn('error', response)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.S3Connection',
test_util_open_ended.MockS3Connection
)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.Key',
test_util_open_ended.MockS3Key
)
def test_file_upload_success(self):
"""
Test to see if a student submission with a file is handled properly.
"""
module = self.get_module_from_location(self.problem_location, COURSE)
# Simulate a student saving an answer with a file
response = module.handle_ajax("save_answer", {
"student_answer": self.answer_text,
"valid_files_attached": True,
"student_file": [MockUploadedFile(self.file_name, self.file_text)],
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.file_name, response['student_response'])
self.assertIn(self.autolink_tag, response['student_response'])
def test_link_submission_success(self):
"""
Students can submit links instead of files. Check that the link is properly handled.
"""
module = self.get_module_from_location(self.problem_location, COURSE)
# Simulate a student saving an answer with a link.
response = module.handle_ajax("save_answer", {
"student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.answer_link, response['student_response'])
self.assertIn(self.autolink_tag, response['student_response'])
class OpenEndedModuleUtilTest(unittest.TestCase):
"""
Tests for the util functions of OpenEndedModule. Currently just for the html_sanitizer and <br/> inserter
"""
script_dirty = u'<script>alert("xss!")</script>'
script_clean = u'alert("xss!")'
img_dirty = u'<img alt="cats" height="200" onclick="eval()" src="http://example.com/lolcats.jpg" width="200">'
img_clean = u'<img alt="cats" height="200" src="http://example.com/lolcats.jpg" width="200">'
embed_dirty = u'<embed height="200" id="cats" onhover="eval()" src="http://example.com/lolcats.swf" width="200"/>'
embed_clean = u'<embed height="200" id="cats" src="http://example.com/lolcats.swf" width="200">'
iframe_dirty = u'<iframe class="cats" height="200" onerror="eval()" src="http://example.com/lolcats" width="200"/>'
iframe_clean = u'<iframe class="cats" height="200" src="http://example.com/lolcats" width="200"></iframe>'
text = u'I am a \u201c\xfcber student\u201d'
text_lessthan_noencd = u'This used to be broken < by the other parser. 3>5'
text_lessthan_encode = u'This used to be broken < by the other parser. 3>5'
text_linebreaks = u"St\xfcdent submission:\nI like lamp."
text_brs = u"St\xfcdent submission:<br/>I like lamp."
link_text = u'I love going to www.lolcatz.com'
link_atag = u'I love going to <a href="http://www.lolcatz.com" target="_blank">www.lolcatz.com</a>'
def test_script(self):
"""
Basic test for stripping <script>
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.script_dirty), self.script_clean)
def test_img(self):
"""
Basic test for passing through img, but stripping bad attr
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.img_dirty), self.img_clean)
def test_embed(self):
"""
Basic test for passing through embed, but stripping bad attr
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.embed_dirty), self.embed_clean)
def test_iframe(self):
"""
Basic test for passing through iframe, but stripping bad attr
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.iframe_dirty), self.iframe_clean)
def test_text(self):
"""
Test for passing through text unchanged, including unicode
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.text), self.text)
def test_lessthan(self):
"""
Tests that `<` in text context is handled properly
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)
def test_linebreaks(self):
"""
tests the replace_newlines function
"""
self.assertEqual(OpenEndedChild.replace_newlines(self.text_linebreaks), self.text_brs)
def test_linkify(self):
"""
        Tests that bare URLs are wrapped in anchor tags by sanitize_html
"""
self.assertEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)
def test_combined(self):
"""
tests a combination of inputs
"""
test_input = u"{}\n{}\n{}\n\n{}{}\n{}".format(self.link_text,
self.text,
self.script_dirty,
self.embed_dirty,
self.text_lessthan_noencd,
self.img_dirty)
test_output = u"{}<br/>{}<br/>{}<br/><br/>{}{}<br/>{}".format(self.link_atag,
self.text,
self.script_clean,
self.embed_clean,
self.text_lessthan_encode,
self.img_clean)
self.assertEqual(OpenEndedChild.sanitize_html(test_input), test_output)
|
agpl-3.0
|
dkalashnik/tempest
|
tempest/tests/services/compute/test_tenant_networks_client.py
|
2
|
2912
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch
from tempest.services.compute.json import tenant_networks_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestTenantNetworksClient(base.TestCase):
FAKE_NETWORK = {
"cidr": "None",
"id": "c2329eb4-cc8e-4439-ac4c-932369309e36",
"label": u'\u30d7'
}
FAKE_NETWORKS = [FAKE_NETWORK]
NETWORK_ID = FAKE_NETWORK['id']
def setUp(self):
super(TestTenantNetworksClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = tenant_networks_client.TenantNetworksClient(
fake_auth, 'compute', 'regionOne')
def _test_list_tenant_networks(self, bytes_body=False):
serialized_body = json.dumps({"networks": self.FAKE_NETWORKS})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.list_tenant_networks()
self.assertEqual({"networks": self.FAKE_NETWORKS}, resp)
def test_list_tenant_networks_with_str_body(self):
self._test_list_tenant_networks()
def test_list_tenant_networks_with_bytes_body(self):
self._test_list_tenant_networks(bytes_body=True)
def _test_show_tenant_network(self, bytes_body=False):
serialized_body = json.dumps({"network": self.FAKE_NETWORK})
if bytes_body:
serialized_body = serialized_body.encode('utf-8')
mocked_resp = (httplib2.Response({'status': 200}), serialized_body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=mocked_resp))
resp = self.client.show_tenant_network(self.NETWORK_ID)
self.assertEqual({"network": self.FAKE_NETWORK}, resp)
def test_show_tenant_network_with_str_body(self):
self._test_show_tenant_network()
def test_show_tenant_network_with_bytes_body(self):
self._test_show_tenant_network(bytes_body=True)
|
apache-2.0
|
janlindstrom/percona-xtrabackup
|
storage/innobase/xtrabackup/test/python/subunit/chunked.py
|
64
|
7181
|
#
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Encoder/decoder for http style chunked encoding."""
from testtools.compat import _b
empty = _b('')
class Decoder(object):
"""Decode chunked content to a byte stream."""
def __init__(self, output, strict=True):
"""Create a decoder decoding to output.
:param output: A file-like object. Bytes written to the Decoder are
decoded to strip off the chunking and written to the output.
Up to a full write worth of data or a single control line may be
buffered (whichever is larger). The close method should be called
when no more data is available, to detect short streams; the
            write method will return non-None when the end of a stream is
detected. The output object must accept bytes objects.
:param strict: If True (the default), the decoder will not knowingly
accept input that is not conformant to the HTTP specification.
(This does not imply that it will catch every nonconformance.)
If False, it will accept incorrect input that is still
unambiguous.
"""
self.output = output
self.buffered_bytes = []
self.state = self._read_length
self.body_length = 0
self.strict = strict
self._match_chars = _b("0123456789abcdefABCDEF\r\n")
self._slash_n = _b('\n')
self._slash_r = _b('\r')
self._slash_rn = _b('\r\n')
self._slash_nr = _b('\n\r')
def close(self):
"""Close the decoder.
:raises ValueError: If the stream is incomplete ValueError is raised.
"""
if self.state != self._finished:
raise ValueError("incomplete stream")
def _finished(self):
"""Finished reading, return any remaining bytes."""
if self.buffered_bytes:
buffered_bytes = self.buffered_bytes
self.buffered_bytes = []
return empty.join(buffered_bytes)
else:
raise ValueError("stream is finished")
def _read_body(self):
"""Pass body bytes to the output."""
while self.body_length and self.buffered_bytes:
if self.body_length >= len(self.buffered_bytes[0]):
self.output.write(self.buffered_bytes[0])
self.body_length -= len(self.buffered_bytes[0])
del self.buffered_bytes[0]
# No more data available.
if not self.body_length:
self.state = self._read_length
else:
self.output.write(self.buffered_bytes[0][:self.body_length])
self.buffered_bytes[0] = \
self.buffered_bytes[0][self.body_length:]
self.body_length = 0
self.state = self._read_length
return self.state()
def _read_length(self):
"""Try to decode a length from the bytes."""
count_chars = []
for bytes in self.buffered_bytes:
for pos in range(len(bytes)):
byte = bytes[pos:pos+1]
if byte not in self._match_chars:
break
count_chars.append(byte)
if byte == self._slash_n:
break
if not count_chars:
return
if count_chars[-1] != self._slash_n:
return
count_str = empty.join(count_chars)
if self.strict:
if count_str[-2:] != self._slash_rn:
raise ValueError("chunk header invalid: %r" % count_str)
if self._slash_r in count_str[:-2]:
raise ValueError("too many CRs in chunk header %r" % count_str)
self.body_length = int(count_str.rstrip(self._slash_nr), 16)
excess_bytes = len(count_str)
while excess_bytes:
if excess_bytes >= len(self.buffered_bytes[0]):
excess_bytes -= len(self.buffered_bytes[0])
del self.buffered_bytes[0]
else:
self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
excess_bytes = 0
if not self.body_length:
self.state = self._finished
if not self.buffered_bytes:
# May not call into self._finished with no buffered data.
return empty
else:
self.state = self._read_body
return self.state()
def write(self, bytes):
"""Decode bytes to the output stream.
:raises ValueError: If the stream has already seen the end of file
marker.
:returns: None, or the excess bytes beyond the end of file marker.
"""
if bytes:
self.buffered_bytes.append(bytes)
return self.state()
class Encoder(object):
"""Encode content to a stream using HTTP Chunked coding."""
def __init__(self, output):
"""Create an encoder encoding to output.
:param output: A file-like object. Bytes written to the Encoder
will be encoded using HTTP chunking. Small writes may be buffered
and the ``close`` method must be called to finish the stream.
"""
self.output = output
self.buffered_bytes = []
self.buffer_size = 0
def flush(self, extra_len=0):
"""Flush the encoder to the output stream.
:param extra_len: Increase the size of the chunk by this many bytes
to allow for a subsequent write.
"""
if not self.buffer_size and not extra_len:
return
buffered_bytes = self.buffered_bytes
buffer_size = self.buffer_size
self.buffered_bytes = []
self.buffer_size = 0
self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
if buffer_size:
self.output.write(empty.join(buffered_bytes))
return True
def write(self, bytes):
"""Encode bytes to the output stream."""
bytes_len = len(bytes)
if self.buffer_size + bytes_len >= 65536:
self.flush(bytes_len)
self.output.write(bytes)
else:
self.buffered_bytes.append(bytes)
self.buffer_size += bytes_len
def close(self):
"""Finish the stream. This does not close the output stream."""
self.flush()
self.output.write(_b("0\r\n"))
|
gpl-2.0
|
sillydan1/WhatEverEngine
|
openglcsharp/Lib/encodings/koi8_u.py
|
593
|
14018
|
""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-u',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
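
# --- Illustrative usage (editor's sketch, not part of the generated file):
# the charmap tables above give a reversible byte<->code-point mapping,
# e.g. 0xB4 <-> U+0404 (CYRILLIC CAPITAL LETTER UKRAINIAN IE).
if __name__ == '__main__':
    info = getregentry()
    encoded, _ = info.encode(u'\u0404')
    assert encoded == '\xb4'
    decoded, _ = info.decode('\xb4')
    assert decoded == u'\u0404'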
|
apache-2.0
|
kleientertainment/ds_mod_tools
|
pkg/win32/Python27/Lib/codecs.py
|
84
|
36364
|
""" codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import __builtin__, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = '\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = '\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = '\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = '\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
         'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
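
# --- Editor's illustration (not part of the original module): the error
# handling schemes described in the Codec docstring above, shown with the
# builtin ASCII codec under Python 2 (u'caf\xe9' is "caf" plus U+00E9).
#
#     u'caf\xe9'.encode('ascii', 'ignore')             -> 'caf'
#     u'caf\xe9'.encode('ascii', 'replace')            -> 'caf?'
#     u'caf\xe9'.encode('ascii', 'xmlcharrefreplace')  -> 'caf&#233;'
#     u'caf\xe9'.encode('ascii', 'backslashreplace')   -> 'caf\\xe9'
#     u'caf\xe9'.encode('ascii', 'strict')             -> raises UnicodeEncodeError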
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can be
passed piece by piece to the encode() method. The IncrementalEncoder remembers
the state of the Encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
self.buffer = "" # unencoded input that is kept between calls to encode()
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can be
passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Creates a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete byte
sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
self.buffer = "" # undecoded input that is kept between calls to decode()
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = ""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
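
# --- Editor's sketch (not in the original source): incremental decoding of
# a multi-byte UTF-8 sequence fed one byte at a time.  The UTF-8 codec's
# incremental decoder is a BufferedIncrementalDecoder, so the incomplete
# lead byte is kept in the buffer between calls.
#
#     d = lookup('utf-8').incrementaldecoder()
#     d.decode('\xc3')               -> u''      (incomplete, buffered)
#     d.decode('\xa9', final=True)   -> u'\xe9'  (sequence completed)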
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
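
# --- Editor's sketch (not in the original source): a StreamWriter obtained
# through the codec registry transparently encodes unicode objects on write;
# unknown attributes such as close() are delegated to the wrapped stream.
#
#     w = lookup('utf-8').streamwriter(open('out.txt', 'wb'))
#     w.write(u'caf\xe9')        # 'caf\xc3\xa9' is written to the file
#     w.close()                  # forwarded to the underlying stream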
###
class StreamReader(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = ""
# For str->str decoding this will stay a str
# For str->unicode decoding the first read will promote it to unicode
self.charbuffer = ""
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = "".join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
            # can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError, exc:
if firstline:
newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = ""
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(False)[0]
return line
readsize = size or 72
line = ""
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if data.endswith("\r"):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = "".join(lines[1:]) + self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(False)[0]
break
if readsize<8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
            way of finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = ""
self.charbuffer = u""
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def next(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def next(self):
""" Return the next decoded line from the input stream."""
return self.reader.next()
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
        In the other direction, data is read from the stream using a
        Reader instance and then encoded before being returned to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None:
if 'U' in mode:
# No automatic conversion of '\n' is done on reading and writing
mode = mode.strip().replace('U', '')
if mode[:1] not in set('rwa'):
mode = 'r' + mode
if 'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = __builtin__.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
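# A minimal usage sketch for open(), illustrative only; the filename
# 'example.txt' is a hypothetical placeholder.
def _open_usage_sketch():
    f = open('example.txt', 'w', encoding='utf-8')  # returns a StreamReaderWriter
    f.write(u'caf\xe9\n')  # unicode in, UTF-8 bytes on disk
    f.close()
    f = open('example.txt', 'r', encoding='utf-8')
    text = f.read()  # decoded back to a unicode string
    f.close()
    return text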
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
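# A minimal sketch of EncodedFile(), illustrative only: the caller works in
# latin-1 while the underlying (in-memory) file stores utf-8.
def _encodedfile_usage_sketch():
    import StringIO
    backing = StringIO.StringIO()
    wrapped = EncodedFile(backing, data_encoding='latin-1', file_encoding='utf-8')
    wrapped.write('caf\xe9')  # latin-1 bytes from the caller
    return backing.getvalue()  # 'caf\xc3\xa9', i.e. utf-8 underneath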
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
        or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
    Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode("", True)
if output:
yield output
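# A minimal round-trip sketch for iterencode()/iterdecode(), illustrative only.
def _iterencode_roundtrip_sketch():
    chunks = [u'alpha', u'\u00df', u'omega']
    encoded = list(iterencode(chunks, 'utf-8'))  # utf-8 byte strings
    decoded = u''.join(iterdecode(encoded, 'utf-8'))
    return decoded == u''.join(chunks)  # True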
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
res = {}
for i in rng:
res[i]=i
return res
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
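# A small sketch of the helpers above, illustrative only: a decoding map with a
# duplicate target becomes unencodable in the derived encoding map.
def _encoding_map_sketch():
    decoding_map = make_identity_dict(range(3))
    decoding_map[0x80] = u'\x1a'
    decoding_map[0x81] = u'\x1a'  # second codepoint decoding to the same target
    encoding_map = make_encoding_map(decoding_map)
    return encoding_map[u'\x1a'] is None  # True: the ambiguous target is dropped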
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
|
mit
|
eamuntz/Django-Tut
|
env/lib/python2.7/site-packages/django/core/management/commands/dbshell.py
|
329
|
1243
|
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
help = ("Runs the command-line client for specified database, or the "
"default database if none is provided.")
option_list = BaseCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database onto which to '
'open a shell. Defaults to the "default" database.'),
)
requires_model_validation = False
def handle(self, **options):
connection = connections[options.get('database')]
try:
connection.client.runshell()
except OSError:
# Note that we're assuming OSError means that the client program
# isn't installed. There's a possibility OSError would be raised
# for some other reason, in which case this error message would be
# inaccurate. Still, this message catches the common case.
raise CommandError('You appear not to have the %r program installed or on your path.' % \
connection.client.executable_name)
|
mit
|
Jackysonglanlan/devops
|
IDEs/sublime/shared-pkgs/Packages/pygments/all/pygments/lexers/nimrod.py
|
45
|
5105
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.nimrod
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Nimrod language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['NimrodLexer']
class NimrodLexer(RegexLexer):
"""
For `Nimrod <http://nimrod-code.org/>`_ source code.
.. versionadded:: 1.5
"""
name = 'Nimrod'
aliases = ['nimrod', 'nim']
filenames = ['*.nim', '*.nimrod']
mimetypes = ['text/x-nimrod']
flags = re.MULTILINE | re.IGNORECASE | re.UNICODE
def underscorize(words):
newWords = []
new = ""
for word in words:
for ch in word:
new += (ch + "_?")
newWords.append(new)
new = ""
return "|".join(newWords)
keywords = [
'addr', 'and', 'as', 'asm', 'atomic', 'bind', 'block', 'break',
'case', 'cast', 'const', 'continue', 'converter', 'discard',
'distinct', 'div', 'elif', 'else', 'end', 'enum', 'except', 'finally',
'for', 'generic', 'if', 'implies', 'in', 'yield',
'is', 'isnot', 'iterator', 'lambda', 'let', 'macro', 'method',
'mod', 'not', 'notin', 'object', 'of', 'or', 'out', 'proc',
'ptr', 'raise', 'ref', 'return', 'shl', 'shr', 'template', 'try',
'tuple', 'type', 'when', 'while', 'with', 'without', 'xor'
]
keywordsPseudo = [
'nil', 'true', 'false'
]
opWords = [
'and', 'or', 'not', 'xor', 'shl', 'shr', 'div', 'mod', 'in',
'notin', 'is', 'isnot'
]
types = [
'int', 'int8', 'int16', 'int32', 'int64', 'float', 'float32', 'float64',
'bool', 'char', 'range', 'array', 'seq', 'set', 'string'
]
tokens = {
'root': [
(r'##.*$', String.Doc),
(r'#.*$', Comment),
(r'[*=><+\-/@$~&%!?|\\\[\]]', Operator),
(r'\.\.|\.|,|\[\.|\.\]|\{\.|\.\}|\(\.|\.\)|\{|\}|\(|\)|:|\^|`|;',
Punctuation),
# Strings
(r'(?:[\w]+)"', String, 'rdqs'),
(r'"""', String, 'tdqs'),
('"', String, 'dqs'),
# Char
("'", String.Char, 'chars'),
# Keywords
(r'(%s)\b' % underscorize(opWords), Operator.Word),
(r'(p_?r_?o_?c_?\s)(?![(\[\]])', Keyword, 'funcname'),
(r'(%s)\b' % underscorize(keywords), Keyword),
(r'(%s)\b' % underscorize(['from', 'import', 'include']),
Keyword.Namespace),
(r'(v_?a_?r)\b', Keyword.Declaration),
(r'(%s)\b' % underscorize(types), Keyword.Type),
(r'(%s)\b' % underscorize(keywordsPseudo), Keyword.Pseudo),
# Identifiers
(r'\b((?![_\d])\w)(((?!_)\w)|(_(?!_)\w))*', Name),
# Numbers
(r'[0-9][0-9_]*(?=([e.]|\'f(32|64)))',
Number.Float, ('float-suffix', 'float-number')),
(r'0x[a-f0-9][a-f0-9_]*', Number.Hex, 'int-suffix'),
(r'0b[01][01_]*', Number.Bin, 'int-suffix'),
(r'0o[0-7][0-7_]*', Number.Oct, 'int-suffix'),
(r'[0-9][0-9_]*', Number.Integer, 'int-suffix'),
# Whitespace
(r'\s+', Text),
(r'.+$', Error),
],
'chars': [
(r'\\([\\abcefnrtvl"\']|x[a-f0-9]{2}|[0-9]{1,3})', String.Escape),
(r"'", String.Char, '#pop'),
(r".", String.Char)
],
'strings': [
(r'(?<!\$)\$(\d+|#|\w+)+', String.Interpol),
(r'[^\\\'"$\n]+', String),
# quotes, dollars and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'\$', String)
# newlines are an error (use "nl" state)
],
'dqs': [
(r'\\([\\abcefnrtvl"\']|\n|x[a-f0-9]{2}|[0-9]{1,3})',
String.Escape),
(r'"', String, '#pop'),
include('strings')
],
'rdqs': [
(r'"(?!")', String, '#pop'),
(r'""', String.Escape),
include('strings')
],
'tdqs': [
(r'"""(?!")', String, '#pop'),
include('strings'),
include('nl')
],
'funcname': [
(r'((?![\d_])\w)(((?!_)\w)|(_(?!_)\w))*', Name.Function, '#pop'),
(r'`.+`', Name.Function, '#pop')
],
'nl': [
(r'\n', String)
],
'float-number': [
(r'\.(?!\.)[0-9_]*', Number.Float),
(r'e[+-]?[0-9][0-9_]*', Number.Float),
default('#pop')
],
'float-suffix': [
(r'\'f(32|64)', Number.Float),
default('#pop')
],
'int-suffix': [
(r'\'i(32|64)', Number.Integer.Long),
(r'\'i(8|16)', Number.Integer),
default('#pop')
],
}
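# A minimal highlighting sketch using this lexer, illustrative only; the Nimrod
# snippet below is hypothetical.
def _nimrod_lexer_sketch():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    code = 'proc greet(name: string) =\n  echo "hello ", name\n'
    return highlight(code, NimrodLexer(), NullFormatter())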
|
mit
|
Flow8/angular-phonecat
|
node_modules/protractor/node_modules/accessibility-developer-tools/scripts/parse_aria_schemas.py
|
381
|
3069
|
import json
import re
import urllib
import xml.etree.ElementTree as ET
def parse_attributes():
schema = urllib.urlopen('http://www.w3.org/MarkUp/SCHEMA/aria-attributes-1.xsd')
tree = ET.parse(schema)
for node in tree.iter():
node.tag = re.sub(r'{.*}', r'', node.tag)
type_map = {
'states': 'state',
'props': 'property'
}
properties = {}
groups = tree.getroot().findall('attributeGroup')
print groups
for group in groups:
print(group.get('name'))
name_match = re.match(r'ARIA\.(\w+)\.attrib', group.get('name'))
if not name_match:
continue
group_type = name_match.group(1)
print group_type
if group_type not in type_map:
continue
type = type_map[group_type]
for child in group:
name = re.sub(r'aria-', r'', child.attrib['name'])
property = {}
property['type'] = type
if 'type' in child.attrib:
valueType = re.sub(r'xs:', r'', child.attrib['type'])
if valueType == 'IDREF':
property['valueType'] = 'idref'
elif valueType == 'IDREFS':
property['valueType'] = 'idref_list'
else:
property['valueType'] = valueType
else:
type_spec = child.findall('simpleType')[0]
restriction_spec = type_spec.findall('restriction')[0]
base = restriction_spec.attrib['base']
if base == 'xs:NMTOKENS':
property['valueType'] = 'token_list'
elif base == 'xs:NMTOKEN':
property['valueType'] = 'token'
else:
raise Exception('Unknown value type: %s' % base)
values = []
for value_type in restriction_spec:
values.append(value_type.get('value'))
property['values'] = values
if 'default' in child.attrib:
property['defaultValue'] = child.attrib['default']
properties[name] = property
return json.dumps(properties, sort_keys=True, indent=4, separators=(',', ': '))
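# Illustrative note only: each entry of the returned JSON object maps a stripped
# attribute name (e.g. "hidden" for aria-hidden) to a description such as
# {"type": "state", "valueType": "token", "values": [...]}.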
if __name__ == "__main__":
attributes_json = parse_attributes()
constants_file = open('src/js/Constants.js', 'r')
new_constants_file = open('src/js/Constants.new.js', 'w')
in_autogen_block = False
for line in constants_file:
if not in_autogen_block:
new_constants_file.write('%s' % line)
if re.match(r'// BEGIN ARIA_PROPERTIES_AUTOGENERATED', line):
in_autogen_block = True
if re.match(r'// END ARIA_PROPERTIES_AUTOGENERATED', line):
break
new_constants_file.write('/** @type {Object.<string, Object>} */\n')
new_constants_file.write('axs.constants.ARIA_PROPERTIES = %s;\n' % attributes_json)
new_constants_file.write('// END ARIA_PROPERTIES_AUTOGENERATED\n')
for line in constants_file:
new_constants_file.write('%s' % line)
|
mit
|
vitmod/dvbapp
|
po/xml2po.py
|
40
|
1590
|
#!/usr/bin/python
import sys
import os
import string
import re
from xml.sax import make_parser
from xml.sax.handler import ContentHandler, property_lexical_handler
try:
from _xmlplus.sax.saxlib import LexicalHandler
no_comments = False
except ImportError:
class LexicalHandler:
pass
no_comments = True
class parseXML(ContentHandler, LexicalHandler):
def __init__(self, attrlist):
self.isPointsElement, self.isReboundsElement = 0, 0
self.attrlist = attrlist
self.last_comment = None
self.ishex = re.compile('#[0-9a-fA-F]+\Z')
def comment(self, comment):
if "TRANSLATORS:" in comment:
self.last_comment = comment
def startElement(self, name, attrs):
for x in ["text", "title", "value", "caption", "description"]:
try:
k = str(attrs[x])
if k.strip() != "" and not self.ishex.match(k):
attrlist.add((attrs[x], self.last_comment))
self.last_comment = None
except KeyError:
pass
parser = make_parser()
attrlist = set()
contentHandler = parseXML(attrlist)
parser.setContentHandler(contentHandler)
if not no_comments:
parser.setProperty(property_lexical_handler, contentHandler)
for arg in sys.argv[1:]:
if os.path.isdir(arg):
for file in os.listdir(arg):
if (file.endswith(".xml")):
parser.parse(os.path.join(arg, file))
else:
parser.parse(arg)
attrlist = list(attrlist)
attrlist.sort(key=lambda a: a[0])
for (k,c) in attrlist:
print
print '#: ' + arg
	k = string.replace(k, "\\n", "\"\n\"")
if c:
for l in c.split('\n'):
print "#. ", l
print 'msgid "' + str(k) + '"'
print 'msgstr ""'
attrlist = set()
|
gpl-2.0
|
ppwwyyxx/tensorflow
|
tensorflow/python/keras/optimizer_v2/adamax_test.py
|
6
|
16435
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adamax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1**(t + 1))) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t
def adamax_sparse_update_numpy(param,
indices,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
param_t_slice = param[indices] - (
(alpha / (1 - beta1**(t + 1))) * (m_t_slice / (v_t_slice + epsilon)))
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
param_t[indices] = param_t_slice
return param_t, m_t, v_t
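# A small reference sketch, illustrative only: one dense Adamax step with the
# numpy helper above (default alpha=0.001, beta1=0.9, beta2=0.999).
def _adamax_numpy_step_sketch():
  param = np.array([1.0, 2.0])
  grad = np.array([0.1, 0.1])
  m, v = np.zeros(2), np.zeros(2)
  param, m, v = adamax_update_numpy(param, grad, t=0, m=m, v=v)
  return param  # approximately [0.999, 1.999]; each weight moves by ~alpha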
def get_beta_accumulators(opt, dtype):
local_step = math_ops.cast(opt.iterations + 1, dtype)
beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
return beta_1_power
class AdamaxOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype) # pylint: disable=cell-var-from-loop
m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([2, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adamax.Adamax()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
beta1_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adamax
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_sparse_update_numpy(
var0_np, grads0_np_indices, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_sparse_update_numpy(
var1_np, grads1_np_indices, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
@test_util.run_deprecated_v1
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
@test_util.run_deprecated_v1
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.cached_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adamax.Adamax(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
variables.global_variables_initializer().run()
minimize_op.run()
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adamax.Adamax().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adamax.Adamax().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph(), use_gpu=True):
# Initialize variables for numpy implementation.
m0 = np.array([0.0, 0.0])
v0 = np.array([0.0, 0.0])
m1 = np.array([0.0, 0.0])
v1 = np.array([0.0, 0.0])
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.Adamax()
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adamax
for t in range(3):
beta_1_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-2)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-2)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicWithLearningRateDecay(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.session(graph=ops.Graph(), use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.002
opt = adamax.Adamax(learning_rate=learning_rate, decay=decay)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of Adamax
for t in range(3):
beta_1_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adamax_update_numpy(
var0_np, grads0_np, t, m0, v0, alpha=lr)
var1_np, m1, v1 = adamax_update_numpy(
var1_np, grads1_np, t, m1, v1, alpha=lr)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0),
rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1),
rtol=1e-2)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.Adamax(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adamax
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.Adamax()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined Adamax1 and Adamax2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adamax.Adamax(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
# There should be iteration, and two unique slot variables for v1 and v2.
self.assertEqual(5, len({id(v) for v in opt.variables()}))
def testConstructAdamaxWithLR(self):
opt = adamax.Adamax(lr=1.0)
opt_2 = adamax.Adamax(learning_rate=0.1, lr=1.0)
opt_3 = adamax.Adamax(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
ruuk/script.bluray.com
|
lib/html5lib/ihatexml.py
|
129
|
15309
|
import re
baseChar = """[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"""
digit = """[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | [#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
#Without the ":", which is treated separately as the namespace separator
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
charRanges = [item.strip() for item in chars.split(" | ")]
rv = []
for item in charRanges:
foundMatch = False
for regexp in (reChar, reCharRange):
match = regexp.match(item)
if match is not None:
rv.append([hexToInt(item) for item in match.groups()])
if len(rv[-1]) == 1:
rv[-1] = rv[-1]*2
foundMatch = True
break
if not foundMatch:
assert len(item) == 1
rv.append([ord(item)] * 2)
rv = normaliseCharList(rv)
return rv
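# Illustrative example only: the DTD-style strings above become sorted, merged
# codepoint ranges, e.g.
# charStringToList("[#x0041-#x005A] | #x005F") == [[0x41, 0x5A], [0x5F, 0x5F]]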
def normaliseCharList(charList):
charList = sorted(charList)
for item in charList:
assert item[1] >= item[0]
rv = []
i = 0
while i < len(charList):
j = 1
rv.append(charList[i])
while i + j < len(charList) and charList[i+j][0] <= rv[-1][1] + 1:
rv[-1][1] = charList[i+j][1]
j += 1
i += j
return rv
#We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)
def missingRanges(charList):
rv = []
    if charList[0][0] != 0:
rv.append([0, charList[0][0] - 1])
for i, item in enumerate(charList[:-1]):
rv.append([item[1]+1, charList[i+1][0] - 1])
if charList[-1][1] != max_unicode:
rv.append([charList[-1][1] + 1, max_unicode])
return rv
def listToRegexpStr(charList):
rv = []
for item in charList:
if item[0] == item[1]:
rv.append(escapeRegexp(unichr(item[0])))
else:
rv.append(escapeRegexp(unichr(item[0])) + "-" +
escapeRegexp(unichr(item[1])))
return "[%s]"%"".join(rv)
def hexToInt(hex_str):
return int(hex_str, 16)
def escapeRegexp(string):
specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
"[", "]", "|", "(", ")", "-")
for char in specialCharacters:
string = string.replace(char, "\\" + char)
if char in string:
print string
return string
#output from the above
nonXmlNameBMPRegexp = re.compile(u'[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile(u'[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
class InfosetFilter(object):
replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
def __init__(self, replaceChars = None,
dropXmlnsLocalName = False,
dropXmlnsAttrNs = False,
preventDoubleDashComments = False,
preventDashAtCommentEnd = False,
replaceFormFeedCharacters = True):
self.dropXmlnsLocalName = dropXmlnsLocalName
self.dropXmlnsAttrNs = dropXmlnsAttrNs
self.preventDoubleDashComments = preventDoubleDashComments
self.preventDashAtCommentEnd = preventDashAtCommentEnd
self.replaceFormFeedCharacters = replaceFormFeedCharacters
self.replaceCache = {}
def coerceAttribute(self, name, namespace=None):
if self.dropXmlnsLocalName and name.startswith("xmlns:"):
#Need a datalosswarning here
return None
elif (self.dropXmlnsAttrNs and
namespace == "http://www.w3.org/2000/xmlns/"):
return None
else:
return self.toXmlName(name)
def coerceElement(self, name, namespace=None):
return self.toXmlName(name)
def coerceComment(self, data):
if self.preventDoubleDashComments:
while "--" in data:
data = data.replace("--", "- -")
return data
def coerceCharacters(self, data):
if self.replaceFormFeedCharacters:
data = data.replace("\x0C", " ")
#Other non-xml characters
return data
def toXmlName(self, name):
nameFirst = name[0]
nameRest = name[1:]
m = nonXmlNameFirstBMPRegexp.match(nameFirst)
if m:
nameFirstOutput = self.getReplacementCharacter(nameFirst)
else:
nameFirstOutput = nameFirst
nameRestOutput = nameRest
replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
for char in replaceChars:
replacement = self.getReplacementCharacter(char)
nameRestOutput = nameRestOutput.replace(char, replacement)
return nameFirstOutput + nameRestOutput
def getReplacementCharacter(self, char):
if char in self.replaceCache:
replacement = self.replaceCache[char]
else:
replacement = self.escapeChar(char)
return replacement
def fromXmlName(self, name):
for item in set(self.replacementRegexp.findall(name)):
name = name.replace(item, self.unescapeChar(item))
return name
def escapeChar(self, char):
replacement = "U" + hex(ord(char))[2:].upper().rjust(5, "0")
self.replaceCache[char] = replacement
return replacement
def unescapeChar(self, charcode):
return unichr(int(charcode[1:], 16))
|
gpl-2.0
|
twisted/mantissa
|
xmantissa/test/historic/test_privateApplication3to4.py
|
1
|
3405
|
"""
Tests for the upgrade of L{PrivateApplication} schema from 3 to 4.
"""
from axiom.userbase import LoginSystem
from axiom.test.historic.stubloader import StubbedTest
from xmantissa.ixmantissa import ITemplateNameResolver, IWebViewer
from xmantissa.website import WebSite
from xmantissa.webapp import PrivateApplication
from xmantissa.publicweb import CustomizedPublicPage
from xmantissa.webgestalt import AuthenticationApplication
from xmantissa.prefs import PreferenceAggregator, DefaultPreferenceCollection
from xmantissa.search import SearchAggregator
from xmantissa.test.historic.stub_privateApplication3to4 import (
USERNAME, DOMAIN, PREFERRED_THEME, PRIVATE_KEY)
class PrivateApplicationUpgradeTests(StubbedTest):
"""
Tests for L{xmantissa.webapp.privateApplication3to4}.
"""
def setUp(self):
d = StubbedTest.setUp(self)
def siteStoreUpgraded(ignored):
loginSystem = self.store.findUnique(LoginSystem)
account = loginSystem.accountByAddress(USERNAME, DOMAIN)
self.subStore = account.avatars.open()
return self.subStore.whenFullyUpgraded()
d.addCallback(siteStoreUpgraded)
return d
def test_powerup(self):
"""
At version 4, L{PrivateApplication} should be an
L{ITemplateNameResolver} powerup on its store.
"""
application = self.subStore.findUnique(PrivateApplication)
powerups = list(self.subStore.powerupsFor(ITemplateNameResolver))
self.assertIn(application, powerups)
def test_webViewer(self):
"""
At version 5, L{PrivateApplication} should be an
L{IWebViewer} powerup on its store.
"""
application = self.subStore.findUnique(PrivateApplication)
interfaces = list(self.subStore.interfacesFor(application))
self.assertIn(IWebViewer, interfaces)
def test_attributes(self):
"""
All of the attributes of L{PrivateApplication} should have the same
values on the upgraded item as they did before the upgrade.
"""
application = self.subStore.findUnique(PrivateApplication)
self.assertEqual(application.preferredTheme, PREFERRED_THEME)
self.assertEqual(application.privateKey, PRIVATE_KEY)
website = self.subStore.findUnique(WebSite)
self.assertIdentical(application.website, website)
customizedPublicPage = self.subStore.findUnique(CustomizedPublicPage)
self.assertIdentical(
application.customizedPublicPage, customizedPublicPage)
authenticationApplication = self.subStore.findUnique(
AuthenticationApplication)
self.assertIdentical(
application.authenticationApplication, authenticationApplication)
preferenceAggregator = self.subStore.findUnique(PreferenceAggregator)
self.assertIdentical(
application.preferenceAggregator, preferenceAggregator)
defaultPreferenceCollection = self.subStore.findUnique(
DefaultPreferenceCollection)
self.assertIdentical(
application.defaultPreferenceCollection,
defaultPreferenceCollection)
searchAggregator = self.subStore.findUnique(SearchAggregator)
self.assertIdentical(application.searchAggregator, searchAggregator)
self.assertIdentical(application.privateIndexPage, None)
|
mit
|
Lemueler/Petro-UI
|
utils.py
|
1
|
10256
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Deepin, Inc.
# 2012 Kaisheng Ye
#
# Author: Kaisheng Ye <kaisheng.ye@gmail.com>
# Maintainer: Kaisheng Ye <kaisheng.ye@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from constant import DEFAULT_FONT, DEFAULT_FONT_SIZE
import gtk
import cairo
import pangocairo
import pango
import math
import socket
import os
import traceback
import sys
import datetime
from contextlib import contextmanager
from weather.Weather_info import Weather_info
import test
@contextmanager
def cairo_disable_antialias(cr):
# Save antialias.
antialias = cr.get_antialias()
cr.set_antialias(cairo.ANTIALIAS_NONE)
try:
yield
except Exception, e:
print 'function cairo_disable_antialias got error: %s' % e
traceback.print_exc(file=sys.stdout)
else:
# Restore antialias.
cr.set_antialias(antialias)
def cairo_popover(widget,
surface_context,
trayicon_x, trayicon_y,
trayicon_w, trayicon_h,
radius,
arrow_width, arrow_height, offs=0, pos_type=gtk.POS_TOP):
cr = surface_context
x = trayicon_x
y = trayicon_y
w = trayicon_w - trayicon_x * 2
h = trayicon_h - trayicon_x * 2
# set position top, bottom.
if pos_type == gtk.POS_BOTTOM:
y = y - arrow_height
h -= y
# draw.
cr.arc(x + radius,
y + arrow_height + radius,
radius,
math.pi,
math.pi * 1.5)
if pos_type == gtk.POS_TOP:
y_padding = y + arrow_height
arrow_height_padding = arrow_height
cr.line_to(offs, y_padding)
cr.rel_line_to(arrow_width / 2.0, -arrow_height_padding)
cr.rel_line_to(arrow_width / 2.0, arrow_height_padding)
cr.arc(x + w - radius,
y + arrow_height + radius,
radius,
math.pi * 1.5,
math.pi * 2.0)
cr.arc(x + w - radius,
y + h - radius,
radius,
0,
math.pi * 0.5)
if pos_type == gtk.POS_BOTTOM:
y_padding = trayicon_y + h - arrow_height
arrow_height_padding = arrow_height
cr.line_to(offs + arrow_width, y_padding)
cr.rel_line_to(-arrow_width / 2.0, arrow_height_padding)
cr.rel_line_to(-arrow_width / 2.0, -arrow_height_padding)
cr.arc(x + radius,
y + h - radius,
radius,
math.pi * 0.5,
math.pi)
cr.close_path()
def new_surface(width, height):
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
surface_context = cairo.Context(surface)
return surface, surface_context
def propagate_expose(widget, event):
if hasattr(widget, "get_child") and widget.get_child():
widget.propagate_expose(widget.get_child(), event)
def get_text_size(text, text_size=DEFAULT_FONT_SIZE, text_font=DEFAULT_FONT):
try:
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0)
cr = cairo.Context(surface)
context = pangocairo.CairoContext(cr)
layout = context.create_layout()
temp_font = pango.FontDescription("%s %s" % (text_font, text_size))
layout.set_font_description(temp_font)
layout.set_text(text)
return layout.get_pixel_size()
except:
return (0, 0)
def get_home_path():
return os.path.expandvars("$HOME")
def get_config_path():
return os.path.join(get_home_path(), ".config/deepin-system-settings/tray")
def get_config_file():
return os.path.join(get_config_path(), "config.ini")
def config_path_check():
if os.path.exists(get_config_path()):
return True
else:
return False
def config_file_check():
if os.path.exists(get_config_file()):
return True
else:
return False
def init_config_path():
os.makedirs(get_config_path())
def pixbuf_check(element):
return isinstance(element, gtk.gdk.Pixbuf)
def text_check(element):
return isinstance(element, str)
def cn_check():
return os.environ["LANGUAGE"].startswith("zh_")
def app_check(bind_name):
try:
app_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
app_socket.bind(bind_name)
return False
except:
return True
def clear_app_bind(bind_name):
try:
os.remove(bind_name)
except Exception, e:
print "clear_app_bind[error]:", e
def get_run_app_path(add_path="image"):
file = os.path.abspath(sys.argv[0])
path = os.path.dirname(file)
return os.path.join(path, add_path)
def cairo_popover_rectangle(widget,
surface_context,
trayicon_x, trayicon_y,
trayicon_w, trayicon_h,
radius):
cr = surface_context
x = trayicon_x
y = trayicon_y
w = trayicon_w - (trayicon_x * 2)
h = trayicon_h - (trayicon_x * 2)
# draw.
cr.arc(x + radius,
y + radius,
radius,
math.pi,
math.pi * 1.5)
cr.arc(x + w - radius,
y + radius,
radius,
math.pi * 1.5,
math.pi * 2.0)
cr.arc(x + w - radius,
y + h - radius,
radius,
0,
math.pi * 0.5)
cr.arc(x + radius,
y + h - radius,
radius,
math.pi * 0.5,
math.pi)
cr.close_path()
def in_window_check(widget, event):
toplevel = widget.get_toplevel()
window_x, window_y = toplevel.get_position()
x_root = event.x_root
y_root = event.y_root
if not ((x_root >= window_x
and x_root < window_x + widget.allocation.width)
and (y_root >= window_y
and y_root < window_y + widget.allocation.height)):
return True
def move_window(widget, event, window):
'''
    Move a window with the given widget and event.
    This function is typically used to move a window when the mouse drags on the target widget.
@param widget: Gtk.Widget instance to drag.
@param event: Gdk.Event instance, generic, event come from gtk signal call.
@param window: Gtk.Window instance.
'''
if event.button == 1:
window.begin_move_drag(
event.button,
int(event.x_root),
int(event.y_root),
event.time)
return False
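# A minimal wiring sketch for move_window, illustrative only; 'titlebar' and
# 'window' are hypothetical widgets supplied by the caller.
def _move_window_usage_sketch(titlebar, window):
    titlebar.connect("button-press-event",
                     lambda widget, event: move_window(widget, event, window))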
def container_remove_all(container):
'''
    Handy function to remove all child widgets from a container.
@param container: Gtk.Container instance.
'''
container.foreach(lambda widget: container.remove(widget))
def get_screen_size(widget):
'''
Get screen size from the toplevel window associated with widget.
@param widget: Gtk.Widget instance.
@return: Return screen size as (screen_width, screen_height)
'''
screen = widget.get_screen()
width = screen.get_width()
height = screen.get_height()
return (width, height)
def scroll_to_top(scrolled_window):
'''
Scroll scrolled_window to top position.
@param scrolled_window: Gtk.ScrolledWindow instance.
'''
scrolled_window.get_vadjustment().set_value(0)
def scroll_to_bottom(scrolled_window):
'''
Scroll scrolled_window to bottom position.
@param scrolled_window: Gtk.ScrolledWindow instance.
'''
vadjust = scrolled_window.get_vadjustment()
vadjust.set_value(vadjust.get_upper() - vadjust.get_page_size())
def window_is_max(widget):
'''
Whether window is maximized.
@param widget: Gtk.Widget instance.
@return: Return True if widget's toplevel window is maximized.
'''
toplevel_window = widget.get_toplevel()
    if toplevel_window.window.get_state() & gtk.gdk.WINDOW_STATE_MAXIMIZED:
return True
else:
return False
def remove_signal_id(signal_id):
'''
Remove signal id.
@param signal_id: Signal id that return by function gobject.connect.
'''
if signal_id:
(signal_object, signal_handler_id) = signal_id
if signal_object.handler_is_connected(signal_handler_id):
signal_object.disconnect(signal_handler_id)
signal_id = None
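# Usage sketch below is illustrative and not part of the original module; it
# shows the (object, handler_id) pair format that remove_signal_id() expects.
def _example_tracked_connect(widget, callback):
    # Keep the pair so the handler can later be disconnected safely.
    handler_id = widget.connect("button-press-event", callback)
    signal_id = (widget, handler_id)
    return signal_id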
def get_image_files(directory):
imagetypes = ['png', 'jpg', 'jpeg']
tmp = []
if not os.path.exists(directory):
return None
for root, dirs, fileNames in os.walk(directory):
if fileNames:
for filename in fileNames:
                if '.' in filename and filename.split('.')[-1] in imagetypes:
tmp.append(os.path.join(root, filename))
return tmp
def alpha_color_hex_to_cairo((color, alpha)):
(r, g, b) = color_hex_to_cairo(color)
return (r, g, b, alpha)
def color_hex_to_cairo(color):
gdk_color = gtk.gdk.color_parse(color)
return (gdk_color.red / 65535.0,
gdk_color.green / 65535.0,
gdk_color.blue / 65535.0)
def get_weekdays(days):
day_id = datetime.datetime.now().weekday()
data = []
weekday_info = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
for i in xrange(0, days):
data.append(weekday_info[day_id])
day_id += 1
if day_id == 7:
day_id = 0
return data
def get_weather_pic(weather):
test.debug_tip(__file__, weather)
image = Weather_info.get(weather)
if image:
return image
else:
return "./image/weather/na.png"
def check_network():
import urllib2
try:
urllib2.urlopen('http://wlemuel.sinaapp.com', timeout=1)
return True
except urllib2.URLError:
return False
|
lgpl-3.0
|
wangscript/libjingle-1
|
trunk/testing/gtest/test/gtest_shuffle_test.py
|
3023
|
12549
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
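# Worked example (illustrative, not part of the original test): for the input
# ['A.one', 'A.two', 'B.one'], GetTestCases() returns ['A', 'B'] -- each test
# case name is kept once, in the order it first appears.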
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
|
bsd-3-clause
|
MiyamotoAkira/kivy
|
doc/sources/conf.py
|
35
|
7092
|
# -*- coding: utf-8 -*-
#
# Kivy documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 21 22:37:12 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'autodoc', 'sphinx.ext.todo', 'preprocess', 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode', 'sphinx.ext.mathjax']
# Todo configuration
todo_include_todos = True
# XXX HACK mathieu: monkey patch the autodoc module, to give a better priority
# for ClassDocumenter, or the cython class will be documented as AttributeClass
import sphinx.ext.autodoc
sphinx.ext.autodoc.ClassDocumenter.priority = 10
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Kivy'
copyright = '2010, The Kivy Authors'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
os.environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
print(kivy.__file__)
version = kivy.__version__
release = kivy.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# suppress exclusion warnings
exclude_patterns = ['guide/layouts.rst', 'api-kivy.lib.osc*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'kivy_pygments_theme.KivyStyle'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'fresh.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = '.static/logo-kivy.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Kivydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Kivy.tex', 'Kivy Documentation',
'The Kivy Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '10pt',
'preamble': r'\usepackage{kivystyle}'
}
latex_additional_files = ['kivystyle.sty',
'../../kivy/data/logo/kivy-icon-512.png']
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
from kivy import setupconfig
replacements = {
'cython_install': 'Cython==' + setupconfig.CYTHON_MAX,
'cython_note': (
'This version of **Kivy requires at least Cython version {0}**, '
'and has been tested through {1}. Later versions may work, '
'but as they have not been tested there is no guarantee.'
).format(setupconfig.CYTHON_MIN, setupconfig.CYTHON_MAX)
}
if setupconfig.CYTHON_BAD:
replacements['cython_note'] += (' **The following versions of Cython have '
'known issues and cannot be used with Kivy'
': {0}**').format(setupconfig.CYTHON_BAD)
epilog = []
for key, value in replacements.items():
rep = '.. |{0}| replace:: {1}'.format(key, value)
epilog.append(rep)
rst_epilog = '\n'.join(epilog)
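# For illustration (an assumption about the generated text, not part of the
# original config): with the replacements above, rst_epilog holds lines of the
# form
#   .. |cython_install| replace:: Cython==<CYTHON_MAX>
# which Sphinx appends to every source file, so the |cython_install| and
# |cython_note| substitutions can be used from any .rst document.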
|
mit
|
krsjoseph/youtube-dl
|
youtube_dl/cache.py
|
179
|
2980
|
from __future__ import unicode_literals
import errno
import io
import json
import os
import re
import shutil
import traceback
from .compat import compat_expanduser, compat_getenv
from .utils import write_json_file
class Cache(object):
def __init__(self, ydl):
self._ydl = ydl
def _get_root_dir(self):
res = self._ydl.params.get('cachedir')
if res is None:
cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, 'youtube-dl')
return compat_expanduser(res)
def _get_cache_fn(self, section, key, dtype):
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
'invalid section %r' % section
assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
return os.path.join(
self._get_root_dir(), section, '%s.%s' % (key, dtype))
@property
def enabled(self):
return self._ydl.params.get('cachedir') is not False
def store(self, section, key, data, dtype='json'):
assert dtype in ('json',)
if not self.enabled:
return
fn = self._get_cache_fn(section, key, dtype)
try:
try:
os.makedirs(os.path.dirname(fn))
except OSError as ose:
if ose.errno != errno.EEXIST:
raise
write_json_file(data, fn)
except Exception:
tb = traceback.format_exc()
self._ydl.report_warning(
'Writing cache to %r failed: %s' % (fn, tb))
def load(self, section, key, dtype='json', default=None):
assert dtype in ('json',)
if not self.enabled:
return default
cache_fn = self._get_cache_fn(section, key, dtype)
try:
try:
with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
return json.load(cachef)
except ValueError:
try:
file_size = os.path.getsize(cache_fn)
except (OSError, IOError) as oe:
file_size = str(oe)
self._ydl.report_warning(
'Cache retrieval from %s failed (%s)' % (cache_fn, file_size))
except IOError:
pass # No cache available
return default
def remove(self):
if not self.enabled:
self._ydl.to_screen('Cache is disabled (Did you combine --no-cache-dir and --rm-cache-dir?)')
return
cachedir = self._get_root_dir()
if not any((term in cachedir) for term in ('cache', 'tmp')):
raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
self._ydl.to_screen(
'Removing cache dir %s .' % cachedir, skip_eol=True)
if os.path.exists(cachedir):
self._ydl.to_screen('.', skip_eol=True)
shutil.rmtree(cachedir)
self._ydl.to_screen('.')
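# Usage sketch below is illustrative and not part of the original module;
# `ydl` stands for a YoutubeDL instance providing the `params` dictionary.
def _example_cache_roundtrip(ydl):
    # Store a small JSON payload and read it back for the same section/key.
    cache = Cache(ydl)
    cache.store('example-section', 'example-key', {'value': 1})
    return cache.load('example-section', 'example-key', default={})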
|
unlicense
|
40223145c2g18/c2g18
|
exts/w2/static/Brython2.0.0-20140209-164925/Lib/test/support.py
|
111
|
67787
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.support':
raise ImportError('support must be imported from the test package')
import contextlib
import errno
import functools
import gc
import socket
import sys
import os
import platform
import shutil
import warnings
import unittest
import importlib
import collections.abc
import re
import subprocess
import imp
import time
import sysconfig
import fnmatch
import logging.handlers
import struct
import tempfile
import _testcapi
try:
import _thread, threading
except ImportError:
_thread = None
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
try:
import zlib
except ImportError:
zlib = None
try:
import bz2
except ImportError:
bz2 = None
try:
import lzma
except ImportError:
lzma = None
__all__ = [
"Error", "TestFailed", "ResourceDenied", "import_module", "verbose",
"use_resources", "max_memuse", "record_original_stdout",
"get_original_stdout", "unload", "unlink", "rmtree", "forget",
"is_resource_enabled", "requires", "requires_freebsd_version",
"requires_linux_version", "requires_mac_ver", "find_unused_port",
"bind_port", "IPV6_ENABLED", "is_jython", "TESTFN", "HOST", "SAVEDCWD",
"temp_cwd", "findfile", "create_empty_file", "sortdict",
"check_syntax_error", "open_urlresource", "check_warnings", "CleanImport",
"EnvironmentVarGuard", "TransientResource", "captured_stdout",
"captured_stdin", "captured_stderr", "time_out", "socket_peer_reset",
"ioerror_peer_reset", "run_with_locale", 'temp_umask',
"transient_internet", "set_memlimit", "bigmemtest", "bigaddrspacetest",
"BasicTestRunner", "run_unittest", "run_doctest", "threading_setup",
"threading_cleanup", "reap_children", "cpython_only", "check_impl_detail",
"get_attribute", "swap_item", "swap_attr", "requires_IEEE_754",
"TestHandler", "Matcher", "can_symlink", "skip_unless_symlink",
"skip_unless_xattr", "import_fresh_module", "requires_zlib",
"PIPE_MAX_SIZE", "failfast", "anticipate_failure", "run_with_tz",
"requires_bz2", "requires_lzma", "suppress_crash_popup",
]
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class ResourceDenied(unittest.SkipTest):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
    has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
@contextlib.contextmanager
def _ignore_deprecated_imports(ignore=True):
"""Context manager to suppress package and module deprecation
warnings when importing them.
If ignore is False, this context manager has no effect."""
if ignore:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".+ (module|package)",
DeprecationWarning)
yield
else:
yield
def import_module(name, deprecated=False):
"""Import and return the module to be tested, raising SkipTest if
it is not available.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
with _ignore_deprecated_imports(deprecated):
try:
return importlib.import_module(name)
except ImportError as msg:
raise unittest.SkipTest(str(msg))
def _save_and_remove_module(name, orig_modules):
"""Helper function to save and remove a module from sys.modules
Raise ImportError if the module can't be imported."""
# try to import the module and raise an error if it can't be imported
if name not in sys.modules:
__import__(name)
del sys.modules[name]
for modname in list(sys.modules):
if modname == name or modname.startswith(name + '.'):
orig_modules[modname] = sys.modules[modname]
del sys.modules[modname]
def _save_and_block_module(name, orig_modules):
"""Helper function to save and block a module in sys.modules
Return True if the module was in sys.modules, False otherwise."""
saved = True
try:
orig_modules[name] = sys.modules[name]
except KeyError:
saved = False
sys.modules[name] = None
return saved
def anticipate_failure(condition):
"""Decorator to mark a test that is known to be broken in some cases
Any use of this decorator should have a comment identifying the
associated tracker issue.
"""
if condition:
return unittest.expectedFailure
return lambda f: f
def import_fresh_module(name, fresh=(), blocked=(), deprecated=False):
"""Imports and returns a module, deliberately bypassing the sys.modules cache
and importing a fresh copy of the module. Once the import is complete,
the sys.modules cache is restored to its original state.
Modules named in fresh are also imported anew if needed by the import.
If one of these modules can't be imported, None is returned.
Importing of modules named in blocked is prevented while the fresh import
takes place.
If deprecated is True, any module or package deprecation messages
will be suppressed."""
# NOTE: test_heapq, test_json and test_warnings include extra sanity checks
# to make sure that this utility function is working as expected
with _ignore_deprecated_imports(deprecated):
# Keep track of modules saved for later restoration as well
# as those which just need a blocking entry removed
orig_modules = {}
names_to_remove = []
_save_and_remove_module(name, orig_modules)
try:
for fresh_name in fresh:
_save_and_remove_module(fresh_name, orig_modules)
for blocked_name in blocked:
if not _save_and_block_module(blocked_name, orig_modules):
names_to_remove.append(blocked_name)
fresh_module = importlib.import_module(name)
except ImportError:
fresh_module = None
finally:
for orig_name, module in orig_modules.items():
sys.modules[orig_name] = module
for name_to_remove in names_to_remove:
del sys.modules[name_to_remove]
return fresh_module
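# Usage sketch below is illustrative and not part of the original module: a
# fresh pure-Python 'warnings' module imported with its C accelerator blocked.
def _example_fresh_warnings_module():
    return import_fresh_module('warnings', blocked=['_warnings'])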
def get_attribute(obj, name):
"""Get an attribute, raising SkipTest if AttributeError is raised."""
try:
attribute = getattr(obj, name)
except AttributeError:
raise unittest.SkipTest("object %r has no attribute %r" % (obj, name))
else:
return attribute
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
real_max_memuse = 0
failfast = False
match_tests = None
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
if sys.platform.startswith("win"):
def _waitfor(func, pathname, waitall=False):
        # Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or '.'
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn('tests may fail, delete still pending for ' + pathname,
RuntimeWarning, stacklevel=4)
def _unlink(filename):
_waitfor(os.unlink, filename)
def _rmdir(dirname):
_waitfor(os.rmdir, dirname)
def _rmtree(path):
def _rmtree_inner(path):
for name in os.listdir(path):
fullname = os.path.join(path, name)
if os.path.isdir(fullname):
_waitfor(_rmtree_inner, fullname, waitall=True)
os.rmdir(fullname)
else:
os.unlink(fullname)
_waitfor(_rmtree_inner, path, waitall=True)
_waitfor(os.rmdir, path)
else:
_unlink = os.unlink
_rmdir = os.rmdir
_rmtree = shutil.rmtree
def unlink(filename):
try:
_unlink(filename)
except OSError as error:
# The filename need not exist.
if error.errno not in (errno.ENOENT, errno.ENOTDIR):
raise
def rmdir(dirname):
try:
_rmdir(dirname)
except OSError as error:
# The directory need not exist.
if error.errno != errno.ENOENT:
raise
def rmtree(path):
try:
_rmtree(path)
except OSError as error:
if error.errno != errno.ENOENT:
raise
def make_legacy_pyc(source):
"""Move a PEP 3147 pyc/pyo file to its legacy pyc/pyo location.
The choice of .pyc or .pyo extension is done based on the __debug__ flag
value.
:param source: The file system path to the source file. The source file
does not need to exist, however the PEP 3147 pyc file must exist.
:return: The file system path to the legacy pyc file.
"""
pyc_file = imp.cache_from_source(source)
up_one = os.path.dirname(os.path.abspath(source))
legacy_pyc = os.path.join(up_one, source + ('c' if __debug__ else 'o'))
os.rename(pyc_file, legacy_pyc)
return legacy_pyc
def forget(modname):
"""'Forget' a module was ever imported.
This removes the module from sys.modules and deletes any PEP 3147 or
legacy .pyc and .pyo files.
"""
unload(modname)
for dirname in sys.path:
source = os.path.join(dirname, modname + '.py')
# It doesn't matter if they exist or not, unlink all possible
# combinations of PEP 3147 and legacy pyc and pyo files.
unlink(source + 'c')
unlink(source + 'o')
unlink(imp.cache_from_source(source, debug_override=True))
unlink(imp.cache_from_source(source, debug_override=False))
# On some platforms, should not run gui test even if it is allowed
# in `use_resources'.
if sys.platform.startswith('win'):
import ctypes
import ctypes.wintypes
def _is_gui_available():
UOI_FLAGS = 1
WSF_VISIBLE = 0x0001
class USEROBJECTFLAGS(ctypes.Structure):
_fields_ = [("fInherit", ctypes.wintypes.BOOL),
("fReserved", ctypes.wintypes.BOOL),
("dwFlags", ctypes.wintypes.DWORD)]
dll = ctypes.windll.user32
h = dll.GetProcessWindowStation()
if not h:
raise ctypes.WinError()
uof = USEROBJECTFLAGS()
needed = ctypes.wintypes.DWORD()
res = dll.GetUserObjectInformationW(h,
UOI_FLAGS,
ctypes.byref(uof),
ctypes.sizeof(uof),
ctypes.byref(needed))
if not res:
raise ctypes.WinError()
return bool(uof.dwFlags & WSF_VISIBLE)
else:
def _is_gui_available():
return True
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is
executing.
"""
if resource == 'gui' and not _is_gui_available():
raise unittest.SkipTest("Cannot use the 'gui' resource")
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe(1).f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the %r resource not enabled" % resource
raise ResourceDenied(msg)
def _requires_unix_version(sysname, min_version):
"""Decorator raising SkipTest if the OS is `sysname` and the version is less
than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
            return func(*args, **kw)
        return wrapper
    return decorator
def requires_freebsd_version(*min_version):
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD version is
less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version):
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version):
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
    version is less than min_version.
    For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
    is less than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
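# Usage sketch below is illustrative and not part of the original module; it
# follows the recommendation above to prefer bind_port() over hard-coded ports.
def _example_bound_test_server():
    # Bind a listening TCP socket on an ephemeral port chosen by the OS.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    port = bind_port(sock, HOST)
    sock.listen(1)
    return sock, port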
def _is_ipv6_enabled():
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(('::1', 0))
return True
except (socket.error, socket.gaierror):
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
# A constant likely larger than the underlying OS pipe buffer size.
# Windows limit seems to be around 512B, and many Unix kernels have a 64K pipe
# buffer size or 16*PAGE_SIZE: take a few megs to be sure.
PIPE_MAX_SIZE = 3 * 1000 * 1000
# decorator for skipping tests on non-IEEE 754 platforms
requires_IEEE_754 = unittest.skipUnless(
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
requires_zlib = unittest.skipUnless(zlib, 'requires zlib')
requires_bz2 = unittest.skipUnless(bz2, 'requires bz2')
requires_lzma = unittest.skipUnless(lzma, 'requires lzma')
is_jython = sys.platform.startswith('java')
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
else:
TESTFN = '@test'
# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
# FS_NONASCII: non-ASCII character encodable by os.fsencode(),
# or None if there is no such character.
FS_NONASCII = None
for character in (
# First try printable and common characters to have a readable filename.
    # For each character, the encoding list is just an example of encodings able
# to encode the character (the list is not exhaustive).
# U+00E6 (Latin Small Letter Ae): cp1252, iso-8859-1
'\u00E6',
# U+0130 (Latin Capital Letter I With Dot Above): cp1254, iso8859_3
'\u0130',
# U+0141 (Latin Capital Letter L With Stroke): cp1250, cp1257
'\u0141',
# U+03C6 (Greek Small Letter Phi): cp1253
'\u03C6',
# U+041A (Cyrillic Capital Letter Ka): cp1251
'\u041A',
# U+05D0 (Hebrew Letter Alef): Encodable to cp424
'\u05D0',
# U+060C (Arabic Comma): cp864, cp1006, iso8859_6, mac_arabic
'\u060C',
# U+062A (Arabic Letter Teh): cp720
'\u062A',
# U+0E01 (Thai Character Ko Kai): cp874
'\u0E01',
# Then try more "special" characters. "special" because they may be
# interpreted or displayed differently depending on the exact locale
# encoding and the font.
# U+00A0 (No-Break Space)
'\u00A0',
# U+20AC (Euro Sign)
'\u20AC',
):
try:
os.fsdecode(os.fsencode(character))
except UnicodeError:
pass
else:
FS_NONASCII = character
break
# TESTFN_UNICODE is a non-ascii filename
TESTFN_UNICODE = TESTFN + "-\xe0\xf2\u0258\u0141\u011f"
if sys.platform == 'darwin':
# In Mac OS X's VFS API file names are, by definition, canonically
# decomposed Unicode, encoded using UTF-8. See QA1173:
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
import unicodedata
TESTFN_UNICODE = unicodedata.normalize('NFD', TESTFN_UNICODE)
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNENCODABLE is a filename (str type) that should *not* be able to be
# encoded by the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename.
TESTFN_UNENCODABLE = None
if os.name in ('nt', 'ce'):
# skip win32s (0) or Windows 9x/ME (1)
if sys.getwindowsversion().platform >= 2:
# Different kinds of characters from various languages to minimize the
# probability that the whole name is encodable to MBCS (issue #9819)
TESTFN_UNENCODABLE = TESTFN + "-\u5171\u0141\u2661\u0363\uDC80"
try:
TESTFN_UNENCODABLE.encode(TESTFN_ENCODING)
except UnicodeEncodeError:
pass
else:
print('WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). '
'Unicode filename tests may not be effective'
% (TESTFN_UNENCODABLE, TESTFN_ENCODING))
TESTFN_UNENCODABLE = None
# Mac OS X denies unencodable filenames (invalid utf-8)
elif sys.platform != 'darwin':
try:
# ascii and utf-8 cannot encode the byte 0xff
b'\xff'.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
# 0xff will be encoded using the surrogate character u+DCFF
TESTFN_UNENCODABLE = TESTFN \
+ b'-\xff'.decode(TESTFN_ENCODING, 'surrogateescape')
else:
# File system encoding (eg. ISO-8859-* encodings) can encode
# the byte 0xff. Skip some unicode filename tests.
pass
# TESTFN_UNDECODABLE is a filename (bytes type) that should *not* be able to be
# decoded from the filesystem encoding (in strict mode). It can be None if we
# cannot generate such filename (ex: the latin1 encoding can decode any byte
# sequence). On UNIX, TESTFN_UNDECODABLE can be decoded by os.fsdecode() thanks
# to the surrogateescape error handler (PEP 383), but not from the filesystem
# encoding in strict mode.
TESTFN_UNDECODABLE = None
''' #fixme brython
for name in (
# b'\xff' is not decodable by os.fsdecode() with code page 932. Windows
# accepts it to create a file or a directory, or don't accept to enter to
# such directory (when the bytes name is used). So test b'\xe7' first: it is
# not decodable from cp932.
b'\xe7w\xf0',
# undecodable from ASCII, UTF-8
b'\xff',
# undecodable from iso8859-3, iso8859-6, iso8859-7, cp424, iso8859-8, cp856
# and cp857
b'\xae\xd5'
# undecodable from UTF-8 (UNIX and Mac OS X)
b'\xed\xb2\x80', b'\xed\xb4\x80',
# undecodable from shift_jis, cp869, cp874, cp932, cp1250, cp1251, cp1252,
# cp1253, cp1254, cp1255, cp1257, cp1258
b'\x81\x98',
):
try:
name.decode(TESTFN_ENCODING)
except UnicodeDecodeError:
TESTFN_UNDECODABLE = os.fsencode(TESTFN) + name
break
'''
if FS_NONASCII:
TESTFN_NONASCII = TESTFN + '-' + FS_NONASCII
else:
TESTFN_NONASCII = None
# Save the initial cwd
SAVEDCWD = os.getcwd()
@contextlib.contextmanager
def temp_cwd(name='tempcwd', quiet=False, path=None):
"""
Context manager that temporarily changes the CWD.
An existing path may be provided as *path*, in which case this
function makes no changes to the file system.
Otherwise, the new CWD is created in the current directory and it's
named *name*. If *quiet* is False (default) and it's not possible to
create or change the CWD, an error is raised. If it's True, only a
warning is raised and the original CWD is used.
"""
saved_dir = os.getcwd()
is_temporary = False
if path is None:
path = name
try:
os.mkdir(name)
is_temporary = True
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to create temp CWD ' + name,
RuntimeWarning, stacklevel=3)
try:
os.chdir(path)
except OSError:
if not quiet:
raise
warnings.warn('tests may fail, unable to change the CWD to ' + path,
RuntimeWarning, stacklevel=3)
try:
yield os.getcwd()
finally:
os.chdir(saved_dir)
if is_temporary:
rmtree(name)
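# Usage sketch below is illustrative and not part of the original module; the
# directory name 'example_tmpcwd' is a placeholder.
def _example_run_in_temp_cwd():
    # Work inside a throwaway CWD that is removed again on exit.
    with temp_cwd('example_tmpcwd') as new_cwd:
        return new_cwd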
if hasattr(os, "umask"):
@contextlib.contextmanager
def temp_umask(umask):
"""Context manager that temporarily sets the process umask."""
oldmask = os.umask(umask)
try:
yield
finally:
os.umask(oldmask)
def findfile(file, here=__file__, subdir=None):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
if os.path.isabs(file):
return file
if subdir is not None:
file = os.path.join(subdir, file)
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
fn = os.path.join(dn, file)
if os.path.exists(fn): return fn
return file
def create_empty_file(filename):
"""Create an empty file. If the file already exists, truncate it."""
fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.close(fd)
def sortdict(dict):
"Like repr(dict), but in sorted order."
items = sorted(dict.items())
reprpairs = ["%r: %r" % pair for pair in items]
withcommas = ", ".join(reprpairs)
return "{%s}" % withcommas
def make_bad_fd():
"""
Create an invalid file descriptor by opening and closing a file and return
its fd.
"""
file = open(TESTFN, "wb")
try:
return file.fileno()
finally:
file.close()
unlink(TESTFN)
def check_syntax_error(testcase, statement):
testcase.assertRaises(SyntaxError, compile, statement,
'<test string>', 'exec')
def open_urlresource(url, *args, **kw):
import urllib.request, urllib.parse
check = kw.pop('check', None)
filename = urllib.parse.urlparse(url)[2].split('/')[-1] # '/': it's URL!
fn = os.path.join(os.path.dirname(__file__), "data", filename)
def check_valid_file(fn):
f = open(fn, *args, **kw)
if check is None:
return f
elif check(f):
f.seek(0)
return f
f.close()
if os.path.exists(fn):
f = check_valid_file(fn)
if f is not None:
return f
unlink(fn)
# Verify the requirement before downloading the file
requires('urlfetch')
print('\tfetching %s ...' % url, file=get_original_stdout())
f = urllib.request.urlopen(url, timeout=15)
try:
with open(fn, "wb") as out:
s = f.read()
while s:
out.write(s)
s = f.read()
finally:
f.close()
f = check_valid_file(fn)
if f is not None:
return f
raise TestFailed('invalid resource %r' % fn)
class WarningsRecorder(object):
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
def __getattr__(self, attr):
if len(self._warnings) > self._last:
return getattr(self._warnings[-1], attr)
elif attr in warnings.WarningMessage._WARNING_DETAILS:
return None
raise AttributeError("%r has no attribute %r" % (self, attr))
@property
def warnings(self):
return self._warnings[self._last:]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get('__warningregistry__')
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swap the module, we need to look up in
# the sys.modules dictionary.
sys.modules['warnings'].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if (re.match(msg, str(warning), re.I) and
issubclass(warning.__class__, cat)):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" %
missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get('quiet')
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
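# Usage sketch below is illustrative and not part of the original module; the
# warning text "obsolete" is a placeholder.
def _example_check_warnings():
    # The block passes because the expected DeprecationWarning is raised.
    with check_warnings(("obsolete", DeprecationWarning)):
        warnings.warn("obsolete API", DeprecationWarning)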
class CleanImport(object):
"""Context manager to force import to return a new module reference.
This is useful for testing module-level behaviours, such as
the emission of a DeprecationWarning on import.
Use like this:
with CleanImport("foo"):
importlib.import_module("foo") # new reference
"""
def __init__(self, *module_names):
self.original_modules = sys.modules.copy()
for module_name in module_names:
if module_name in sys.modules:
module = sys.modules[module_name]
# It is possible that module_name is just an alias for
# another module (e.g. stub for modules renamed in 3.x).
                # In that case, we also need to delete the real module to clear
# the import cache.
if module.__name__ != module_name:
del sys.modules[module.__name__]
del sys.modules[module_name]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.modules.update(self.original_modules)
class EnvironmentVarGuard(collections.abc.MutableMapping):
"""Class to help protect the environment variable properly. Can be used as
a context manager."""
def __init__(self):
self._environ = os.environ
self._changed = {}
def __getitem__(self, envvar):
return self._environ[envvar]
def __setitem__(self, envvar, value):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
self._environ[envvar] = value
def __delitem__(self, envvar):
# Remember the initial value on the first access
if envvar not in self._changed:
self._changed[envvar] = self._environ.get(envvar)
if envvar in self._environ:
del self._environ[envvar]
def keys(self):
return self._environ.keys()
def __iter__(self):
return iter(self._environ)
def __len__(self):
return len(self._environ)
def set(self, envvar, value):
self[envvar] = value
def unset(self, envvar):
del self[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for (k, v) in self._changed.items():
if v is None:
if k in self._environ:
del self._environ[k]
else:
self._environ[k] = v
os.environ = self._environ
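# Usage sketch below is illustrative and not part of the original module; the
# variable name 'EXAMPLE_VAR' is a placeholder.
def _example_env_guard():
    # The original value (or absence) of EXAMPLE_VAR is restored on exit.
    with EnvironmentVarGuard() as env:
        env.set('EXAMPLE_VAR', '1')
        return os.environ.get('EXAMPLE_VAR')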
class DirsOnSysPath(object):
"""Context manager to temporarily add directories to sys.path.
This makes a copy of sys.path, appends any directories given
as positional arguments, then reverts sys.path to the copied
settings when the context ends.
Note that *all* sys.path modifications in the body of the
context manager, including replacement of the object,
will be reverted at the end of the block.
"""
def __init__(self, *paths):
self.original_value = sys.path[:]
self.original_object = sys.path
sys.path.extend(paths)
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
sys.path = self.original_object
sys.path[:] = self.original_value
class TransientResource(object):
"""Raise ResourceDenied if an exception is raised while the context manager
is in effect that matches the specified exception and attributes."""
def __init__(self, exc, **kwargs):
self.exc = exc
self.attrs = kwargs
def __enter__(self):
return self
def __exit__(self, type_=None, value=None, traceback=None):
"""If type_ is a subclass of self.exc and value has attributes matching
self.attrs, raise ResourceDenied. Otherwise let the exception
propagate (if any)."""
if type_ is not None and issubclass(self.exc, type_):
for attr, attr_value in self.attrs.items():
if not hasattr(value, attr):
break
if getattr(value, attr) != attr_value:
break
else:
raise ResourceDenied("an optional resource is not available")
# Context managers that raise ResourceDenied when various issues
# with the Internet connection manifest themselves as exceptions.
# XXX deprecate these and use transient_internet() instead
time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
@contextlib.contextmanager
def transient_internet(resource_name, *, timeout=30.0, errnos=()):
"""Return a context manager that raises ResourceDenied when various issues
with the Internet connection manifest themselves as exceptions."""
default_errnos = [
('ECONNREFUSED', 111),
('ECONNRESET', 104),
('EHOSTUNREACH', 113),
('ENETUNREACH', 101),
('ETIMEDOUT', 110),
]
default_gai_errnos = [
('EAI_AGAIN', -3),
('EAI_FAIL', -4),
('EAI_NONAME', -2),
('EAI_NODATA', -5),
# Encountered when trying to resolve IPv6-only hostnames
('WSANO_DATA', 11004),
]
denied = ResourceDenied("Resource %r is not available" % resource_name)
captured_errnos = errnos
gai_errnos = []
if not captured_errnos:
captured_errnos = [getattr(errno, name, num)
for (name, num) in default_errnos]
gai_errnos = [getattr(socket, name, num)
for (name, num) in default_gai_errnos]
def filter_error(err):
n = getattr(err, 'errno', None)
if (isinstance(err, socket.timeout) or
(isinstance(err, socket.gaierror) and n in gai_errnos) or
n in captured_errnos):
if not verbose:
sys.stderr.write(denied.args[0] + "\n")
raise denied from err
old_timeout = socket.getdefaulttimeout()
try:
if timeout is not None:
socket.setdefaulttimeout(timeout)
yield
except IOError as err:
# urllib can wrap original socket errors multiple times (!), so we must
# unwrap to get at the original error.
while True:
a = err.args
if len(a) >= 1 and isinstance(a[0], IOError):
err = a[0]
# The error can also be wrapped as args[1]:
# except socket.error as msg:
# raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
elif len(a) >= 2 and isinstance(a[1], IOError):
err = a[1]
else:
break
filter_error(err)
raise
# XXX should we catch generic exceptions and look for their
# __cause__ or __context__?
finally:
socket.setdefaulttimeout(old_timeout)
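# Illustrative usage sketch (not part of the original helpers): a networked
# test can wrap its I/O in transient_internet() so that timeouts, resets and
# DNS hiccups are reported as ResourceDenied instead of test failures. The
# host name below is an example only.
def _example_transient_internet_usage():
    import socket as _socket
    with transient_internet("www.example.com", timeout=10.0):
        # Any IOError/timeout matching the captured errnos is converted
        # into ResourceDenied by the context manager above.
        conn = _socket.create_connection(("www.example.com", 80), timeout=10.0)
        conn.close()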
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
import io
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, io.StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as s:
print("hello")
self.assertEqual(s.getvalue(), "hello")
"""
return captured_output("stdout")
def captured_stderr():
return captured_output("stderr")
def captured_stdin():
return captured_output("stdin")
def gc_collect():
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can happen in the presence of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if is_jython:
time.sleep(0.1)
gc.collect()
gc.collect()
@contextlib.contextmanager
def disable_gc():
have_gc = gc.isenabled()
gc.disable()
try:
yield
finally:
if have_gc:
gc.enable()
def python_is_optimized():
"""Find if Python was built with optimizations."""
cflags = sysconfig.get_config_var('PY_CFLAGS') or ''
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
return final_opt != '' and final_opt != '-O0'
_header = 'nP'
_align = '0n'
if hasattr(sys, "gettotalrefcount"):
_header = '2P' + _header
_align = '0P'
_vheader = _header + 'n'
def calcobjsize(fmt):
return struct.calcsize(_header + fmt + _align)
def calcvobjsize(fmt):
return struct.calcsize(_vheader + fmt + _align)
_TPFLAGS_HAVE_GC = 1<<14
_TPFLAGS_HEAPTYPE = 1<<9
def check_sizeof(test, o, size):
result = sys.getsizeof(o)
# add GC header size
if ((type(o) == type) and (o.__flags__ & _TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & _TPFLAGS_HAVE_GC))):
size += _testcapi.SIZEOF_PYGC_HEAD
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
test.assertEqual(result, size, msg)
#=======================================================================
# Decorator for running a function in a different locale, correctly resetting
# it afterwards.
def run_with_locale(catstr, *locales):
def decorator(func):
def inner(*args, **kwds):
try:
import locale
category = getattr(locale, catstr)
orig_locale = locale.setlocale(category)
except AttributeError:
# if the test author gives us an invalid category string
raise
except:
# cannot retrieve original locale, so do nothing
locale = orig_locale = None
else:
for loc in locales:
try:
locale.setlocale(category, loc)
break
except:
pass
# now run the function, resetting the locale on exceptions
try:
return func(*args, **kwds)
finally:
if locale and orig_locale:
locale.setlocale(category, orig_locale)
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
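# Illustrative usage sketch: run_with_locale() tries each candidate locale in
# order and silently keeps the current one if none can be set, so the test
# stays portable. The locale names below are examples only.
@run_with_locale('LC_NUMERIC', 'en_US.UTF-8', 'en_US', 'C')
def _example_locale_dependent_grouping():
    import locale
    # With a US locale active this typically returns '1,234,567';
    # under the 'C' locale there is no grouping.
    return locale.format_string('%d', 1234567, grouping=True)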
#=======================================================================
# Decorator for running a function in a specific timezone, correctly
# resetting it afterwards.
def run_with_tz(tz):
def decorator(func):
def inner(*args, **kwds):
try:
tzset = time.tzset
except AttributeError:
raise unittest.SkipTest("tzset required")
if 'TZ' in os.environ:
orig_tz = os.environ['TZ']
else:
orig_tz = None
os.environ['TZ'] = tz
tzset()
# now run the function, resetting the tz on exceptions
try:
return func(*args, **kwds)
finally:
if orig_tz is None:
del os.environ['TZ']
else:
os.environ['TZ'] = orig_tz
time.tzset()
inner.__name__ = func.__name__
inner.__doc__ = func.__doc__
return inner
return decorator
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use
# should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
_4G = 4 * _1G
MAX_Py_ssize_t = sys.maxsize
def set_memlimit(limit):
global max_memuse
global real_max_memuse
sizes = {
'k': 1024,
'm': _1M,
'g': _1G,
't': 1024*_1G,
}
m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
re.IGNORECASE | re.VERBOSE)
if m is None:
raise ValueError('Invalid memory limit %r' % (limit,))
memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
real_max_memuse = memlimit
if memlimit > MAX_Py_ssize_t:
memlimit = MAX_Py_ssize_t
if memlimit < _2G - 1:
raise ValueError('Memory limit %r too low to be useful' % (limit,))
max_memuse = memlimit
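# Illustrative examples of the limit syntax accepted by set_memlimit(): a
# number (optionally fractional) followed by a k/m/g/t suffix, case
# insensitive, with an optional trailing 'b'. Shown as comments so importing
# this module does not change the globals:
#
#     set_memlimit('2.5g')    # max_memuse = int(2.5 * 1024**3) bytes
#     set_memlimit('4096mb')  # same as '4g'
#     set_memlimit('1g')      # ValueError: below the ~2G useful minimum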
class _MemoryWatchdog:
"""An object which periodically watches the process' memory consumption
and prints it out.
"""
def __init__(self):
self.procfile = '/proc/{pid}/statm'.format(pid=os.getpid())
self.started = False
def start(self):
try:
f = open(self.procfile, 'r')
except OSError as e:
warnings.warn('/proc not available for stats: {}'.format(e),
RuntimeWarning)
sys.stderr.flush()
return
watchdog_script = findfile("memory_watchdog.py")
self.mem_watchdog = subprocess.Popen([sys.executable, watchdog_script],
stdin=f, stderr=subprocess.DEVNULL)
f.close()
self.started = True
def stop(self):
if self.started:
self.mem_watchdog.terminate()
self.mem_watchdog.wait()
def bigmemtest(size, memuse, dry_run=True):
"""Decorator for bigmem tests.
'size' is the requested size for the test (in arbitrary,
test-interpreted units); 'memuse' is the approximate number of bytes consumed
per unit of size.
If 'dry_run' is False, the test does not support dummy runs
when -M is not specified.
"""
def decorator(f):
def wrapper(self):
size = wrapper.size
memuse = wrapper.memuse
if not real_max_memuse:
maxsize = 5147
else:
maxsize = size
if ((real_max_memuse or not dry_run)
and real_max_memuse < maxsize * memuse):
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (size * memuse / (1024 ** 3)))
if real_max_memuse and verbose:
print()
print(" ... expected peak memory use: {peak:.1f}G"
.format(peak=size * memuse / (1024 ** 3)))
watchdog = _MemoryWatchdog()
watchdog.start()
else:
watchdog = None
try:
return f(self, maxsize)
finally:
if watchdog:
watchdog.stop()
wrapper.size = size
wrapper.memuse = memuse
return wrapper
return decorator
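# Illustrative usage sketch: a bigmem test states its size in test-defined
# units plus the approximate bytes consumed per unit ('memuse'); the decorator
# skips the test when the -M limit cannot cover size * memuse bytes and passes
# the effective size to the test method. The class below is an example only.
#
#     class ExampleBigMemTest(unittest.TestCase):
#         @bigmemtest(size=_2G, memuse=1)
#         def test_big_bytes(self, size):
#             data = b'x' * size          # roughly `size` bytes
#             self.assertEqual(len(data), size)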
def bigaddrspacetest(f):
"""Decorator for tests that fill the address space."""
def wrapper(self):
if max_memuse < MAX_Py_ssize_t:
if MAX_Py_ssize_t >= 2**63 - 1 and max_memuse >= 2**31:
raise unittest.SkipTest(
"not enough memory: try a 32-bit build instead")
else:
raise unittest.SkipTest(
"not enough memory: %.1fG minimum needed"
% (MAX_Py_ssize_t / (1024 ** 3)))
else:
return f(self)
return wrapper
#=======================================================================
# unittest integration.
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
def _id(obj):
return obj
def requires_resource(resource):
if resource == 'gui' and not _is_gui_available():
return unittest.skip("resource 'gui' is not available")
if is_resource_enabled(resource):
return _id
else:
return unittest.skip("resource {0!r} is not enabled".format(resource))
def cpython_only(test):
"""
Decorator for tests only applicable on CPython.
"""
return impl_detail(cpython=True)(test)
def impl_detail(msg=None, **guards):
if check_impl_detail(**guards):
return _id
if msg is None:
guardnames, default = _parse_guards(guards)
if default:
msg = "implementation detail not available on {0}"
else:
msg = "implementation detail specific to {0}"
guardnames = sorted(guardnames.keys())
msg = msg.format(' or '.join(guardnames))
return unittest.skip(msg)
def _parse_guards(guards):
# Returns a tuple ({platform_name: run_me}, default_value)
if not guards:
return ({'cpython': True}, False)
is_true = list(guards.values())[0]
assert list(guards.values()) == [is_true] * len(guards) # all True or all False
return (guards, not is_true)
# Use the following check to guard CPython's implementation-specific tests --
# or to run them only on the implementation(s) guarded by the arguments.
def check_impl_detail(**guards):
"""This function returns True or False depending on the host platform.
Examples:
if check_impl_detail(): # only on CPython (default)
if check_impl_detail(jython=True): # only on Jython
if check_impl_detail(cpython=False): # everywhere except on CPython
"""
guards, default = _parse_guards(guards)
return guards.get(platform.python_implementation().lower(), default)
def no_tracing(func):
"""Decorator to temporarily turn off tracing for the duration of a test."""
if not hasattr(sys, 'gettrace'):
return func
else:
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def refcount_test(test):
"""Decorator for tests which involve reference counting.
To start, the decorator does not run the test if it is not run by CPython.
After that, any trace function is unset during the test to prevent
unexpected refcounts caused by the trace function.
"""
return no_tracing(cpython_only(test))
def _filter_suite(suite, pred):
"""Recursively filter test cases in a suite based on a predicate."""
newtests = []
for test in suite._tests:
if isinstance(test, unittest.TestSuite):
_filter_suite(test, pred)
newtests.append(test)
else:
if pred(test):
newtests.append(test)
suite._tests = newtests
def _run_suite(suite):
"""Run tests from a unittest.TestSuite-derived class."""
if verbose:
runner = unittest.TextTestRunner(sys.stdout, verbosity=2,
failfast=failfast)
else:
runner = BasicTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
if len(result.errors) == 1 and not result.failures:
err = result.errors[0][1]
elif len(result.failures) == 1 and not result.errors:
err = result.failures[0][1]
else:
err = "multiple errors occurred"
if not verbose: err += "; run in verbose mode for details"
raise TestFailed(err)
def run_unittest(*classes):
"""Run tests from unittest.TestCase-derived classes."""
valid_types = (unittest.TestSuite, unittest.TestCase)
suite = unittest.TestSuite()
for cls in classes:
if isinstance(cls, str):
if cls in sys.modules:
suite.addTest(unittest.findTestCases(sys.modules[cls]))
else:
raise ValueError("str arguments must be keys in sys.modules")
elif isinstance(cls, valid_types):
suite.addTest(cls)
else:
suite.addTest(unittest.makeSuite(cls))
def case_pred(test):
if match_tests is None:
return True
for name in test.id().split("."):
if fnmatch.fnmatchcase(name, match_tests):
return True
return False
_filter_suite(suite, case_pred)
_run_suite(suite)
#=======================================================================
# Check for the presence of docstrings.
HAVE_DOCSTRINGS = (check_impl_detail(cpython=False) or
sys.platform == 'win32' or
sysconfig.get_config_var('WITH_DOC_STRINGS'))
requires_docstrings = unittest.skipUnless(HAVE_DOCSTRINGS,
"test requires docstrings")
#=======================================================================
# doctest driver.
def run_doctest(module, verbosity=None, optionflags=0):
"""Run doctest on the given module. Return (#failures, #tests).
If optional argument verbosity is not specified (or is None), pass
support's belief about verbosity on to doctest. Else doctest's
usual behavior is used (it searches sys.argv for -v).
"""
import doctest
if verbosity is None:
verbosity = verbose
else:
verbosity = None
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
if f:
raise TestFailed("%d of %d doctests failed" % (f, t))
if verbose:
print('doctest (%s) ... %d tests with zero failures' %
(module.__name__, t))
return f, t
#=======================================================================
# Support for saving and restoring the imported modules.
def modules_setup():
return sys.modules.copy(),
def modules_cleanup(oldmodules):
# Encoders/decoders are registered permanently within the internal
# codec cache. If we destroy the corresponding modules their
# globals will be set to None which will trip up the cached functions.
encodings = [(k, v) for k, v in sys.modules.items()
if k.startswith('encodings.')]
sys.modules.clear()
sys.modules.update(encodings)
# XXX: This kind of problem can affect more than just encodings. In particular
# extension modules (such as _ssl) don't cope with reloading properly.
# Really, test modules should be cleaning out the test specific modules they
# know they added (ala test_runpy) rather than relying on this function (as
# test_importhooks and test_pkg do currently).
# Implicitly imported *real* modules should be left alone (see issue 10556).
sys.modules.update(oldmodules)
#=======================================================================
# Threading support to prevent reporting refleaks when running regrtest.py -R
# NOTE: we use thread._count() rather than threading.enumerate() (or the
# moral equivalent thereof) because a threading.Thread object is still alive
# until its __bootstrap() method has returned, even after it has been
# unregistered from the threading module.
# thread._count(), on the other hand, only gets decremented *after* the
# __bootstrap() method has returned, which gives us reliable reference counts
# at the end of a test run.
def threading_setup():
if _thread:
return _thread._count(), threading._dangling.copy()
else:
return 1, ()
def threading_cleanup(*original_values):
if not _thread:
return
_MAX_COUNT = 10
for count in range(_MAX_COUNT):
values = _thread._count(), threading._dangling
if values == original_values:
break
time.sleep(0.1)
gc_collect()
# XXX print a warning in case of failure?
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
if not _thread:
return func
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def reap_children():
"""Use this function at the end of test_main() whenever sub-processes
are started. This will help ensure that no extra children (zombies)
stick around to hog resources and create problems when looking
for refleaks.
"""
# Reap all our dead child processes so we don't leave zombies around.
# These hog resources and might be causing some of the buildbots to die.
if hasattr(os, 'waitpid'):
any_process = -1
while True:
try:
# This will raise an exception on Windows. That's ok.
pid, status = os.waitpid(any_process, os.WNOHANG)
if pid == 0:
break
except:
break
@contextlib.contextmanager
def swap_attr(obj, attr, new_val):
"""Temporary swap out an attribute with a new object.
Usage:
with swap_attr(obj, "attr", 5):
...
This will set obj.attr to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `attr` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if hasattr(obj, attr):
real_val = getattr(obj, attr)
setattr(obj, attr, new_val)
try:
yield
finally:
setattr(obj, attr, real_val)
else:
setattr(obj, attr, new_val)
try:
yield
finally:
delattr(obj, attr)
@contextlib.contextmanager
def swap_item(obj, item, new_val):
"""Temporary swap out an item with a new object.
Usage:
with swap_item(obj, "item", 5):
...
This will set obj["item"] to 5 for the duration of the with: block,
restoring the old value at the end of the block. If `item` doesn't
exist on `obj`, it will be created and then deleted at the end of the
block.
"""
if item in obj:
real_val = obj[item]
obj[item] = new_val
try:
yield
finally:
obj[item] = real_val
else:
obj[item] = new_val
try:
yield
finally:
del obj[item]
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(br"\[\d+ refs\]\r?\n?", b"", stderr).strip()
return stderr
def args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
return subprocess._args_from_interpreter_flags()
#============================================================
# Support for assertions about logging.
#============================================================
class TestHandler(logging.handlers.BufferingHandler):
def __init__(self, matcher):
# BufferingHandler takes a "capacity" argument
# so as to know when to flush. As we're overriding
# shouldFlush anyway, we can set a capacity of zero.
# You can call flush() manually to clear out the
# buffer.
logging.handlers.BufferingHandler.__init__(self, 0)
self.matcher = matcher
def shouldFlush(self):
return False
def emit(self, record):
self.format(record)
self.buffer.append(record.__dict__)
def matches(self, **kwargs):
"""
Look for a saved dict whose keys/values match the supplied arguments.
"""
result = False
for d in self.buffer:
if self.matcher.matches(d, **kwargs):
result = True
break
return result
class Matcher(object):
_partial_matches = ('msg', 'message')
def matches(self, d, **kwargs):
"""
Try to match a single dict with the supplied arguments.
Keys whose values are strings and which are in self._partial_matches
will be checked for partial (i.e. substring) matches. You can extend
this scheme to (for example) do regular expression matching, etc.
"""
result = True
for k in kwargs:
v = kwargs[k]
dv = d.get(k)
if not self.match_value(k, dv, v):
result = False
break
return result
def match_value(self, k, dv, v):
"""
Try to match a single stored value (dv) with a supplied value (v).
"""
if type(v) != type(dv):
result = False
elif type(dv) is not str or k not in self._partial_matches:
result = (v == dv)
else:
result = dv.find(v) >= 0
return result
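# Illustrative usage sketch: TestHandler buffers every LogRecord dict and
# Matcher checks them, using substring matching for 'msg'/'message' and exact
# matching for other keys. The logger name below is an example only.
def _example_logging_assertion():
    logger = logging.getLogger("support.example.logging")
    handler = TestHandler(Matcher())
    logger.addHandler(handler)
    try:
        logger.warning("disk %s is low on space", "/dev/sda1")
        # True: levelname matches exactly, 'low on space' is a substring
        # of the formatted message.
        return handler.matches(levelname="WARNING", message="low on space")
    finally:
        logger.removeHandler(handler)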
_can_symlink = None
def can_symlink():
global _can_symlink
if _can_symlink is not None:
return _can_symlink
symlink_path = TESTFN + "can_symlink"
try:
os.symlink(TESTFN, symlink_path)
can = True
except (OSError, NotImplementedError, AttributeError):
can = False
else:
os.remove(symlink_path)
_can_symlink = can
return can
def skip_unless_symlink(test):
"""Skip decorator for tests that require functional symlink"""
ok = can_symlink()
msg = "Requires functional symlink implementation"
return test if ok else unittest.skip(msg)(test)
_can_xattr = None
def can_xattr():
global _can_xattr
if _can_xattr is not None:
return _can_xattr
if not hasattr(os, "setxattr"):
can = False
else:
tmp_fp, tmp_name = tempfile.mkstemp()
try:
with open(TESTFN, "wb") as fp:
try:
# TESTFN & tempfile may use different file systems with
# different capabilities
os.setxattr(tmp_fp, b"user.test", b"")
os.setxattr(fp.fileno(), b"user.test", b"")
# Kernels < 2.6.39 don't respect setxattr flags.
kernel_version = platform.release()
m = re.match("2.6.(\d{1,2})", kernel_version)
can = m is None or int(m.group(1)) >= 39
except OSError:
can = False
finally:
unlink(TESTFN)
unlink(tmp_name)
_can_xattr = can
return can
def skip_unless_xattr(test):
"""Skip decorator for tests that require functional extended attributes"""
ok = can_xattr()
msg = "no non-broken extended attribute support"
return test if ok else unittest.skip(msg)(test)
if sys.platform.startswith('win'):
@contextlib.contextmanager
def suppress_crash_popup():
"""Disable Windows Error Reporting dialogs using SetErrorMode."""
# see http://msdn.microsoft.com/en-us/library/windows/desktop/ms680621%28v=vs.85%29.aspx
# GetErrorMode is not available on Windows XP and Windows Server 2003,
# but SetErrorMode returns the previous value, so we can use that
import ctypes
k32 = ctypes.windll.kernel32
SEM_NOGPFAULTERRORBOX = 0x02
old_error_mode = k32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
k32.SetErrorMode(old_error_mode | SEM_NOGPFAULTERRORBOX)
try:
yield
finally:
k32.SetErrorMode(old_error_mode)
else:
# this is a no-op for other platforms
@contextlib.contextmanager
def suppress_crash_popup():
yield
def patch(test_instance, object_to_patch, attr_name, new_value):
"""Override 'object_to_patch'.'attr_name' with 'new_value'.
Also, add a cleanup procedure to 'test_instance' to restore
'object_to_patch' value for 'attr_name'.
The 'attr_name' should be a valid attribute for 'object_to_patch'.
"""
# check that 'attr_name' is a real attribute for 'object_to_patch'
# will raise AttributeError if it does not exist
getattr(object_to_patch, attr_name)
# keep a copy of the old value
attr_is_local = False
try:
old_value = object_to_patch.__dict__[attr_name]
except (AttributeError, KeyError):
old_value = getattr(object_to_patch, attr_name, None)
else:
attr_is_local = True
# restore the value when the test is done
def cleanup():
if attr_is_local:
setattr(object_to_patch, attr_name, old_value)
else:
delattr(object_to_patch, attr_name)
test_instance.addCleanup(cleanup)
# actually override the attribute
setattr(object_to_patch, attr_name, new_value)
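# Illustrative usage sketch: inside a TestCase, patch() swaps an attribute for
# the duration of the test and registers a cleanup that restores (or deletes)
# it afterwards. The patched target below is an example only.
#
#     class ExamplePatchTest(unittest.TestCase):
#         def test_fake_clock(self):
#             patch(self, time, 'time', lambda: 1234567890.0)
#             self.assertEqual(time.time(), 1234567890.0)
#             # time.time is restored automatically via addCleanup()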
|
gpl-2.0
|
drpaneas/linuxed.gr
|
lib/python2.7/site-packages/unidecode/x074.py
|
252
|
4696
|
data = (
'Han ', # 0x00
'Xuan ', # 0x01
'Yan ', # 0x02
'Qiu ', # 0x03
'Quan ', # 0x04
'Lang ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Fu ', # 0x08
'Liu ', # 0x09
'Ye ', # 0x0a
'Xi ', # 0x0b
'Ling ', # 0x0c
'Li ', # 0x0d
'Jin ', # 0x0e
'Lian ', # 0x0f
'Suo ', # 0x10
'Chiisai ', # 0x11
'[?] ', # 0x12
'Wan ', # 0x13
'Dian ', # 0x14
'Pin ', # 0x15
'Zhan ', # 0x16
'Cui ', # 0x17
'Min ', # 0x18
'Yu ', # 0x19
'Ju ', # 0x1a
'Chen ', # 0x1b
'Lai ', # 0x1c
'Wen ', # 0x1d
'Sheng ', # 0x1e
'Wei ', # 0x1f
'Dian ', # 0x20
'Chu ', # 0x21
'Zhuo ', # 0x22
'Pei ', # 0x23
'Cheng ', # 0x24
'Hu ', # 0x25
'Qi ', # 0x26
'E ', # 0x27
'Kun ', # 0x28
'Chang ', # 0x29
'Qi ', # 0x2a
'Beng ', # 0x2b
'Wan ', # 0x2c
'Lu ', # 0x2d
'Cong ', # 0x2e
'Guan ', # 0x2f
'Yan ', # 0x30
'Diao ', # 0x31
'Bei ', # 0x32
'Lin ', # 0x33
'Qin ', # 0x34
'Pi ', # 0x35
'Pa ', # 0x36
'Que ', # 0x37
'Zhuo ', # 0x38
'Qin ', # 0x39
'Fa ', # 0x3a
'[?] ', # 0x3b
'Qiong ', # 0x3c
'Du ', # 0x3d
'Jie ', # 0x3e
'Hun ', # 0x3f
'Yu ', # 0x40
'Mao ', # 0x41
'Mei ', # 0x42
'Chun ', # 0x43
'Xuan ', # 0x44
'Ti ', # 0x45
'Xing ', # 0x46
'Dai ', # 0x47
'Rou ', # 0x48
'Min ', # 0x49
'Zhen ', # 0x4a
'Wei ', # 0x4b
'Ruan ', # 0x4c
'Huan ', # 0x4d
'Jie ', # 0x4e
'Chuan ', # 0x4f
'Jian ', # 0x50
'Zhuan ', # 0x51
'Yang ', # 0x52
'Lian ', # 0x53
'Quan ', # 0x54
'Xia ', # 0x55
'Duan ', # 0x56
'Yuan ', # 0x57
'Ye ', # 0x58
'Nao ', # 0x59
'Hu ', # 0x5a
'Ying ', # 0x5b
'Yu ', # 0x5c
'Huang ', # 0x5d
'Rui ', # 0x5e
'Se ', # 0x5f
'Liu ', # 0x60
'Shi ', # 0x61
'Rong ', # 0x62
'Suo ', # 0x63
'Yao ', # 0x64
'Wen ', # 0x65
'Wu ', # 0x66
'Jin ', # 0x67
'Jin ', # 0x68
'Ying ', # 0x69
'Ma ', # 0x6a
'Tao ', # 0x6b
'Liu ', # 0x6c
'Tang ', # 0x6d
'Li ', # 0x6e
'Lang ', # 0x6f
'Gui ', # 0x70
'Zhen ', # 0x71
'Qiang ', # 0x72
'Cuo ', # 0x73
'Jue ', # 0x74
'Zhao ', # 0x75
'Yao ', # 0x76
'Ai ', # 0x77
'Bin ', # 0x78
'Tu ', # 0x79
'Chang ', # 0x7a
'Kun ', # 0x7b
'Zhuan ', # 0x7c
'Cong ', # 0x7d
'Jin ', # 0x7e
'Yi ', # 0x7f
'Cui ', # 0x80
'Cong ', # 0x81
'Qi ', # 0x82
'Li ', # 0x83
'Ying ', # 0x84
'Suo ', # 0x85
'Qiu ', # 0x86
'Xuan ', # 0x87
'Ao ', # 0x88
'Lian ', # 0x89
'Man ', # 0x8a
'Zhang ', # 0x8b
'Yin ', # 0x8c
'[?] ', # 0x8d
'Ying ', # 0x8e
'Zhi ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'Deng ', # 0x92
'Xiou ', # 0x93
'Zeng ', # 0x94
'Xun ', # 0x95
'Qu ', # 0x96
'Dang ', # 0x97
'Lin ', # 0x98
'Liao ', # 0x99
'Qiong ', # 0x9a
'Su ', # 0x9b
'Huang ', # 0x9c
'Gui ', # 0x9d
'Pu ', # 0x9e
'Jing ', # 0x9f
'Fan ', # 0xa0
'Jin ', # 0xa1
'Liu ', # 0xa2
'Ji ', # 0xa3
'[?] ', # 0xa4
'Jing ', # 0xa5
'Ai ', # 0xa6
'Bi ', # 0xa7
'Can ', # 0xa8
'Qu ', # 0xa9
'Zao ', # 0xaa
'Dang ', # 0xab
'Jiao ', # 0xac
'Gun ', # 0xad
'Tan ', # 0xae
'Hui ', # 0xaf
'Huan ', # 0xb0
'Se ', # 0xb1
'Sui ', # 0xb2
'Tian ', # 0xb3
'[?] ', # 0xb4
'Yu ', # 0xb5
'Jin ', # 0xb6
'Lu ', # 0xb7
'Bin ', # 0xb8
'Shou ', # 0xb9
'Wen ', # 0xba
'Zui ', # 0xbb
'Lan ', # 0xbc
'Xi ', # 0xbd
'Ji ', # 0xbe
'Xuan ', # 0xbf
'Ruan ', # 0xc0
'Huo ', # 0xc1
'Gai ', # 0xc2
'Lei ', # 0xc3
'Du ', # 0xc4
'Li ', # 0xc5
'Zhi ', # 0xc6
'Rou ', # 0xc7
'Li ', # 0xc8
'Zan ', # 0xc9
'Qiong ', # 0xca
'Zhe ', # 0xcb
'Gui ', # 0xcc
'Sui ', # 0xcd
'La ', # 0xce
'Long ', # 0xcf
'Lu ', # 0xd0
'Li ', # 0xd1
'Zan ', # 0xd2
'Lan ', # 0xd3
'Ying ', # 0xd4
'Mi ', # 0xd5
'Xiang ', # 0xd6
'Xi ', # 0xd7
'Guan ', # 0xd8
'Dao ', # 0xd9
'Zan ', # 0xda
'Huan ', # 0xdb
'Gua ', # 0xdc
'Bo ', # 0xdd
'Die ', # 0xde
'Bao ', # 0xdf
'Hu ', # 0xe0
'Zhi ', # 0xe1
'Piao ', # 0xe2
'Ban ', # 0xe3
'Rang ', # 0xe4
'Li ', # 0xe5
'Wa ', # 0xe6
'Dekaguramu ', # 0xe7
'Jiang ', # 0xe8
'Qian ', # 0xe9
'Fan ', # 0xea
'Pen ', # 0xeb
'Fang ', # 0xec
'Dan ', # 0xed
'Weng ', # 0xee
'Ou ', # 0xef
'Deshiguramu ', # 0xf0
'Miriguramu ', # 0xf1
'Thon ', # 0xf2
'Hu ', # 0xf3
'Ling ', # 0xf4
'Yi ', # 0xf5
'Ping ', # 0xf6
'Ci ', # 0xf7
'Hekutogura ', # 0xf8
'Juan ', # 0xf9
'Chang ', # 0xfa
'Chi ', # 0xfb
'Sarake ', # 0xfc
'Dang ', # 0xfd
'Meng ', # 0xfe
'Pou ', # 0xff
)
|
mit
|
mancoast/CPythonPyc_test
|
cpython/253_test_hotshot.py
|
29
|
4199
|
import hotshot
import hotshot.log
import os
import pprint
import unittest
from test import test_support
from hotshot.log import ENTER, EXIT, LINE
def shortfilename(fn):
# We use a really shortened filename since an exact match is made,
# and the source may be either a Python source file or a
# pre-compiled bytecode file.
if fn:
return os.path.splitext(os.path.basename(fn))[0]
else:
return fn
class UnlinkingLogReader(hotshot.log.LogReader):
"""Extend the LogReader so the log file is unlinked when we're
done with it."""
def __init__(self, logfn):
self.__logfn = logfn
hotshot.log.LogReader.__init__(self, logfn)
def next(self, index=None):
try:
return hotshot.log.LogReader.next(self)
except StopIteration:
self.close()
os.unlink(self.__logfn)
raise
class HotShotTestCase(unittest.TestCase):
def new_profiler(self, lineevents=0, linetimings=1):
self.logfn = test_support.TESTFN
return hotshot.Profile(self.logfn, lineevents, linetimings)
def get_logreader(self):
return UnlinkingLogReader(self.logfn)
def get_events_wotime(self):
L = []
for event in self.get_logreader():
what, (filename, lineno, funcname), tdelta = event
L.append((what, (shortfilename(filename), lineno, funcname)))
return L
def check_events(self, expected):
events = self.get_events_wotime()
if events != expected:
self.fail(
"events did not match expectation; got:\n%s\nexpected:\n%s"
% (pprint.pformat(events), pprint.pformat(expected)))
def run_test(self, callable, events, profiler=None):
if profiler is None:
profiler = self.new_profiler()
self.failUnless(not profiler._prof.closed)
profiler.runcall(callable)
self.failUnless(not profiler._prof.closed)
profiler.close()
self.failUnless(profiler._prof.closed)
self.check_events(events)
def test_addinfo(self):
def f(p):
p.addinfo("test-key", "test-value")
profiler = self.new_profiler()
profiler.runcall(f, profiler)
profiler.close()
log = self.get_logreader()
info = log._info
list(log)
self.failUnless(info["test-key"] == ["test-value"])
def test_line_numbers(self):
def f():
y = 2
x = 1
def g():
f()
f_lineno = f.func_code.co_firstlineno
g_lineno = g.func_code.co_firstlineno
events = [(ENTER, ("test_hotshot", g_lineno, "g")),
(LINE, ("test_hotshot", g_lineno+1, "g")),
(ENTER, ("test_hotshot", f_lineno, "f")),
(LINE, ("test_hotshot", f_lineno+1, "f")),
(LINE, ("test_hotshot", f_lineno+2, "f")),
(EXIT, ("test_hotshot", f_lineno, "f")),
(EXIT, ("test_hotshot", g_lineno, "g")),
]
self.run_test(g, events, self.new_profiler(lineevents=1))
def test_start_stop(self):
# Make sure we don't return NULL in the start() and stop()
# methods when there isn't an error. Bug in 2.2 noted by
# Anthony Baxter.
profiler = self.new_profiler()
profiler.start()
profiler.stop()
profiler.close()
os.unlink(self.logfn)
def test_bad_sys_path(self):
import sys
import os
orig_path = sys.path
coverage = hotshot._hotshot.coverage
try:
# verify we require a list for sys.path
sys.path = 'abc'
self.assertRaises(RuntimeError, coverage, test_support.TESTFN)
# verify that we require sys.path exists
del sys.path
self.assertRaises(RuntimeError, coverage, test_support.TESTFN)
finally:
sys.path = orig_path
if os.path.exists(test_support.TESTFN):
os.remove(test_support.TESTFN)
def test_main():
test_support.run_unittest(HotShotTestCase)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
tmenjo/cinder-2015.1.1
|
cinder/tests/test_rbd.py
|
1
|
50268
|
# Copyright 2012 Josh Durgin
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import os
import tempfile
import mock
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import units
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests import test_volume
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
LOG = logging.getLogger(__name__)
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockImageExistsException(MockException):
"""Used as mock for rbd.ImageExists."""
def common_mocks(f):
"""Decorator to set mocks common to all tests.
The point of doing these mocks here is so that we don't accidentally set
mocks that can't/don't get unset.
"""
def _common_inner_inner1(inst, *args, **kwargs):
@mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
@mock.patch('cinder.volume.drivers.rbd.RADOSClient')
@mock.patch('cinder.backup.drivers.ceph.rbd')
@mock.patch('cinder.backup.drivers.ceph.rados')
def _common_inner_inner2(mock_rados, mock_rbd, mock_client,
mock_proxy):
inst.mock_rbd = mock_rbd
inst.mock_rados = mock_rados
inst.mock_client = mock_client
inst.mock_proxy = mock_proxy
inst.mock_rbd.RBD.Error = Exception
inst.mock_rados.Error = Exception
inst.mock_rbd.ImageBusy = MockImageBusyException
inst.mock_rbd.ImageNotFound = MockImageNotFoundException
inst.mock_rbd.ImageExists = MockImageExistsException
inst.driver.rbd = inst.mock_rbd
inst.driver.rados = inst.mock_rados
return f(inst, *args, **kwargs)
return _common_inner_inner2()
return _common_inner_inner1
CEPH_MON_DUMP = """dumped monmap epoch 1
{ "epoch": 1,
"fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
"modified": "2013-05-22 17:44:56.343618",
"created": "2013-05-22 17:44:56.343618",
"mons": [
{ "rank": 0,
"name": "a",
"addr": "[::1]:6789\/0"},
{ "rank": 1,
"name": "b",
"addr": "[::1]:6790\/0"},
{ "rank": 2,
"name": "c",
"addr": "[::1]:6791\/0"},
{ "rank": 3,
"name": "d",
"addr": "127.0.0.1:6792\/0"},
{ "rank": 4,
"name": "e",
"addr": "example.com:6791\/0"}],
"quorum": [
0,
1,
2]}
"""
class RBDTestCase(test.TestCase):
def setUp(self):
global RAISED_EXCEPTIONS
RAISED_EXCEPTIONS = []
super(RBDTestCase, self).setUp()
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self.cfg.rbd_pool = 'rbd'
self.cfg.rbd_ceph_conf = None
self.cfg.rbd_secret_uuid = None
self.cfg.rbd_user = None
self.cfg.volume_dd_blocksize = '1M'
self.cfg.rbd_store_chunk_size = 4
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = driver.RBDDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.set_initialized()
self.volume_name = u'volume-00000001'
self.snapshot_name = u'snapshot-00000001'
self.volume_size = 1
self.volume = dict(name=self.volume_name, size=self.volume_size)
self.snapshot = dict(volume_name=self.volume_name,
name=self.snapshot_name)
@common_mocks
def test_create_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.driver.create_volume(self.volume)
chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
self.volume_size * units.Gi, order]
kwargs = {'old_format': False,
'features': client.features}
self.mock_rbd.RBD.return_value.create.assert_called_once_with(
*args, **kwargs)
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
@common_mocks
def test_manage_existing_get_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 2 * units.Gi
existing_ref = {'source-name': self.volume_name}
return_size = self.driver.manage_existing_get_size(
self.volume,
existing_ref)
self.assertEqual(2, return_size)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing_get_invalid_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
with mock.patch.object(self.driver.rbd.Image(), 'close') \
as mock_rbd_image_close:
mock_rbd_image_size.return_value = 'abcd'
existing_ref = {'source-name': self.volume_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.manage_existing_get_size,
self.volume, existing_ref)
mock_rbd_image_size.assert_called_once_with()
mock_rbd_image_close.assert_called_once_with()
@common_mocks
def test_manage_existing(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \
mock_rbd_image_rename:
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
mock_rbd_image_rename.return_value = 0
self.driver.manage_existing(self.volume, existing_ref)
mock_rbd_image_rename.assert_called_with(
client.ioctx,
exist_volume,
self.volume_name)
@common_mocks
def test_manage_existing_with_exist_rbd_image(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
self.mock_rbd.RBD.return_value.rename.side_effect = (
MockImageExistsException)
exist_volume = 'vol-exist'
existing_ref = {'source-name': exist_volume}
self.assertRaises(self.mock_rbd.ImageExists,
self.driver.manage_existing,
self.volume, existing_ref)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageExists])
@common_mocks
def test_delete_backup_snaps(self):
self.driver.rbd.Image.remove_snap = mock.Mock()
with mock.patch.object(self.driver, '_get_backup_snaps') as \
mock_get_backup_snaps:
mock_get_backup_snaps.return_value = [{'name': 'snap1'}]
rbd_image = self.driver.rbd.Image()
self.driver._delete_backup_snaps(rbd_image)
mock_get_backup_snaps.assert_called_once_with(rbd_image)
self.assertTrue(
self.driver.rbd.Image.return_value.remove_snap.called)
@common_mocks
def test_delete_volume(self):
client = self.mock_client.return_value
self.driver.rbd.Image.return_value.list_snaps.return_value = []
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
mock_get_clone_info.return_value = (None, None, None)
self.driver.delete_volume(self.volume)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.driver.rbd.Image.return_value
.list_snaps.assert_called_once_with())
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.driver.rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.driver.rbd.RBD.return_value.remove.call_count)
@common_mocks
def delete_volume_not_found(self):
self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound
self.assertIsNone(self.driver.delete_volume(self.volume))
self.mock_rbd.Image.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_delete_busy_volume(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageBusy)
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, self.volume)
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.mock_rbd.RBD.return_value.remove.call_count)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageBusy])
@common_mocks
def test_delete_volume_not_found(self):
self.mock_rbd.Image.return_value.list_snaps.return_value = []
self.mock_rbd.RBD.return_value.remove.side_effect = (
self.mock_rbd.ImageNotFound)
with mock.patch.object(self.driver, '_get_clone_info') as \
mock_get_clone_info:
mock_get_clone_info.return_value = (None, None, None)
with mock.patch.object(self.driver, '_delete_backup_snaps') as \
mock_delete_backup_snaps:
with mock.patch.object(driver, 'RADOSClient') as \
mock_rados_client:
self.assertIsNone(self.driver.delete_volume(self.volume))
mock_get_clone_info.assert_called_once_with(
self.mock_rbd.Image.return_value,
self.volume_name,
None)
(self.mock_rbd.Image.return_value.list_snaps
.assert_called_once_with())
mock_rados_client.assert_called_once_with(self.driver)
mock_delete_backup_snaps.assert_called_once_with(
self.mock_rbd.Image.return_value)
self.assertFalse(
self.mock_rbd.Image.return_value.unprotect_snap.called)
self.assertEqual(
1, self.mock_rbd.RBD.return_value.remove.call_count)
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS,
[self.mock_rbd.ImageNotFound])
@common_mocks
def test_create_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.create_snapshot(self.snapshot)
args = [str(self.snapshot_name)]
proxy.create_snap.assert_called_with(*args)
proxy.protect_snap.assert_called_with(*args)
@common_mocks
def test_delete_snapshot(self):
proxy = self.mock_proxy.return_value
proxy.__enter__.return_value = proxy
self.driver.delete_snapshot(self.snapshot)
args = [str(self.snapshot_name)]
proxy.remove_snap.assert_called_with(*args)
proxy.unprotect_snap.assert_called_with(*args)
@common_mocks
def test_get_clone_info(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume, self.volume_name)
self.assertEqual(info, parent_info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_snap(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual(info, parent_info)
self.assertEqual(volume.set_snap.call_count, 2)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_get_clone_info_w_exception(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
volume.parent_info.side_effect = self.mock_rbd.ImageNotFound
snapshot = self.mock_rbd.ImageSnapshot()
info = self.driver._get_clone_info(volume, self.volume_name,
snap=snapshot)
self.assertEqual(info, (None, None, None))
self.assertEqual(volume.set_snap.call_count, 2)
volume.parent_info.assert_called_once_with()
# Make sure the exception was raised
self.assertEqual(RAISED_EXCEPTIONS, [self.mock_rbd.ImageNotFound])
@common_mocks
def test_get_clone_info_deleted_volume(self):
volume = self.mock_rbd.Image()
volume.set_snap = mock.Mock()
volume.parent_info = mock.Mock()
parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_name))
volume.parent_info.return_value = parent_info
info = self.driver._get_clone_info(volume,
"%s.deleted" % (self.volume_name))
self.assertEqual(info, parent_info)
self.assertFalse(volume.set_snap.called)
volume.parent_info.assert_called_once_with()
@common_mocks
def test_create_cloned_volume_same_size(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume({'name': dst_name,
'size': 10},
{'name': src_name,
'size': 10})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
0, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_different_size(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
with mock.patch.object(self.driver, '_resize') as mock_resize:
mock_get_clone_depth.return_value = 1
self.driver.create_cloned_volume({'name': dst_name,
'size': 20},
{'name': src_name,
'size': 10})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name,
'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
self.mock_rbd.Image.return_value.close \
.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
self.assertEqual(
1, mock_resize.call_count)
@common_mocks
def test_create_cloned_volume_w_flatten(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 1
self.mock_rbd.RBD.return_value.clone.side_effect = (
self.mock_rbd.RBD.Error)
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
dict(name=dst_name), dict(name=src_name))
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.mock_rbd.Image.return_value.close.assert_called_once_with()
self.assertTrue(mock_get_clone_depth.called)
@common_mocks
def test_create_cloned_volume_w_clone_exception(self):
src_name = u'volume-00000001'
dst_name = u'volume-00000002'
self.cfg.rbd_max_clone_depth = 2
self.mock_rbd.RBD.return_value.clone.side_effect = (
self.mock_rbd.RBD.Error)
with mock.patch.object(self.driver, '_get_clone_depth') as \
mock_get_clone_depth:
# Try with no flatten required
mock_get_clone_depth.return_value = 1
self.assertRaises(self.mock_rbd.RBD.Error,
self.driver.create_cloned_volume,
{'name': dst_name}, {'name': src_name})
(self.mock_rbd.Image.return_value.create_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.protect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.assertEqual(
1, self.mock_rbd.RBD.return_value.clone.call_count)
(self.mock_rbd.Image.return_value.unprotect_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
(self.mock_rbd.Image.return_value.remove_snap
.assert_called_once_with('.'.join((dst_name, 'clone_snap'))))
self.mock_rbd.Image.return_value.close.assert_called_once_with()
@common_mocks
def test_good_locations(self):
locations = ['rbd://fsid/pool/image/snap',
'rbd://%2F/%2F/%2F/%2F', ]
map(self.driver._parse_location, locations)
@common_mocks
def test_bad_locations(self):
locations = ['rbd://image',
'http://path/to/somewhere/else',
'rbd://image/extra',
'rbd://image/',
'rbd://fsid/pool/image/',
'rbd://fsid/pool/image/snap/',
'rbd://///', ]
for loc in locations:
self.assertRaises(exception.ImageUnacceptable,
self.driver._parse_location,
loc)
self.assertFalse(
self.driver._is_cloneable(loc, {'disk_format': 'raw'}))
@common_mocks
def test_cloneable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
info = {'disk_format': 'raw'}
self.assertTrue(self.driver._is_cloneable(location, info))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_different_fsid(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://def/pool/image/snap'
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': 'raw'}))
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_unreadable(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
self.driver.rbd.Error = Exception
self.mock_proxy.side_effect = Exception
args = [location, {'disk_format': 'raw'}]
self.assertFalse(self.driver._is_cloneable(*args))
self.assertEqual(1, self.mock_proxy.call_count)
self.assertTrue(mock_get_fsid.called)
@common_mocks
def test_uncloneable_bad_format(self):
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = 'abc'
location = 'rbd://abc/pool/image/snap'
formats = ['qcow2', 'vmdk', 'vdi']
for f in formats:
self.assertFalse(
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
def _copy_image(self):
with mock.patch.object(tempfile, 'NamedTemporaryFile'):
with mock.patch.object(os.path, 'exists') as mock_exists:
mock_exists.return_value = True
with mock.patch.object(image_utils, 'fetch_to_raw'):
with mock.patch.object(self.driver, 'delete_volume'):
with mock.patch.object(self.driver, '_resize'):
mock_image_service = mock.MagicMock()
args = [None, {'name': 'test', 'size': 1},
mock_image_service, None]
self.driver.copy_image_to_volume(*args)
@common_mocks
def test_copy_image_no_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = None
self._copy_image()
@common_mocks
def test_copy_image_volume_tmp(self):
self.cfg.volume_tmp_dir = None
self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
self._copy_image()
@common_mocks
def test_update_volume_stats(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (
0, '{"stats":{"total_bytes":64385286144,'
'"total_used_bytes":3289628672,"total_avail_bytes":61095657472},'
'"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,'
'"bytes_used":1546440971,"max_avail":28987613184,"objects":412}},'
'{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,'
'"max_avail":28987613184,"objects":0}}]}\n', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
expected = dict(
volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb=27,
free_capacity_gb=26,
reserved_percentage=0)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_update_volume_stats_error(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
client.cluster = mock.Mock()
client.cluster.mon_command = mock.Mock()
client.cluster.mon_command.return_value = (22, '', '')
self.driver.configuration.safe_get = mock.Mock()
self.driver.configuration.safe_get.return_value = 'RBD'
expected = dict(volume_backend_name='RBD',
vendor_name='Open Source',
driver_version=self.driver.VERSION,
storage_protocol='ceph',
total_capacity_gb='unknown',
free_capacity_gb='unknown',
reserved_percentage=0)
actual = self.driver.get_volume_stats(True)
client.cluster.mon_command.assert_called_once_with(
'{"prefix":"df", "format":"json"}', '')
self.assertDictMatch(expected, actual)
@common_mocks
def test_get_mon_addrs(self):
with mock.patch.object(self.driver, '_execute') as mock_execute:
mock_execute.return_value = (CEPH_MON_DUMP, '')
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
self.assertEqual((hosts, ports), self.driver._get_mon_addrs())
@common_mocks
def test_initialize_connection(self):
hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
ports = ['6789', '6790', '6791', '6792', '6791']
with mock.patch.object(self.driver, '_get_mon_addrs') as \
mock_get_mon_addrs:
mock_get_mon_addrs.return_value = (hosts, ports)
expected = {
'driver_volume_type': 'rbd',
'data': {
'name': '%s/%s' % (self.cfg.rbd_pool,
self.volume_name),
'hosts': hosts,
'ports': ports,
'auth_enabled': False,
'auth_username': None,
'secret_type': 'ceph',
'secret_uuid': None, }
}
volume = dict(name=self.volume_name)
actual = self.driver.initialize_connection(volume, None)
self.assertDictMatch(expected, actual)
self.assertTrue(mock_get_mon_addrs.called)
@common_mocks
def test_clone(self):
src_pool = u'images'
src_image = u'image-name'
src_snap = u'snapshot-name'
client_stack = []
def mock__enter__(inst):
def _inner():
client_stack.append(inst)
return inst
return _inner
client = self.mock_client.return_value
# capture both rados client used to perform the clone
client.__enter__.side_effect = mock__enter__(client)
self.driver._clone(self.volume, src_pool, src_image, src_snap)
args = [client_stack[0].ioctx, str(src_image), str(src_snap),
client_stack[1].ioctx, str(self.volume_name)]
kwargs = {'features': client.features}
self.mock_rbd.RBD.return_value.clone.assert_called_once_with(
*args, **kwargs)
self.assertEqual(client.__enter__.call_count, 2)
@common_mocks
def test_extend_volume(self):
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': self.volume_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()
self.driver.extend_volume(fake_vol, fake_size)
self.mox.VerifyAll()
@common_mocks
def test_retype(self):
context = {}
diff = {'encryption': {},
'extra_specs': {}}
fake_volume = {'name': 'testvolume',
'host': 'currenthost'}
fake_type = 'high-IOPS'
# no support for migration
host = {'host': 'anotherhost'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
host = {'host': 'currenthost'}
# no support for changing encryption
diff['encryption'] = {'non-empty': 'non-empty'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
diff['encryption'] = {}
# no support for changing extra_specs
diff['extra_specs'] = {'non-empty': 'non-empty'}
self.assertFalse(self.driver.retype(context, fake_volume,
fake_type, diff, host))
diff['extra_specs'] = {}
self.assertTrue(self.driver.retype(context, fake_volume,
fake_type, diff, host))
def test_rbd_volume_proxy_init(self):
mock_driver = mock.Mock(name='driver')
mock_driver._connect_to_rados.return_value = (None, None)
with driver.RBDVolumeProxy(mock_driver, self.volume_name):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
mock_driver.reset_mock()
snap = u'snapshot-name'
with driver.RBDVolumeProxy(mock_driver, self.volume_name,
snapshot=snap):
self.assertEqual(1, mock_driver._connect_to_rados.call_count)
self.assertFalse(mock_driver._disconnect_from_rados.called)
self.assertEqual(1, mock_driver._disconnect_from_rados.call_count)
@common_mocks
def test_connect_to_rados(self):
# Default
self.cfg.rados_connect_timeout = -1
self.mock_rados.Rados.return_value.open_ioctx.return_value = \
self.mock_rados.Rados.return_value.ioctx
# default configured pool
ret = self.driver._connect_to_rados()
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
# Expect no timeout if default is used
self.mock_rados.Rados.return_value.connect.assert_called_once_with()
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx)
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
self.cfg.rbd_pool)
# different pool
ret = self.driver._connect_to_rados('alt_pool')
self.assertTrue(self.mock_rados.Rados.return_value.connect.called)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.assertEqual(ret[1], self.mock_rados.Rados.return_value.ioctx)
self.mock_rados.Rados.return_value.open_ioctx.assert_called_with(
'alt_pool')
# With timeout
self.cfg.rados_connect_timeout = 1
self.mock_rados.Rados.return_value.connect.reset_mock()
self.driver._connect_to_rados()
self.mock_rados.Rados.return_value.connect.assert_called_once_with(
timeout=1)
# error
self.mock_rados.Rados.return_value.open_ioctx.reset_mock()
self.mock_rados.Rados.return_value.shutdown.reset_mock()
self.mock_rados.Rados.return_value.open_ioctx.side_effect = (
self.mock_rados.Error)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._connect_to_rados)
self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called)
self.mock_rados.Rados.return_value.shutdown.assert_called_once_with()
class RBDImageIOWrapperTestCase(test.TestCase):
def setUp(self):
super(RBDImageIOWrapperTestCase, self).setUp()
self.meta = mock.Mock()
self.meta.user = 'mock_user'
self.meta.conf = 'mock_conf'
self.meta.pool = 'mock_pool'
self.meta.image = mock.Mock()
self.meta.image.read = mock.Mock()
self.meta.image.write = mock.Mock()
self.meta.image.size = mock.Mock()
self.mock_rbd_wrapper = driver.RBDImageIOWrapper(self.meta)
self.data_length = 1024
self.full_data = 'abcd' * 256
def test_init(self):
self.assertEqual(self.mock_rbd_wrapper._rbd_meta, self.meta)
self.assertEqual(self.mock_rbd_wrapper._offset, 0)
def test_inc_offset(self):
self.mock_rbd_wrapper._inc_offset(10)
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(self.mock_rbd_wrapper._offset, 20)
def test_rbd_image(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_image, self.meta.image)
def test_rbd_user(self):
self.assertEqual(self.mock_rbd_wrapper.rbd_user, self.meta.user)
def test_rbd_pool(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_pool, self.meta.pool)
    def test_rbd_conf(self):
        self.assertEqual(self.mock_rbd_wrapper.rbd_conf, self.meta.conf)
def test_read(self):
def mock_read(offset, length):
return self.full_data[offset:length]
self.meta.image.read.side_effect = mock_read
self.meta.image.size.return_value = self.data_length
data = self.mock_rbd_wrapper.read()
self.assertEqual(data, self.full_data)
data = self.mock_rbd_wrapper.read()
self.assertEqual(data, '')
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read()
self.assertEqual(data, self.full_data)
self.mock_rbd_wrapper.seek(0)
data = self.mock_rbd_wrapper.read(10)
self.assertEqual(data, self.full_data[:10])
def test_write(self):
self.mock_rbd_wrapper.write(self.full_data)
self.assertEqual(self.mock_rbd_wrapper._offset, 1024)
def test_seekable(self):
self.assertTrue(self.mock_rbd_wrapper.seekable)
def test_seek(self):
self.assertEqual(self.mock_rbd_wrapper._offset, 0)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(self.mock_rbd_wrapper._offset, 10)
self.mock_rbd_wrapper.seek(10)
self.assertEqual(self.mock_rbd_wrapper._offset, 10)
self.mock_rbd_wrapper.seek(10, 1)
self.assertEqual(self.mock_rbd_wrapper._offset, 20)
self.mock_rbd_wrapper.seek(0)
self.mock_rbd_wrapper.write(self.full_data)
self.meta.image.size.return_value = self.data_length
self.mock_rbd_wrapper.seek(0)
self.assertEqual(self.mock_rbd_wrapper._offset, 0)
self.mock_rbd_wrapper.seek(10, 2)
self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length + 10)
self.mock_rbd_wrapper.seek(-10, 2)
self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)
# test exceptions.
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, 0, 3)
self.assertRaises(IOError, self.mock_rbd_wrapper.seek, -1)
# offset should not have been changed by any of the previous
# operations.
self.assertEqual(self.mock_rbd_wrapper._offset, self.data_length - 10)
def test_tell(self):
self.assertEqual(self.mock_rbd_wrapper.tell(), 0)
self.mock_rbd_wrapper._inc_offset(10)
self.assertEqual(self.mock_rbd_wrapper.tell(), 10)
def test_flush(self):
with mock.patch.object(driver, 'LOG') as mock_logger:
self.meta.image.flush = mock.Mock()
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
self.meta.image.flush.reset_mock()
# this should be caught and logged silently.
self.meta.image.flush.side_effect = AttributeError
self.mock_rbd_wrapper.flush()
self.meta.image.flush.assert_called_once_with()
msg = _("flush() not supported in this version of librbd")
mock_logger.warning.assert_called_with(msg)
def test_fileno(self):
self.assertRaises(IOError, self.mock_rbd_wrapper.fileno)
def test_close(self):
self.mock_rbd_wrapper.close()
class ManagedRBDTestCase(test_volume.DriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
def setUp(self):
super(ManagedRBDTestCase, self).setUp()
# TODO(dosaboy): need to remove dependency on mox stubs here once
# image.fake has been converted to mock.
fake_image.stub_out_image_service(self.stubs)
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
self.called = []
def _create_volume_from_image(self, expected_status, raw=False,
clone_error=False):
"""Try to clone a volume from an image, and check the status
afterwards.
        NOTE: if clone_error is True we force the image type to raw; otherwise
        clone_image is not called.
"""
volume_id = 1
# See tests.image.fake for image types.
if raw:
image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
else:
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
# creating volume testdata
db.volume_create(self.context,
{'id': volume_id,
'updated_at': timeutils.utcnow(),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
try:
if not clone_error:
self.volume.create_volume(self.context,
volume_id,
image_id=image_id)
else:
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_id,
image_id=image_id)
volume = db.volume_get(self.context, volume_id)
self.assertEqual(volume['status'], expected_status)
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
def test_create_vol_from_image_status_available(self):
"""Clone raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, True
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(mock_create.called)
def test_create_vol_from_non_raw_image_status_available(self):
"""Clone non-raw image then verify volume is in available state."""
def _mock_clone_image(context, volume, image_location,
image_meta, image_service):
return {'provider_location': None}, False
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = _mock_clone_image
with mock.patch.object(self.volume.driver, 'create_volume') as \
mock_create:
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('available', raw=False)
self.assertTrue(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertTrue(mock_create.called)
def test_create_vol_from_image_status_error(self):
"""Fail to clone raw image then verify volume is in error state."""
with mock.patch.object(self.volume.driver, 'clone_image') as \
mock_clone_image:
mock_clone_image.side_effect = exception.CinderException
with mock.patch.object(self.volume.driver, 'create_volume'):
with mock.patch.object(create_volume.CreateVolumeFromSpecTask,
'_copy_image_to_volume') as mock_copy:
self._create_volume_from_image('error', raw=True,
clone_error=True)
self.assertFalse(mock_copy.called)
self.assertTrue(mock_clone_image.called)
self.assertFalse(self.volume.driver.create_volume.called)
def test_clone_failure(self):
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', lambda *args: False):
image_loc = (mock.Mock(), None)
actual = driver.clone_image(mock.Mock(),
mock.Mock(),
image_loc,
{},
mock.Mock())
self.assertEqual(({}, False), actual)
self.assertEqual(({}, False),
driver.clone_image('', object(), None, {}, ''))
def test_clone_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
with mock.patch.object(self.volume.driver, '_is_cloneable') as \
mock_is_cloneable:
mock_is_cloneable.return_value = True
with mock.patch.object(self.volume.driver, '_clone') as \
mock_clone:
with mock.patch.object(self.volume.driver, '_resize') as \
mock_resize:
image_loc = ('rbd://fee/fi/fo/fum', None)
volume = {'name': 'vol1'}
actual = driver.clone_image(mock.Mock(),
volume,
image_loc,
{'disk_format': 'raw',
'id': 'id.foo'},
mock.Mock())
self.assertEqual(expected, actual)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_success(self):
expected = ({'provider_location': None}, True)
driver = self.volume.driver
def cloneable_side_effect(url_location, image_meta):
return url_location == 'rbd://fee/fi/fo/fum'
with mock.patch.object(self.volume.driver, '_is_cloneable') \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
mock_is_cloneable.side_effect = cloneable_side_effect
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_clone.assert_called_once_with(volume,
'fi', 'fo', 'fum')
mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum',
image_meta)
mock_resize.assert_called_once_with(volume)
def test_clone_multilocation_failure(self):
expected = ({}, False)
driver = self.volume.driver
with mock.patch.object(driver, '_is_cloneable', return_value=False) \
as mock_is_cloneable, \
mock.patch.object(self.volume.driver, '_clone') as mock_clone, \
mock.patch.object(self.volume.driver, '_resize') \
as mock_resize:
image_loc = ('rbd://bee/bi/bo/bum',
[{'url': 'rbd://bee/bi/bo/bum'},
{'url': 'rbd://fee/fi/fo/fum'}])
volume = {'name': 'vol1'}
image_meta = mock.sentinel.image_meta
image_service = mock.sentinel.image_service
actual = driver.clone_image(self.context,
volume,
image_loc,
image_meta,
image_service)
self.assertEqual(expected, actual)
self.assertEqual(2, mock_is_cloneable.call_count)
mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
image_meta)
mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
image_meta)
self.assertFalse(mock_clone.called)
self.assertFalse(mock_resize.called)
|
apache-2.0
|
axbaretto/beam
|
build/fbcode_builder/utils.py
|
17
|
2883
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'Miscellaneous utility functions.'
import itertools
import logging
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
def recursively_flatten_list(l):
return itertools.chain.from_iterable(
(recursively_flatten_list(i) if type(i) is list else (i,))
for i in l
)
def run_command(*cmd, **kwargs):
'The stdout of most fbcode_builder utilities is meant to be parsed.'
logging.debug('Running: {0} with {1}'.format(cmd, kwargs))
kwargs['stdout'] = sys.stderr
subprocess.check_call(cmd, **kwargs)
@contextmanager
def make_temp_dir(d):
os.mkdir(d)
try:
yield d
finally:
shutil.rmtree(d, ignore_errors=True)
def _inner_read_config(path):
'''
Helper to read a named config file.
The grossness with the global is a workaround for this python bug:
https://bugs.python.org/issue21591
The bug prevents us from defining either a local function or a lambda
in the scope of read_fbcode_builder_config below.
'''
global _project_dir
full_path = os.path.join(_project_dir, path)
return read_fbcode_builder_config(full_path)
def read_fbcode_builder_config(filename):
# Allow one spec to read another
# When doing so, treat paths as relative to the config's project directory.
# _project_dir is a "local" for _inner_read_config; see the comments
# in that function for an explanation of the use of global.
global _project_dir
_project_dir = os.path.dirname(filename)
scope = {'read_fbcode_builder_config': _inner_read_config}
with open(filename) as config_file:
code = compile(config_file.read(), filename, mode='exec')
exec(code, scope)
return scope['config']
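# Illustrative sketch (not part of the original file): a spec loaded by
# read_fbcode_builder_config() is plain Python that must define a top-level
# `config` dict; the key names shown here are assumptions for illustration.
# Because read_fbcode_builder_config is injected into the exec'd scope, one
# spec can include another via a path relative to its own project directory.
#
#   # example_spec.py (hypothetical)
#   config = {
#       'fbcode_builder_spec': lambda builder: {
#           'depends_on': [],
#           'steps': [],
#       },
#   }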
def steps_for_spec(builder, spec, processed_modules=None):
'''
Sets `builder` configuration, and returns all the builder steps
necessary to build `spec` and its dependencies.
Traverses the dependencies in depth-first order, honoring the sequencing
in each 'depends_on' list.
'''
if processed_modules is None:
processed_modules = set()
steps = []
for module in spec.get('depends_on', []):
if module not in processed_modules:
processed_modules.add(module)
steps.extend(steps_for_spec(
builder,
module.fbcode_builder_spec(builder),
processed_modules
))
steps.extend(spec.get('steps', []))
return steps
def build_fbcode_builder_config(config):
return lambda builder: builder.build(
steps_for_spec(builder, config['fbcode_builder_spec'](builder))
)
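# Usage sketch (added for illustration; `builder`, `module_a` and the spec
# contents are hypothetical). steps_for_spec() walks `depends_on` depth-first
# and emits each dependency's steps once, before the spec's own steps:
#
#   class module_a(object):
#       @staticmethod
#       def fbcode_builder_spec(builder):
#           return {'steps': ['step_a']}
#
#   spec = {'depends_on': [module_a], 'steps': ['step_main']}
#   steps_for_spec(builder, spec)  # -> ['step_a', 'step_main']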
|
apache-2.0
|
appliedx/edx-platform
|
common/djangoapps/student/migrations/0006_expand_meta_field.py
|
188
|
9246
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
|
agpl-3.0
|
Root-Box/kernel_samsung_smdk4412
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
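#
# Typical invocation (sketch; the event names and flags below come from the
# standard perf workflow and are not taken from this script):
#
#   perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a -- sleep 10
#   perf script -s futex-contention.py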
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/pip/_vendor/urllib3/util/url.py
|
7
|
13981
|
from __future__ import absolute_import
import re
from collections import namedtuple
from ..exceptions import LocationParseError
from ..packages import six
url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
# We only want to normalize urls with an HTTP(S) scheme.
# urllib3 infers URLs without a scheme (None) to be http.
NORMALIZABLE_SCHEMES = ("http", "https", None)
# Almost all of these patterns were derived from the
# 'rfc3986' module: https://github.com/python-hyper/rfc3986
PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
URI_RE = re.compile(
r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
r"(?://([^\\/?#]*))?"
r"([^?#]*)"
r"(?:\?([^#]*))?"
r"(?:#(.*))?$",
re.UNICODE | re.DOTALL,
)
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
IPV4_RE = re.compile("^" + IPV4_PAT + "$")
IPV6_RE = re.compile("^" + IPV6_PAT + "$")
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
REG_NAME_PAT,
IPV4_PAT,
IPV6_ADDRZ_PAT,
)
SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL)
UNRESERVED_CHARS = set(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
)
SUB_DELIM_CHARS = set("!$&'()*+,;=")
USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
PATH_CHARS = USERINFO_CHARS | {"@", "/"}
QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
class Url(namedtuple("Url", url_attrs)):
"""
Data structure for representing an HTTP URL. Used as a return value for
:func:`parse_url`. Both the scheme and host are normalized as they are
both case-insensitive according to RFC 3986.
"""
__slots__ = ()
def __new__(
cls,
scheme=None,
auth=None,
host=None,
port=None,
path=None,
query=None,
fragment=None,
):
if path and not path.startswith("/"):
path = "/" + path
if scheme is not None:
scheme = scheme.lower()
return super(Url, cls).__new__(
cls, scheme, auth, host, port, path, query, fragment
)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or "/"
if self.query is not None:
uri += "?" + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return "%s:%d" % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = u""
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + u"://"
if auth is not None:
url += auth + u"@"
if host is not None:
url += host
if port is not None:
url += u":" + str(port)
if path is not None:
url += path
if query is not None:
url += u"?" + query
if fragment is not None:
url += u"#" + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
.. deprecated:: 1.25
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, "", None
return s[:min_idx], s[min_idx + 1 :], min_delim
def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
"""Percent-encodes a URI component without reapplying
onto an already percent-encoded component.
"""
if component is None:
return component
component = six.ensure_text(component)
# Normalize existing percent-encoded bytes.
# Try to see if the component we're encoding is already percent-encoded
# so we can skip all '%' characters but still encode all others.
component, percent_encodings = PERCENT_RE.subn(
lambda match: match.group(0).upper(), component
)
uri_bytes = component.encode("utf-8", "surrogatepass")
is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
encoded_component = bytearray()
for i in range(0, len(uri_bytes)):
# Will return a single character bytestring on both Python 2 & 3
byte = uri_bytes[i : i + 1]
byte_ord = ord(byte)
if (is_percent_encoded and byte == b"%") or (
byte_ord < 128 and byte.decode() in allowed_chars
):
encoded_component += byte
continue
encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
return encoded_component.decode(encoding)
def _remove_path_dot_segments(path):
# See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
segments = path.split("/") # Turn the path into a list of segments
output = [] # Initialize the variable to use to store output
for segment in segments:
# '.' is the current directory, so ignore it, it is superfluous
if segment == ".":
continue
# Anything other than '..', should be appended to the output
elif segment != "..":
output.append(segment)
# In this case segment == '..', if we can, we should pop the last
# element
elif output:
output.pop()
# If the path starts with '/' and the output is empty or the first string
# is non-empty
if path.startswith("/") and (not output or output[0]):
output.insert(0, "")
# If the path starts with '/.' or '/..' ensure we add one more empty
# string to add a trailing '/'
if path.endswith(("/.", "/..")):
output.append("")
return "/".join(output)
def _normalize_host(host, scheme):
if host:
if isinstance(host, six.binary_type):
host = six.ensure_str(host)
if scheme in NORMALIZABLE_SCHEMES:
is_ipv6 = IPV6_ADDRZ_RE.match(host)
if is_ipv6:
match = ZONE_ID_RE.search(host)
if match:
start, end = match.span(1)
zone_id = host[start:end]
if zone_id.startswith("%25") and zone_id != "%25":
zone_id = zone_id[3:]
else:
zone_id = zone_id[1:]
zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
return host[:start].lower() + zone_id + host[end:]
else:
return host.lower()
elif not IPV4_RE.match(host):
return six.ensure_str(
b".".join([_idna_encode(label) for label in host.split(".")])
)
return host
def _idna_encode(name):
if name and any([ord(x) > 128 for x in name]):
try:
from pip._vendor import idna
except ImportError:
six.raise_from(
LocationParseError("Unable to parse URL without the 'idna' module"),
None,
)
try:
return idna.encode(name.lower(), strict=True, std3_rules=True)
except idna.IDNAError:
six.raise_from(
LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
)
return name.lower().encode("ascii")
def _encode_target(target):
"""Percent-encodes a request target so that there are no invalid characters"""
path, query = TARGET_RE.match(target).groups()
target = _encode_invalid_chars(path, PATH_CHARS)
query = _encode_invalid_chars(query, QUERY_CHARS)
if query is not None:
target += "?" + query
return target
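# Example (illustrative only): characters outside PATH_CHARS / QUERY_CHARS are
# percent-encoded while the "?" separator and sub-delimiters such as "=" are
# preserved, e.g.
#   _encode_target("/path with space?q=a b") -> "/path%20with%20space?q=a%20b"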
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
This parser is RFC 3986 compliant.
The parser logic and helper functions are based heavily on
work done in the ``rfc3986`` module.
:param str url: URL to parse into a :class:`.Url` namedtuple.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
if not url:
# Empty
return Url()
source_url = url
if not SCHEME_RE.search(url):
url = "//" + url
try:
scheme, authority, path, query, fragment = URI_RE.match(url).groups()
normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
if scheme:
scheme = scheme.lower()
if authority:
auth, host, port = SUBAUTHORITY_RE.match(authority).groups()
if auth and normalize_uri:
auth = _encode_invalid_chars(auth, USERINFO_CHARS)
if port == "":
port = None
else:
auth, host, port = None, None, None
if port is not None:
port = int(port)
if not (0 <= port <= 65535):
raise LocationParseError(url)
host = _normalize_host(host, scheme)
if normalize_uri and path:
path = _remove_path_dot_segments(path)
path = _encode_invalid_chars(path, PATH_CHARS)
if normalize_uri and query:
query = _encode_invalid_chars(query, QUERY_CHARS)
if normalize_uri and fragment:
fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
except (ValueError, AttributeError):
return six.raise_from(LocationParseError(source_url), None)
# For the sake of backwards compatibility we put empty
# string values for path if there are any defined values
# beyond the path in the URL.
# TODO: Remove this when we break backwards compatibility.
if not path:
if query is not None or fragment is not None:
path = ""
else:
path = None
# Ensure that each part of the URL is a `str` for
# backwards compatibility.
if isinstance(url, six.text_type):
ensure_func = six.ensure_text
else:
ensure_func = six.ensure_str
def ensure_type(x):
return x if x is None else ensure_func(x)
return Url(
scheme=ensure_type(scheme),
auth=ensure_type(auth),
host=ensure_type(host),
port=port,
path=ensure_type(path),
query=ensure_type(query),
fragment=ensure_type(fragment),
)
def get_host(url):
"""
Deprecated. Use :func:`parse_url` instead.
"""
p = parse_url(url)
return p.scheme or "http", p.hostname, p.port
|
apache-2.0
|
andMYhacks/infosec_mentors_project
|
app/config.py
|
1
|
1971
|
# project/config.py
import os
# from dotenv import load_dotenv, find_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
# load_dotenv(find_dotenv())
class BaseConfig:
# Base configuration
SECRET_KEY = os.environ.get('APP_SECRET_KEY')
PASSWORD_SALT = os.environ.get('APP_PASSWORD_SALT')
DEBUG = False
BCRYPT_LOG_ROUNDS = 13
WTF_CSRF_ENABLED = True
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
# TODO: Switch Preferred URL Scheme
# PREFERRED_URL_SCHEME = 'https'
PREFERRED_URL_SCHEME = 'http'
# mail settings
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
# mail credentials
MAIL_USERNAME = os.environ.get('APP_MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('APP_MAIL_PASSWORD')
# mail account(s)
MAIL_DEFAULT_SENDER = os.environ.get('APP_MAIL_SENDER')
# redis server
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
@staticmethod
def init_app(app):
pass
class DevConfig(BaseConfig):
# Development configuration
DEBUG = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'database.sqlite')
DEBUG_TB_ENABLED = True
class ProdConfig(BaseConfig):
# Production configuration
DEBUG = False
SESSION_COOKIE_SECURE = True
SECRET_KEY = os.environ.get('APP_SECRET_KEY')
DB_NAME = os.environ.get('APP_DB_NAME')
DB_USER = os.environ.get('APP_DB_USER')
DB_PASSWORD = os.environ.get('APP_DB_PASSWORD')
SQLALCHEMY_DATABASE_URI = 'postgresql://' + DB_USER + ':' + DB_PASSWORD + '@localhost/' + DB_NAME
DEBUG_TB_ENABLED = False
STRIPE_SECRET_KEY = os.environ.get('APP_STRIPE_SECRET_KEY')
STRIPE_PUBLISHABLE_KEY = os.environ.get('APP_PUBLISHABLE_KEY')
config_type = {
'dev': DevConfig,
'prod': ProdConfig,
    'default': DevConfig
}
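# Usage sketch (added for illustration; the APP_SETTINGS variable name is an
# assumption and is not defined elsewhere in this project): an application
# factory can select a configuration class by key, e.g.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(config_type[os.environ.get('APP_SETTINGS', 'dev')])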
|
gpl-3.0
|
gangadharkadam/vlinkfrappe
|
frappe/model/base_document.py
|
1
|
17661
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, sys
from frappe import _
from frappe.utils import cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
if not doctype in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError, doctype
else:
raise ImportError, doctype
_classes[doctype] = _class
return _classes[doctype]
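# Usage sketch (illustrative; "ToDo" is just an example DocType name):
#   controller = get_controller("ToDo")   # class object for the DocType
#   doc = controller({"doctype": "ToDo", "description": "x"})
# For DocTypes marked as custom, the generic Document class is returned.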
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in d.iteritems():
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in d.iteritems():
# dont_update_if_missing is a list of fieldnames, for which, you don't want to set default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError, "Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError, key
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self):
d = {}
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
if d[fieldname]=="":
df = self.meta.get_field(fieldname)
if df and df.fieldtype in ("Datetime", "Date"):
d[fieldname] = None
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False):
doc = self.get_valid_dict()
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in doc.keys():
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in doc.keys():
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_starred_by"):
if self.get(key):
doc[key] = self.get(key)
return frappe._dict(doc)
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), d.values())
except Exception, e:
if e.args[0]==1062 and "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
type, value, traceback = sys.exc_info()
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.NameError, (self.doctype, self.name, e), traceback
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict()
columns = d.keys()
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), d.values() + [d.get("name")])
except Exception, e:
if e.args[0]==1062:
type, value, traceback = sys.exc_info()
fieldname = str(e).split("'")[-2]
frappe.msgprint(_("{0} must be unique".format(self.meta.get_label(fieldname))))
raise frappe.ValidationError, (self.doctype, self.name, e), traceback
else:
raise
def db_set(self, fieldname, value, update_modified=True):
self.set(fieldname, value)
self.set("modified", now())
self.set("modified_by", frappe.session.user)
frappe.db.set_value(self.doctype, self.name, fieldname, value,
self.modified, self.modified_by, update_modified=update_modified)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} #{}: {}: {}".format(_("Error"), _("Row"), self.idx,
_("Value missing for"), _(df.label))
else:
return "{}: {}: {}".format(_("Error"), _("Value missing for"), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": 1}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
return missing
def get_invalid_links(self, is_submittable=False):
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in self.meta.get_link_fields() + self.meta.get("fields",
{"fieldtype":"Dynamic Link"}):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
value = frappe.db.get_value(doctype, docname, "name", cache=True)
setattr(self, df.fieldname, value)
if not value:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
if not filter(None, options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new():
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": 1})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
if self.get(fieldname) != values.get(fieldname):
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_update_after_submit(self):
db_values = frappe.db.get_value(self.doctype, self.name, "*", as_dict=True)
for key, db_value in db_values.iteritems():
df = self.meta.get_field(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def precision(self, fieldname, parentfield=None):
"""Returns float precision for a particular field (or get global default).
:param fieldname: Fieldname for which precision is required.
:param parentfield: If fieldname is in child table."""
from frappe.model.meta import get_field_precision
if parentfield and not isinstance(parentfield, basestring):
parentfield = parentfield.parentfield
cache_key = parentfield or "main"
if not hasattr(self, "_precision"):
self._precision = frappe._dict()
if cache_key not in self._precision:
self._precision[cache_key] = frappe._dict()
if fieldname not in self._precision[cache_key]:
self._precision[cache_key][fieldname] = None
doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
df = frappe.get_meta(doctype).get_field(fieldname)
if df.fieldtype in ("Currency", "Float", "Percent"):
self._precision[cache_key][fieldname] = get_field_precision(df, self)
return self._precision[cache_key][fieldname]
def get_formatted(self, fieldname, doc=None, currency=None):
from frappe.utils.formatters import format_value
df = self.meta.get_field(fieldname)
if not df and fieldname in default_fields:
from frappe.model.meta import get_default_df
df = get_default_df(fieldname)
return format_value(self.get(fieldname), df=df, doc=doc or self, currency=currency)
def is_print_hide(self, fieldname, df=None, for_print=True):
"""Returns true if fieldname is to be hidden for print.
Print Hide can be set via the Print Format Builder or in the controller as a list
of hidden fields. Example
class MyDoc(Document):
def __setup__(self):
self.print_hide = ["field1", "field2"]
:param fieldname: Fieldname to be checked if hidden.
"""
meta_df = self.meta.get_field(fieldname)
if meta_df and meta_df.get("__print_hide"):
return True
if df:
return df.print_hide
if meta_df:
return meta_df.print_hide
def in_format_data(self, fieldname):
"""Returns True if shown via Print Format::`format_data` property.
Called from within standard print format."""
doc = getattr(self, "parent_doc", self)
if hasattr(doc, "format_data_map"):
return fieldname in doc.format_data_map
else:
return True
def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
to_reset = []
for df in high_permlevel_fields:
if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
to_reset.append(df)
if to_reset:
if self.is_new():
# if new, set default value
ref_doc = frappe.new_doc(self.doctype)
else:
# get values from old doc
if self.parent:
self.parent_doc.get_latest()
ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
else:
ref_doc = self.get_latest()
for df in to_reset:
self.set(df.fieldname, ref_doc.get(df.fieldname))
def get_value(self, fieldname):
df = self.meta.get_field(fieldname)
val = self.get(fieldname)
return self.cast(val, df)
def cast(self, val, df):
if df.fieldtype in ("Currency", "Float", "Percent"):
val = flt(val, self.precision(df.fieldname))
elif df.fieldtype in ("Int", "Check"):
val = cint(val)
elif df.fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
val = cstr(val)
elif df.fieldtype == "Date":
val = getdate(val)
elif df.fieldtype == "Datetime":
val = get_datetime(val)
elif df.fieldtype == "Time":
val = to_timedelta(val)
return val
def _extract_images_from_text_editor(self):
from frappe.utils.file_manager import extract_images_from_doc
if self.doctype != "DocType":
for df in self.meta.get("fields", {"fieldtype":"Text Editor"}):
extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
"""pass filters as:
{"key": "val", "key": ["!=", "val"],
"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
"key" : True (exists), "key": False (does not exist) }"""
out = []
for d in data:
add = True
for f in filters:
fval = filters[f]
if fval is True:
fval = ("not None", fval)
elif fval is False:
fval = ("None", fval)
elif not isinstance(fval, (tuple, list)):
if isinstance(fval, basestring) and fval.startswith("^"):
fval = ("^", fval[1:])
else:
fval = ("=", fval)
if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
add = False
break
if add:
out.append(d)
if limit and (len(out)-1)==limit:
break
return out
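# Example (illustrative; the fieldnames are hypothetical), using the filter
# syntax described in the docstring above:
#   _filter(doc.get("items"), {"docstatus": 1, "item_code": "^ITM"})
# keeps child rows whose docstatus equals 1 and whose item_code starts
# with "ITM".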
|
mit
|
aferrugento/SemLDA
|
wsd.py
|
1
|
15115
|
from pywsd.lesk import adapted_lesk
from nltk.corpus import wordnet as wn
import pickle
import time
import sys
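# Note added for clarity (inferred from how `answer` is used below, not from
# pywsd documentation): adapted_lesk(..., nbest=True) is treated here as
# returning a ranked list of (score, Synset) pairs, i.e. answer[j][0] is the
# overlap score and answer[j][1].offset() is the WordNet offset used as the
# synset identifier.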
def main(file_name):
start = time.time()
#string = '/home/adriana/Dropbox/mine/Tese/preprocessing/data_output/'
#string = '/home/aferrugento/Desktop/'
string = ''
h = open(string + file_name + '_proc.txt')
sentences = h.read()
h.close()
extra_synsets = {}
sentences = sentences.split("\n")
for i in range(len(sentences)):
sentences[i] = sentences[i].split(" ")
for j in range(len(sentences[i])):
if sentences[i][j] == '':
continue
sentences[i][j] = sentences[i][j].split("_")[0]
for i in range(len(sentences)):
aux = ''
for j in range(len(sentences[i])):
aux += sentences[i][j] + ' '
sentences[i] = aux
word_count = pickle.load(open('word_count_new.p'))
synset_count = pickle.load(open('synset_count.p'))
word_count_corpus = calculate_word_frequency(sentences)
sum_word_corpus = 0
for key in word_count_corpus.keys():
sum_word_corpus += word_count_corpus.get(key)
sum_word = 0
for key in word_count.keys():
sum_word += word_count.get(key)
sum_synset = 0
for key in synset_count.keys():
sum_synset += synset_count.get(key)
word_list = []
for key in word_count.keys():
word_list.append(word_count.get(key))
synset_list = []
for key in synset_count.keys():
synset_list.append(synset_count.get(key))
word_list.sort()
synset_list.sort()
#print len(word_list), len(synset_list)
#print len(word_list)/2., len(synset_list)/2., (len(word_list)/2.) -1, (len(synset_list)/2.) -1
#print word_list[len(word_list)/2], word_list[(len(word_list)/2)-1]
#print synset_list[len(synset_list)/2], synset_list[(len(synset_list)/2)-1]
word_median = round(2./sum_word, 5)
synset_median = round(2./sum_synset, 5)
#print word_median, synset_median
#print sum_word, sum_synset
#return
#f = open(string + 'preprocess_semLDA_EPIA/NEWS2_snowballstopword_wordnetlemma_pos_freq.txt')
f = open(string + file_name +'_freq.txt')
m = f.read()
f.close()
m = m.split("\n")
for i in range(len(m)):
m[i] = m[i].split(" ")
count = 0
imag = -1
#f = open(string + 'preprocess_semLDA_EPIA/znew_eta_NEWS2.txt')
f = open(string + file_name + '_eta.txt')
g = f.read()
f.close()
g = g.split("\n")
for i in range(len(g)):
g[i] = g[i].split(" ")
dic_g = create_dicio(g)
g = open(string + file_name +'_wsd.txt','w')
#dictio = pickle.load(open(string + 'preprocess_semLDA_EPIA/NEWS2_snowballstopword_wordnetlemma_pos_vocab.p'))
dictio = pickle.load(open(string + file_name +'_vocab.p'))
nn = open(string + file_name +'_synsetVoc.txt','w')
synsets = {}
to_write = []
p = open(string + 'NEWS2_wsd.log','w')
for i in range(len(m)):
nana = str(m[i][0]) + ' '
print 'Doc ' + str(i)
p.write('---------- DOC ' +str(i) + ' ----------\n')
#words_probs = bayes_theorem(sentences[i], dictio, word_count, sum_word, word_median)
#return
#g.write(str(m[i][0]) + ' ')
for k in range(1, len(m[i])):
#print sentences[i]
if m[i][k] == '':
continue
#print dictio.get(int(m[i][k].split(":")[0])) + str(m[i][k].split(":")[0])
#print wn.synsets(dictio.get(int(m[i][k].split(":")[0])).split("_")[0], penn_to_wn(dictio.get(int(m[i][k].split(":")[0])).split("_")[1]))
			#if there are no synsets for that word
if len(wn.synsets(dictio.get(int(m[i][k].split(":")[0])).split("_")[0], penn_to_wn(dictio.get(int(m[i][k].split(":")[0])).split("_")[1]))) == 0:
nana += m[i][k]+":1[" +str(count)+":"+str(1)+"] "
synsets[imag] = count
extra_synsets[imag] = dictio.get(int(m[i][k].split(":")[0]))
#g.write(m[i][k]+":1[" +str(imag)+":"+str(1)+"] ")
imag -= 1
count += 1
continue
sent = sentences[i]
ambiguous = dictio.get(int(m[i][k].split(":")[0])).split("_")[0]
post = dictio.get(int(m[i][k].split(":")[0])).split("_")[1]
try:
answer = adapted_lesk(sent, ambiguous, pos= penn_to_wn(post), nbest=True)
except Exception, e:
				#in case lesk misbehaves (raises an exception)
s = wn.synsets(dictio.get(int(m[i][k].split(":")[0])).split("_")[0], penn_to_wn(dictio.get(int(m[i][k].split(":")[0])).split("_")[1]))
if len(s) != 0:
count2 = 0
					#check how many synsets exist in semcor
#for n in range(len(s)):
# if dic_g.has_key(str(s[n].offset)):
# words = dic_g.get(str(s[n].offset))
# for j in range(len(words)):
# if words[j].split(":")[0] == m[i][k].split(":")[0]:
# count2 += 1
					# if none exists, create an imaginary synset
#if count2 == 0:
# nana += m[i][k]+":1[" +str(count)+":"+str(1)+"] "
# synsets[imag] = count
# extra_synsets[imag] = dictio.get(int(m[i][k].split(":")[0]))
#g.write(m[i][k]+":1[" +str(imag)+":"+str(1)+"] ")
# count += 1
# imag -= 1
# continue
					#if some exist, fetch their probabilities from semcor
nana += m[i][k] +':'+ str(len(s)) + '['
c = 1
prob = 1.0/len(s)
for n in range(len(s)):
#print answer[n][1].offset
#print 'Coco ' + str(s[n].offset)
#if dic_g.has_key(str(s[n].offset)):
#words = dic_g.get(str(s[n].offset))
#for j in range(len(words)):
# if words[j].split(":")[0] == m[i][k].split(":")[0]:
# aux = 0
a = (s[n].offset())
#print s[n].offset()
if synsets.has_key(a):
aux = synsets.get(a)
else:
synsets[a] = count
aux = count
count += 1
if n == len(s) - 1:
nana += str(aux) + ':' + str(prob) + '] '
else:
nana += str(aux) + ':' + str(prob) + ' '
else:
nana += m[i][k]+":1[" +str(count)+":"+str(1)+"] "
synsets[imag] = count
extra_synsets[imag] = dictio.get(int(m[i][k].split(":")[0]))
#g.write(m[i][k]+":1[" +str(imag)+":"+str(1)+"] ")
count += 1
imag -= 1
continue
#g.write(m[i][k] +':'+ str(len(answer)) + '[')
total = 0
for j in range(len(answer)):
total += answer[j][0]
            # if lesk returns no answer, create an imaginary synset
if len(answer) == 0:
nana += m[i][k]+":1[" +str(count)+":"+str(1)+"] "
synsets[imag] = count
extra_synsets[imag] = dictio.get(int(m[i][k].split(":")[0]))
#g.write(m[i][k]+":1[" +str(imag)+":"+str(1)+"] ")
count += 1
imag -= 1
continue
#print ambiguous
#print total
#print answer
            # if none of the synsets has any overlap, look up their probabilities in semcor
if total == 0:
#print 'ZERO'
count2 = 0
#for n in range(len(answer)):
# if dic_g.has_key(str(answer[n][1].offset)):
# words = dic_g.get(str(answer[n][1].offset))
# for j in range(len(words)):
# if words[j].split(":")[0] == m[i][k].split(":")[0]:
# count2 += 1
#if count2 == 0:
# nana += m[i][k]+":1[" +str(count)+":"+str(1)+"] "
# synsets[imag] = count
# extra_synsets[imag] = dictio.get(int(m[i][k].split(":")[0]))
#g.write(m[i][k]+":1[" +str(imag)+":"+str(1)+"] ")
# count += 1
# imag -= 1
# continue
s = wn.synsets(dictio.get(int(m[i][k].split(":")[0])).split("_")[0], penn_to_wn(dictio.get(int(m[i][k].split(":")[0])).split("_")[1]))
nana += m[i][k] +':'+ str(len(s)) + '['
c = 1
prob = 1.0/len(s)
for n in range(len(s)):
#print answer[n][1].offset
#print 'Coco ' + str(s[n].offset)
#if dic_g.has_key(str(s[n].offset)):
#words = dic_g.get(str(s[n].offset))
#for j in range(len(words)):
# if words[j].split(":")[0] == m[i][k].split(":")[0]:
# aux = 0
a = (s[n].offset())
#print s[n].offset()
if synsets.has_key(a):
aux = synsets.get(a)
else:
synsets[a] = count
aux = count
count += 1
if n == len(s) - 1:
nana += str(aux) + ':' + str(prob) + '] '
else:
nana += str(aux) + ':' + str(prob) + ' '
#print nana
continue
            # count how many synsets have a non-zero score
count2 = 0
for j in range(len(answer)):
if answer[j][0] == 0:
continue
else:
count2 += 1
c = 1
nana += m[i][k] +':'+ str(count2) + '['
for j in range(len(answer)):
#words_synsets = words_probs.get(int(m[i][k].split(':')[0]))
#s.write(answer[j][1].offset+"\n")
if answer[j][0] == 0:
continue
aux = 0
a = (answer[j][1].offset())
#print 'Coco '+ str(answer[j][1].offset())
if synsets.has_key(a):
aux = synsets.get(a)
else:
synsets[a] = count
aux = count
count += 1
prob_s = 0.0
prob_w = 0.0
prob_s_w = float(answer[j][0])/total
#if synset_count.has_key(str(answer[j][1].offset)):
# prob_s = synset_count.get(str(answer[j][1].offset))/float(sum_synset)
#else:
# prob_s = 0.1
prob_s_s = 1.0/count2
#if word_count.has_key(dictio.get(int(m[i][k].split(":")[0]))):
# prob_w = word_count.get(dictio.get(int(m[i][k].split(":")[0])))/float(sum_word)
#else:
# prob_w = 0.1
if word_count_corpus.has_key(dictio.get(int(m[i][k].split(":")[0])).split("_")[0]):
prob_w = word_count_corpus.get(dictio.get(int(m[i][k].split(":")[0])).split("_")[0])/float(sum_word_corpus)
else:
prob_w = 0.1
prob_w_s = (prob_w * prob_s_w) / prob_s_s
if j == len(answer) - 1 or count2 == c:
if prob_w_s > 1.0:
#print 'Word: 'dictio.get(int(m[i][k].split(":")[0])) + ' Synset: ' + str(answer[j][1])
p.write('Word: '+ dictio.get(int(m[i][k].split(":")[0])) + ' Synset: ' + str(answer[j][1]))
#print 'Synsets disambiguated: ' + str(answer)
p.write('---- Synsets disambiguated: ' + str(answer))
#print synset_count.get(str(answer[j][1].offset)), word_count.get(dictio.get(int(m[i][k].split(":")[0]))), sum_synset, sum_word
#print 'P(s)=' +prob_s +', P(w)='+prob_w +', P(s|w)='+ prob_s_w +', P(w|s)='+ prob_w_s
p.write('---- P(s)=' +str(prob_s) +', P(w)='+ str(prob_w) +', P(s|w)='+ str(prob_s_w) +', P(w|s)='+ str(prob_w_s))
p.write("\n")
nana += str(aux) + ':' + str(1) + '] '
#nana += str(aux) + ':' + str(words_synsets.get(answer[j][1].offset)) + '] '
else:
nana += str(aux) + ':' + str(prob_w_s) + '] '
#g.write(str(aux) + ':' + str(float(answer[j][0]/total)) + '] ')
else:
c += 1
if prob_w_s > 1.0:
#print 'Word: 'dictio.get(int(m[i][k].split(":")[0])) + ' Synset: ' + str(answer[j][1])
p.write('Word: '+ dictio.get(int(m[i][k].split(":")[0])) + ' Synset: ' + str(answer[j][1]))
#print 'Synsets disambiguated: ' + str(answer)
p.write('---- Synsets disambiguated: ' + str(answer))
#print synset_count.get(str(answer[j][1].offset)), word_count.get(dictio.get(int(m[i][k].split(":")[0]))), sum_synset, sum_word
#print 'P(s)=' +prob_s +', P(w)='+prob_w +', P(s|w)='+ prob_s_w +', P(w|s)='+ prob_w_s
p.write('---- P(s)=' +str(prob_s) +', P(w)='+ str(prob_w) +', P(s|w)='+ str(prob_s_w) +', P(w|s)='+ str(prob_w_s))
p.write("\n")
nana += str(aux) + ':' + str(1) + '] '
#nana += str(aux) + ':' + str(words_synsets.get(answer[j][1].offset)) +' '
else:
nana += str(aux) + ':' + str(prob_w_s) +' '
#g.write(str(aux) + ':' + str(float(answer[j][0]/total)) +' ')
nana += '\n'
#print nana
#return
to_write.append(nana)
#g.write("\n")
ne = revert_dicio(synsets)
for i in range(len(ne)):
#print ne.get(i), type(ne.get(i))
nn.write(str(ne.get(i))+'\n')
g.write(str(len(ne))+"\n")
for i in range(len(to_write)):
g.write(to_write[i])
nn.close()
p.close()
g.close()
end = time.time()
pickle.dump(extra_synsets, open(string + file_name +"_imag.p","w"))
print end - start
def calculate_word_frequency(corpus):
word_count_dict = {}
for i in range(len(corpus)):
for j in range(len(corpus[i])):
if word_count_dict.has_key(corpus[i][j]):
aux = word_count_dict.get(corpus[i][j])
word_count_dict[corpus[i][j]] = aux + 1
else:
word_count_dict[corpus[i][j]] = 1
return word_count_dict
#bayes_theorem(sentences[i], dictio, synset_count, word_count, sum_synset, sum_word, synset_median, word_median)
def bayes_theorem(context, vocab, word_count, sum_word, word_median):
words_probs = {}
print len(vocab)
count = 0
for word in vocab:
if count%1000 == 0:
print 'word ' + str(count)
count += 1
sent = context
ambiguous = vocab.get(word).split("_")[0]
post = vocab.get(word).split("_")[1]
#print ambiguous, post
try:
answer = adapted_lesk(sent, ambiguous, pos= penn_to_wn(post), nbest=True)
except Exception, e:
continue
total = 0
for j in range(len(answer)):
total += answer[j][0]
if total == 0:
continue
for j in range(len(answer)):
if answer[j][0] == 0:
continue
prob_w = 0.0
prob_s_w = float(answer[j][0])/total
if word_count.has_key(vocab.get(word)):
prob_w = word_count.get(vocab.get(word))/float(sum_word)
else:
prob_w = word_median
prob_w_s = prob_s_w * prob_w
if words_probs.has_key(word):
aux = words_probs.get(word)
aux[int(answer[j][1].offset)] = prob_w_s
words_probs[word] = aux
else:
aux = {}
aux[int(answer[j][1].offset)] = prob_w_s
words_probs[word] = aux
#print words_probs
synsets_probs = {}
for word in words_probs:
for synset in words_probs.get(word):
if synsets_probs.has_key(synset):
aux = synsets_probs.get(synset)
aux[word] = words_probs.get(word).get(synset)
synsets_probs[synset] = aux
else:
aux = {}
aux[word] = words_probs.get(word).get(synset)
synsets_probs[synset] = aux
for synset in synsets_probs:
sum_words = 0.0
for word in synsets_probs.get(synset):
sum_words += synsets_probs.get(synset).get(word)
for word in synsets_probs.get(synset):
aux = synsets_probs.get(synset).get(word)
synsets_probs.get(synset)[word] = float(aux)/sum_words
new_word_probs = {}
for word in synsets_probs:
for synset in synsets_probs.get(word):
if new_word_probs.has_key(synset):
aux = new_word_probs.get(synset)
aux[word] = synsets_probs.get(word).get(synset)
new_word_probs[synset] = aux
else:
aux = {}
aux[word] = synsets_probs.get(word).get(synset)
new_word_probs[synset] = aux
return new_word_probs
def create_dicio(eta):
dictio = {}
for i in range(len(eta)-1):
for j in range(2, len(eta[i])):
if dictio.has_key(eta[i][0]):
aux = dictio.get(eta[i][0])
aux.append(eta[i][j])
dictio[eta[i][0]] = aux
else:
aux = []
aux.append(eta[i][j])
dictio[eta[i][0]] = aux
return dictio
def revert_dicio(words_ids):
new_dictio = {}
for key in words_ids:
new_dictio[words_ids[key]] = key
return new_dictio
def is_noun(tag):
return tag in ['NN', 'NNS', 'NNP', 'NNPS']
def is_verb(tag):
return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']
def is_adverb(tag):
return tag in ['RB', 'RBR', 'RBS']
def is_adjective(tag):
return tag in ['JJ', 'JJR', 'JJS']
def penn_to_wn(tag):
if is_adjective(tag):
return wn.ADJ
elif is_noun(tag):
return wn.NOUN
elif is_adverb(tag):
return wn.ADV
elif is_verb(tag):
return wn.VERB
return None
if __name__ == "__main__":
main(sys.argv[1])
|
lgpl-2.1
|
feoff3/compute-image-packages
|
google-daemon/usr/share/google/google_daemon/utils.py
|
3
|
6189
|
#!/usr/bin/python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library functions and interfaces for manipulating accounts."""
import errno
import fcntl
import logging
import logging.handlers
import os
import shutil
import subprocess
import sys
import tempfile
class RunCommandException(Exception):
"""Could not run a command."""
pass
class System(object):
"""Interface for interacting with the system."""
def __init__(self, subprocess_module=subprocess, os_module=os):
self.subprocess = subprocess_module
self.os = os_module
def MakeLoggingHandler(self, prefix, facility):
"""Make a logging handler to send logs to syslog."""
handler = logging.handlers.SysLogHandler(
address='/dev/log', facility=facility)
formatter = logging.Formatter(prefix + ': %(levelname)s %(message)s')
handler.setFormatter(formatter)
return handler
def SetLoggingHandler(self, logger, handler):
"""Setup logging w/ a specific handler."""
handler.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def EnableDebugLogging(self, logger):
debug_handler = logging.StreamHandler(sys.stdout)
debug_handler.setLevel(logging.DEBUG)
logger.addHandler(debug_handler)
logger.setLevel(logging.DEBUG)
def OpenFile(self, *args, **kwargs):
return open(*args, **kwargs)
def MoveFile(self, src, dst):
return shutil.move(src, dst)
def CreateTempFile(self, delete=True):
return tempfile.NamedTemporaryFile(delete=delete)
def UserAdd(self, user, groups):
logging.info('Creating account %s', user)
# We must set the crypto passwd via useradd to '*' to make ssh work
# on Linux systems without PAM.
#
# Unfortunately, there is no spec that I can find that defines how
# this stuff is used and from the manpage of shadow it says that "!"
# or "*" or any other invalid crypt can be used.
#
# ssh just takes it upon itself to use "!" as its locked account token:
# https://github.com/openssh/openssh-portable/blob/master/configure.ac#L705
#
# If '!' token is used then it simply denies logins:
# https://github.com/openssh/openssh-portable/blob/master/auth.c#L151
#
# To solve the issue make the passwd '*' which is also recognized as
# locked but doesn't prevent ssh logins.
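    # Illustrative (assumed) effect of passing '-p', '*': the created
    # /etc/shadow entry looks roughly like
    #   exampleuser:*:16283:0:99999:7:::
    # i.e. no usable password hash, but sshd does not treat the account as locked.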
result = self.RunCommand([
'/usr/sbin/useradd', user, '-m', '-s', '/bin/bash', '-p', '*', '-G',
','.join(groups)])
if self.RunCommandFailed(result, 'Could not create user %s', user):
return False
return True
def IsValidSudoersFile(self, filename):
result = self.RunCommand(['/usr/sbin/visudo', '-c', '-f', filename])
if result[0] != 0:
      with self.OpenFile(filename, 'r') as f:
contents = f.read()
self.RunCommandFailed(
result, 'Could not produce valid sudoers file\n%s' % contents)
return False
return True
def IsExecutable(self, path):
"""Return whether path exists and is an executable binary."""
return self.os.path.isfile(path) and self.os.access(path, os.X_OK)
def RunCommand(self, args):
"""Run a command, return a retcode, stdout, stderr tuple."""
try:
p = self.subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout, stderr)
except OSError, e:
raise RunCommandException('Could not run %s due to %s' % (args, e))
def RunCommandFailed(self, result, *msg_args):
retcode, stdout, stderr = result
if retcode != 0:
logging.warning('%s\nSTDOUT:\n%s\nSTDERR:\n%s\n',
msg_args[0] % msg_args[1:], stdout, stderr)
return True
return False
class CouldNotLockException(Exception):
"""Someone else seems to be holding the lock."""
pass
class UnexpectedLockException(Exception):
"""We genuinely failed to lock the file."""
pass
class CouldNotUnlockException(Exception):
"""Someone else seems to be holding the lock."""
pass
class UnexpectedUnlockException(Exception):
"""We genuinely failed to unlock the file."""
pass
class LockFile(object):
"""Lock a file to prevent multiple concurrent executions."""
def __init__(self, fcntl_module=fcntl):
self.fcntl_module = fcntl_module
def RunExclusively(self, lock_fname, method):
try:
self.Lock(lock_fname)
method()
self.Unlock()
except CouldNotLockException:
logging.warning(
'Could not lock %s. Is it locked by another program?',
lock_fname)
except UnexpectedLockException as e:
logging.warning(
'Could not lock %s due to %s', lock_fname, e)
except CouldNotUnlockException:
logging.warning(
'Could not unlock %s. Is it locked by another program?',
lock_fname)
except UnexpectedUnlockException as e:
logging.warning(
'Could not unlock %s due to %s', lock_fname, e)
def Lock(self, lock_fname):
"""Lock the lock file."""
try:
self.fh = open(lock_fname, 'w+b')
self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
except IOError as e:
if e.errno == errno.EWOULDBLOCK:
raise CouldNotLockException()
raise UnexpectedLockException('Failed to lock: %s' % e)
def Unlock(self):
"""Unlock the lock file."""
try:
self.fcntl_module.flock(self.fh.fileno(), fcntl.LOCK_UN|fcntl.LOCK_NB)
except IOError as e:
if e.errno == errno.EWOULDBLOCK:
raise CouldNotUnlockException()
raise UnexpectedUnlockException('Failed to unlock: %s' % e)
|
apache-2.0
|
akirk/youtube-dl
|
youtube_dl/extractor/dreisat.py
|
1
|
3607
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
determine_ext,
)
class DreiSatIE(InfoExtractor):
IE_NAME = '3sat'
_VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
_TESTS = [
{
'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
'md5': 'be37228896d30a88f315b638900a026e',
'info_dict': {
'id': '45918',
'ext': 'mp4',
'title': 'Waidmannsheil',
'description': 'md5:cce00ca1d70e21425e72c86a98a56817',
'uploader': '3sat',
'upload_date': '20140913'
}
},
{
'url': 'http://www.3sat.de/mediathek/mediathek.php?mode=play&obj=51066',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
details_doc = self._download_xml(details_url, video_id, 'Downloading video details')
status_code = details_doc.find('./status/statuscode')
if status_code is not None and status_code.text != 'ok':
code = status_code.text
if code == 'notVisibleAnymore':
message = 'Video %s is not available' % video_id
else:
message = '%s returned error: %s' % (self.IE_NAME, code)
raise ExtractorError(message, expected=True)
thumbnail_els = details_doc.findall('.//teaserimage')
thumbnails = [{
'width': int(te.attrib['key'].partition('x')[0]),
'height': int(te.attrib['key'].partition('x')[2]),
'url': te.text,
} for te in thumbnail_els]
information_el = details_doc.find('.//information')
video_title = information_el.find('./title').text
video_description = information_el.find('./detail').text
details_el = details_doc.find('.//details')
video_uploader = details_el.find('./channel').text
upload_date = unified_strdate(details_el.find('./airtime').text)
format_els = details_doc.findall('.//formitaet')
formats = []
for fe in format_els:
if fe.find('./url').text.startswith('http://www.metafilegenerator.de/'):
continue
url = fe.find('./url').text
# ext = determine_ext(url, None)
# if ext == 'meta':
# doc = self._download_xml(url, video_id, 'Getting rtmp URL')
# url = doc.find('./default-stream-url').text
formats.append({
'format_id': fe.attrib['basetype'],
'width': int(fe.find('./width').text),
'height': int(fe.find('./height').text),
'url': url,
'filesize': int(fe.find('./filesize').text),
'video_bitrate': int(fe.find('./videoBitrate').text),
})
self._sort_formats(formats)
return {
'_type': 'video',
'id': video_id,
'title': video_title,
'formats': formats,
'description': video_description,
'thumbnails': thumbnails,
'thumbnail': thumbnails[-1]['url'],
'uploader': video_uploader,
'upload_date': upload_date,
}
|
unlicense
|
phani00/tovp
|
tovp/promotions/migrations/0003_guruparamparabrick.py
|
2
|
1850
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import model_utils.fields
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('contributions', '0008_auto_20150219_1555'),
('promotions', '0002_auto_20150207_1138'),
]
operations = [
migrations.CreateModel(
name='GuruParamparaBrick',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('certificate_given', models.BooleanField(db_index=True, help_text='Has certificate for this promotion been given?', default=False)),
('certificate_given_date', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='certificate_given')),
('coin_given', models.BooleanField(db_index=True, help_text='Has coin for this promotion been given?', default=False)),
('coin_given_date', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='coin_given')),
('name_on_brick', models.TextField(help_text='Enter name which will be on the brick. Maximum 100 characters.', max_length=100, blank=True, verbose_name='Name on the brick')),
('pledge', models.ForeignKey(to='contributions.Pledge', related_name='+', verbose_name='Pledge')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
mit
|
svr93/node-gyp
|
gyp/pylib/gyp/easy_xml.py
|
1
|
4891
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
# Fix encoding
xml_string = unicode(xml_string, 'Windows-1251').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
    '"': '&quot;',
    "'": '&apos;',
    '<': '&lt;',
    '>': '&gt;',
    '&': '&amp;',
    '\n': '&#xA;',
    '\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
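# Illustrative (assumed) behaviour of _XmlEscape, given the escape map above:
#   _XmlEscape('a<b & "c"')        -> 'a&lt;b &amp; &quot;c&quot;'
#   _XmlEscape("it's", attr=True)  -> "it's"   (single quotes are kept inside attributes)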
|
mit
|
zuotingbing/spark
|
examples/src/main/python/ml/multilayer_perceptron_classification.py
|
123
|
2172
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder.appName("multilayer_perceptron_classification_example").getOrCreate()
# $example on$
# Load training data
data = spark.read.format("libsvm")\
.load("data/mllib/sample_multiclass_classification_data.txt")
# Split the data into train and test
splits = data.randomSplit([0.6, 0.4], 1234)
train = splits[0]
test = splits[1]
# specify layers for the neural network:
# input layer of size 4 (features), two intermediate of size 5 and 4
# and output of size 3 (classes)
layers = [4, 5, 4, 3]
# create the trainer and set its parameters
trainer = MultilayerPerceptronClassifier(maxIter=100, layers=layers, blockSize=128, seed=1234)
# train the model
model = trainer.fit(train)
# compute accuracy on the test set
result = model.transform(test)
predictionAndLabels = result.select("prediction", "label")
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
print("Test set accuracy = " + str(evaluator.evaluate(predictionAndLabels)))
# $example off$
spark.stop()
|
apache-2.0
|