| text | repo_name | path | language | license | size | score |
| --- | --- | --- | --- | --- | --- | --- |
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
import graph
import dot
from core import *
import dataflow
def make_inst(g, addr, dest, op, *args):
def make_arg(a):
if a is None:
return None
if isinstance(a, int):
return VALUE(a)
if isinstance(a, str):
return REG(a)
return a
b = BBlock(addr)
args = [make_arg(a) for a in args]
b.add(Inst(make_arg(dest), op, args, addr))
g.add_node(addr, val=b)
def test_nielson_2_1_4():
g = graph.Graph()
make_inst(g, 1, "x", "=", 2)
make_inst(g, 2, "y", "=", 4)
make_inst(g, 3, "x", "=", 1)
make_inst(g, 4, None, "if", COND(EXPR(">", REG("x"), REG("y"))))
make_inst(g, 5, "z", "=", REG("y"))
make_inst(g, 6, "z", "*", REG("y"), REG("y"))
make_inst(g, 7, "x", "=", REG("z"))
g.add_edge(1, 2)
g.add_edge(2, 3)
g.add_edge(3, 4)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(5, 7)
g.add_edge(6, 7)
#dot.dot(g)
#ana = dataflow.LiveVarAnalysis(g)
#ana.init()
#g.print_nodes()
#print("===")
ana = dataflow.LiveVarAnalysis(g)
ana.solve()
#g.print_nodes()
LV_entry = {
1: set(),
2: set(),
3: {REG("y")},
4: {REG("x"), REG("y")},
5: {REG("y")},
6: {REG("y")},
7: {REG("z")},
}
LV_exit = {
1: set(),
2: {REG("y")},
3: {REG("x"), REG("y")},
4: {REG("y")},
5: {REG("z")},
6: {REG("z")},
7: set(),
}
GEN_LV = {
1: set(),
2: set(),
3: set(),
4: {REG("x"), REG("y")},
5: {REG("y")},
6: {REG("y")},
7: {REG("z")},
}
KILL_LV = {
1: {REG("x")},
2: {REG("y")},
3: {REG("x")},
4: set(),
5: {REG("z")},
6: {REG("z")},
7: {REG("x")},
}
for i, info in g.iter_sorted_nodes():
assert info["live_gen"] == GEN_LV[i]
assert info["live_kill"] == KILL_LV[i]
assert info["live_in"] == LV_entry[i], (info["live_in"], LV_entry[i])
assert info["live_out"] == LV_exit[i]
| pfalcon/ScratchABlock | tests_unit/test_liveness.py | Python | gpl-3.0 | 2,105 | 0.003325 |
from ecgmeasure import ECGMeasure
import pandas as pd
import numpy as np
# need to test what happens when have too little data to create a chunk
# need to throw an exception if have too little data
def get_raw_data():
""".. function :: get_raw_data()
Creates dataframe with raw data.
"""
times = [x*0.1 for x in range(0, 10*50)]
voltages = []
for x in range(0, 10):
for ii in range(0, 25+1):
voltages.append(ii)
for jj in range(24, 0, -1):
voltages.append(jj)
raw_data = pd.DataFrame({'time': times, 'voltage': voltages})
return raw_data
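# Shape of the signal above (an observation, not part of the original test flow):
# a triangular wave that ramps 0..25 and back down to 1, repeated 10 times at
# 0.1 s per sample, so its peak is 25; this is presumably why
# test_thresholdhr_unchanging() below expects a constant threshold of 0.9*25.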
def test_thresholdhr_unchanging():
""".. function:: test_thresholdhr_unchanging()
Test that threshold is the same for all chunks of the raw data.
"""
thr = []
for x in range(0, 10):
thr.append(0.9*25)
thresholds = np.array(thr)
chunk = 50
num_chunks = 10
biomeasure = ECGMeasure(file_bool=True, argument="test_hr.csv")
# biomeasure.__hr_rawdata = get_raw_data()
#print(biomeasure.__hr_rawdata)
biomeasure.thresholdhr()
[t, c, n] = biomeasure.data
t_list = t.values.T.tolist()[0]
assert (t_list == thresholds).all()
assert c == chunk
assert n == num_chunks
def get_test_hr1():
""".. function:: get_test_hr1()
Adds heartrate information to dataframe.
"""
initial_messages = []
hrs = []
for ii in range(0, 10):
hrs.append(1/5*60)
initial_messages.append('Bradycardia Detected')
test_hr1 = pd.DataFrame({'HeartRate': hrs, 'B/T': initial_messages, 'time': list(range(0, 50, 5))})
return test_hr1
def test_hrdetector():
""".. function:: test_hrdetector()
Test that hrdetector() correctly detects brady/tachycardia.
"""
biomeasure = ECGMeasure(file_bool=True, argument="test_hr.csv")
# biomeasure.__raw_data = get_raw_data()
test_hr1 = get_test_hr1()
biomeasure.hrdetector()
biomeasure.detect_rhythm()
assert (biomeasure.data['HeartRate'] == test_hr1['HeartRate']).all()
assert (biomeasure.data['B/T'] == test_hr1['B/T']).all()
| raspearsy/bme590hrm | test_hr.py | Python | mit | 2,111 | 0.000947 |
from django.db import models
from versatileimagefield.fields import VersatileImageField
from versatileimagefield.placeholder import OnStoragePlaceholderImage
class VersatileImagePostProcessorTestModel(models.Model):
"""A model for testing VersatileImageFields."""
image = VersatileImageField(
upload_to='./',
blank=True,
placeholder_image=OnStoragePlaceholderImage(
path='on-storage-placeholder/placeholder.png'
)
)
class Meta:
verbose_name = 'foo'
verbose_name_plural = 'foos'
| respondcreate/django-versatileimagefield | tests/post_processor/models.py | Python | mit | 559 | 0 |
from os import walk
from os.path import basename, splitext, dirname, join, exists
from glob import glob
import importlib
from inspect import getmembers, isclass
import sverchok
from sverchok.utils.testing import *
from sverchok.utils.logging import debug, info, error
from sverchok.node_tree import SverchCustomTreeNode
class UiTests(SverchokTestCase):
def test_all_nodes_have_icons(self):
def has_icon(node_class):
has_sv_icon = hasattr(node_class, "sv_icon")
has_bl_icon = hasattr(node_class, "bl_icon") and node_class.bl_icon and node_class.bl_icon != 'OUTLINER_OB_EMPTY'
#debug("Icon: %s: BL %s, SV %s", node_class.__name__, getattr(node_class, 'bl_icon', None), getattr(node_class, 'sv_icon', None))
return has_sv_icon or has_bl_icon
ignore_list = [
'SvIterationNode',
'SvExMinimalScalarFieldNode',
'SvExScalarFieldGraphNode',
'SvMeshSurfaceFieldNode',
'SvExMeshNormalFieldNode',
'SvExMinimalVectorFieldNode',
'SvSolidCenterOfMassNode',
'SvIsSolidClosedNode',
'SvRefineSolidNode',
'SvSolidValidateNode'
]
sv_init = sverchok.__file__
nodes_dir = join(dirname(sv_init), "nodes")
def check_category(directory):
category = basename(directory)
from sverchok.node_tree import SverchCustomTreeNode
for py_path in glob(join(directory, "*.py")):
py_file = basename(py_path)
py_name, ext = splitext(py_file)
module = importlib.import_module(f"sverchok.nodes.{category}.{py_name}")
for node_class_name, node_class in getmembers(module, isclass):
if node_class.__module__ != module.__name__:
continue
if node_class_name in ignore_list:
continue
debug("Check: %s: %s: %s", node_class, node_class.__bases__, SverchCustomTreeNode in node_class.__bases__)
if SverchCustomTreeNode in node_class.mro():
with self.subTest(node = node_class_name):
if not has_icon(node_class):
self.fail(f"Node <{node_class_name}> does not have icon!")
for directory, subdirs, fnames in walk(nodes_dir):
dir_name = basename(directory)
if dir_name == "nodes":
continue
with self.subTest(directory=dir_name):
check_category(directory)
| DolphinDream/sverchok | tests/ui_tests.py | Python | gpl-3.0 | 2,714 | 0.004422 |
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class KyzsPipeline(object):
def process_item(self, item, spider):
return item
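# To activate this pipeline, it would be registered in the project's
# settings.py (a sketch; the priority value 300 is arbitrary):
#   ITEM_PIPELINES = {'kyzs.pipelines.KyzsPipeline': 300}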
| MasLinoma/test | kyzs/kyzs/pipelines.py | Python | gpl-2.0 | 258 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#important: before running this demo, make certain that you import the library
#'paho.mqtt.client' into python (https://pypi.python.org/pypi/paho-mqtt)
#also make certain that ATT_IOT is in the same directory as this script.
import traceback # for logging exceptions
import logging
logging.getLogger().setLevel(logging.INFO) #before doing anything else, set the desired logging level, so all modules log correctly.
from ConfigParser import *
import RPi.GPIO as GPIO #provides pin support
import ATT_IOT as IOT #provide cloud support
from time import sleep #pause the app
import picamera
import cameraStreamer
import sys
import datetime # for generating a unique file name
ConfigName = 'rpicamera.config'
hasLISIPAROI = False
LISIPAROIPin = 4
streamer = None
camera = None
PreviewId = 1 # turn on/off preview on the stream server
RecordId = 2 # turn on/off recording on disk
StreamServerId = 3 # assign the destination to stream the video to.
ToggleLISIPAROIId = 4
PictureId = 5
_isPreview = False
_isRecording = False
def tryLoadConfig():
'load the config from file'
global hasLISIPAROI, LISIPAROIPin
c = ConfigParser()
if c.read(ConfigName):
#set up the ATT internet of things platform
IOT.DeviceId = c.get('cloud', 'deviceId')
IOT.ClientId = c.get('cloud', 'clientId')
IOT.ClientKey = c.get('cloud', 'clientKey')
hasLISIPAROI = c.getboolean('camera', 'has LISIPAROI')
logging.info("has LISIPAROI:" + str(hasLISIPAROI) )
if hasLISIPAROI:
LISIPAROIPin = int(c.get('camera', 'LISIPAROI pin'))
logging.info("LISIPAROI pin:" + str(LISIPAROIPin) )
return True
else:
return False
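# A minimal example of what 'rpicamera.config' could look like, inferred from
# the keys read above (all values here are placeholders, not real credentials):
#
#   [cloud]
#   deviceId = <ATT IoT device id>
#   clientId = <ATT IoT client id>
#   clientKey = <ATT IoT client key>
#
#   [camera]
#   has LISIPAROI = true
#   LISIPAROI pin = 4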
def setupCamera():
'create the camera responsible for recording video and streaming object responsible for sending it to the server.'
global streamer, camera
camera = picamera.PiCamera()
camera.resolution = (640, 480)
camera.framerate = 30
streamer = cameraStreamer.CameraStreamer(camera)
def setBacklight(value):
'''turn on/off the backlight
value: string ('true' or 'false')
returns: true when input was successfully processed, otherwise false
'''
if value == "true":
GPIO.output(LISIPAROIPin, GPIO.HIGH)
elif value == "false":
GPIO.output(LISIPAROIPin, GPIO.LOW)
else:
print("unknown value: " + value)
IOT.send(value, ToggleLISIPAROIId) #provide feedback to the cloud that the operation was successful
def setPreview(value):
global _isPreview
if _isRecording:
print("recording not allowed during preview, shutting down recording.")
setRecord(False)
if value == "true":
_isPreview = True
streamer.start_preview()
elif value == "false":
_isPreview = False
streamer.stop_preview()
else:
print("unknown value: " + value)
IOT.send(value, PreviewId) #provide feedback to the cloud that the operation was successful
def setRecord(value):
global _isRecording
if _isPreview:
print("preview not allowed during recording, shutting down preview.")
setPreview(False)
if value == "true":
_isRecording = True
camera.resolution = (1920, 1080) #set to max resolution for recording
camera.start_recording('video' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.h264')
elif value == "false":
camera.stop_recording()
camera.resolution = (640, 480) #reset resolution for preview
_isRecording = False
else:
print("unknown value: " + value)
IOT.send(value, RecordId) #provide feedback to the cloud that the operation was successful
def takePicture():
'take a single picture, max resolution'
prevWasPreview = _isPreview
prevWasRecording = _isRecording
if _isRecording:
print("record not allowed while taking picture.")
setRecord(False)
if not _isPreview:
print("preview required for taking picture.")
setPreview(True)
sleep(2) # if preview was not running yet, give it some time to startup
camera.capture('picture' + datetime.datetime.now().strftime("%d_%b_%Y_%H_%M_%S") + '.jpg')
if prevWasPreview:
print("reactivating preview.")
setPreview(True)
elif prevWasRecording:
print("reactivating record.")
setRecord(True)
#callback: handles values sent from the cloudapp to the device
def on_message(id, value):
if id.endswith(str(ToggleLISIPAROIId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setBacklight(value)
elif id.endswith(str(PreviewId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setPreview(value)
elif id.endswith(str(RecordId)) == True:
value = value.lower() #make certain that the value is in lower case, for 'True' vs 'true'
setRecord(value)
elif id.endswith(str(StreamServerId)) == True:
streamer.streamServerIp = value
IOT.send(value, StreamServerId) #provide feedback to the cloud that the operation was successful
elif id.endswith(str(PictureId)) == True:
if value.lower() == "true":
takePicture()
else:
print("unknown actuator: " + id)
def setupCloud():
IOT.on_message = on_message
#make certain that the device & its features are defined in the cloudapp
IOT.connect()
if hasLISIPAROI:
IOT.addAsset(ToggleLISIPAROIId, "LISIPAROI", "Control the light on the camera", False, "boolean")
IOT.addAsset(PreviewId, "Preview", "Show/close a preview on the monitor that is connected to the RPI", True, "boolean")
IOT.addAsset(RecordId, "Record", "Start/stop recording the video stream on sd-card", True, "boolean")
IOT.addAsset(PictureId, "Picture", "take a picture (max resolution) and store on sd-card", True, "boolean")
IOT.addAsset(StreamServerId, "Stream server", "set the ip address of the server that manages the video", True, "string")
# get any previously defined settings
streamer.streamServerIp = IOT.getAssetState(StreamServerId)
if streamer.streamServerIp:
streamer.streamServerIp = streamer.streamServerIp['state']['value']
logging.info("sending stream to: " + streamer.streamServerIp)
else:
logging.info("no stream endpoint defined")
IOT.subscribe() #starts the bi-directional communication
# set current state of the device
IOT.send("false", ToggleLISIPAROIId)
IOT.send("false", PreviewId)
IOT.send("false", RecordId)
tryLoadConfig()
setupCamera() # needs to be done before setting up the cloud, cause we will get the settings from the cloud and assign them to the camera.
setupCloud()
if hasLISIPAROI:
try:
#setup GPIO using Board numbering
#GPIO.setmode(GPIO.BCM)
GPIO.setmode(GPIO.BOARD)
#set up the pins
GPIO.setup(LISIPAROIPin, GPIO.OUT)
except:
logging.error(traceback.format_exc())
#main loop: run as long as the device is turned on
while True:
#main thread doesn't have to do much, all is handled on the thread calling the message handler (for the actuators)
sleep(5)
| ATT-JBO/RPICameraRemote | RPICamera/RPICamera/RPICameraRemote.py | Python | mit | 7,821 | 0.009973 |
import sys
import os
import time
import logging
import socket
import string
import collections
import logging
import atexit
__version__ = "1.1.26"
__all__ = ['main','amqp']
class client_interface(object):
def get_cell(self, key, value=None):
"""Returns the contents of the cell"""
raise NotImplementedError("""get_cell(self, key, value=None)""")
def set_cell(self, key, value=None):
"""Set the contents of the cell"""
raise NotImplementedError("""set_cell(self, key, value=None)""")
def get_prop(self, key, prop, value=None):
"""Returns the contents of the cell"""
raise NotImplementedError("""get_prop(self, key, prop, value=None)""")
def set_prop(self, key, prop, value=None):
"""Set the contents of the cell"""
raise NotImplementedError("""set_prop(self, key, prop, value=None)""")
def emergency_exit(status=1, msg=None, ):
"""Force an exit"""
if msg:
print msg
os._exit(status)
def trace_log_info(f, *args, **kw):
"""Trace function invocation"""
logger.info("calling %s with args %s, %s" % (f.__name__, args, kw))
return f(*args, **kw)
class base_dictionary(collections.MutableMapping):
"""A dictionary that applies an arbitrary key-altering
function before accessing the keys"""
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs)) # use the free update to set keys
def __getitem__(self, key):
return self.store[self.__keytransform__(key)]
def __setitem__(self, key, value):
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
del self.store[self.__keytransform__(key)]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
def __keytransform__(self, key):
return key.lower()
synapse = base_dictionary()
synapse_process_id = "%s-%d" % (socket.gethostname(), os.getpid())
synapse_title = "Synapse Console Interface v" + __version__
synapse_ps1 = 'sc> '
synapse_ps2 = '.... '
synapse_prompts = {'ps1':'sc> ', 'ps2':'.... '}
synapse_exit_prompt = "Use exit() plus Return to exit."
synapse_dict_list = []
synapse_sheets = {}
synapse_current_cell_engine_context = None
synapse_current_cell_engine = None
get_logger_file = 'synapse.log'
get_logger_level = logging.WARNING
def initialize_logger(name, file=get_logger_file, level=get_logger_level):
# create logger with 'name'
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
if file:
fh = logging.FileHandler(file)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(level)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if file:
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
if file:
logger.addHandler(fh)
logger.addHandler(ch)
return logger
def get_logger():
"""Returns the current logger"""
global logger
return logger;
class cell_dictionary(base_dictionary,client_interface):
"""synapse Dictionary with Formulas and Guards"""
__formulas__ = base_dictionary()
__guards__ = base_dictionary()
__props__ = base_dictionary()
__engine__ = None
def set_formula(self, name, formula):
"""
Sets the formula function for a cell.
:param name: the name of the cell as string
:param formula: a function that takes (key,value) where key=cell, value an optional value
:return: None
"""
if formula == None:
del self.__formulas__[name]
else:
self.__formulas__[name] = formula
def set_guard(self, name, guard):
"""
Sets a guard function for a cell.
:param name: the name of the cell as string
:param guard: a function that takes (key,value) where key=cell, and value=value for the cell
:return: None
"""
if guard == None:
del self.__guards__[name]
else:
self.__guards__[name] = guard
def set_cell(self, key, value):
"""
Set the value of a cell
:param key: the name of the cell
:param value: the value for the cell
:return: the current value cell
"""
self.__setitem__(key, value)
return self.__getitem__(key, value)
def get_cell(self, key, value=None):
"""
Returns the current value of a cell
:param key: the name of the cell as a string
:param value: an optional value that may be passed to the cell's formula
:return: the current value of the cell
"""
return self.__getitem__(key, value)
def set_prop(self, key, prop, value):
"""
Sets a cell's named property to a value
:param key: the name of the cell as a string
:param prop: the name of the property as a string
:param value: the current value of the property
:return:
"""
key = self.__keytransform__(key)
if not(key in self.__props__):
self.__props__[key] = base_dictionary()
props = self.__props__[key]
props[prop] = value
return props[prop]
def get_prop(self, key, prop):
"""
Returns the current value of a cell's property
:param key: the name of the cell as a string
:param prop: the name of the property as a string
:return: the current value of the property
"""
key = self.__keytransform__(key)
if not(key in self.__props__):
self.__props__[key] = base_dictionary()
props = self.__props__[key]
if (prop in props):
return props[prop]
else:
return None
def get_props(self, key):
"""
Returns all the properties of a cell
:param key: the name of the cell as string
:param prop:
:return: all the properties as a string
"""
key = self.__keytransform__(key)
if not(key in self.__props__):
self.__props__[key] = base_dictionary()
return self.__props__[key]
def __getitem__(self, key, value=None):
"""
Returns the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: an optional value
:return: the value of the cell
"""
key = self.__keytransform__(key)
if key in self.__formulas__:
self.store[key] = self.__formulas__[key](key,value)
if not(key in self.store):
self.store[key] = None
return self.store[key]
def __setitem__(self, key, value):
"""
Sets the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: the new value for the cell
:return: the value of the cell
"""
if key in self.__guards__:
self.store[key] = self.__guards__[key](key,value)
else:
self.store[self.__keytransform__(key)] = value
def __delitem__(self, key):
"""
Deletes a cell when referenced as an item
:param key: the name of the cell as a string
:return: None
"""
key = self.__keytransform__(key)
if key in self.__formulas__:
del self.__formulas__[key]
if key in self.__guards__:
del self.__guards__[key]
if not(key in self.store):
return None
del self.store[self.__keytransform__(key)]
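# Illustrative use of cell_dictionary (a sketch; the cell names are made up):
#
#   cells = cell_dictionary()
#   cells['price'] = 10
#   cells.set_formula('total', lambda key, value: cells['price'] * 2)
#   cells.set_guard('price', lambda key, value: max(0, value))   # reject negatives
#   cells['total']    # -> 20, recomputed through the formula on every access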
def get_cell_engine(context='root'):
"""Create a new CellEngine"""
global synapse_sheets, synapse_current_cell_engine, synapse_current_cell_engine_context
lname = context.lower()
synapse_current_cell_engine_context = lname
if lname in synapse_sheets:
return synapse_sheets[lname]
synapse_current_cell_engine = synapse_sheets[lname] = cell_dictionary()
return synapse_current_cell_engine
def wait_for_ctrlc(seconds=1):
"""
Wait for ctrlc interrupt from the board
:param seconds: sleep time per loop in seconds
:return:
"""
try:
while True:
time.sleep(seconds)
except KeyboardInterrupt:
pass
class cell_engine(object):
"""
The Synapse Cell Engine class.
"""
def __set(self,key,value):
"""
Sets the value of a cell
:param key: the name of the cell as a string
:param value: the value for the cell
:return: None
"""
self.__dict__[key] = value
def __get(self,key):
"""
Returns the value of a cell
:param key: the name of the cell as a string
:return: the value of the cell
"""
return self.__dict__[key]
def __init__(self,cells=None):
"""
Constructor for a Synapse Cell Engine
:param cells: a client_interface instance. If not specified, set to the current base_dictionary
"""
if not cells:
cells = get_cell_engine()
if not isinstance(cells,client_interface):
raise RuntimeError("%s is not a subclass of client_interface" % type(cells))
self.__set('__cells', cells)
def cells(self):
"""
Returns the cell dictionary for this instance
"""
return self.__get('__cells')
def get_cell(self,key,value=None):
"""
Returns the current value of a cell
:param key: the name of the cell as a string
:param value: an optional value that may be passed to the cell's formula
:return: the current value of the cell
"""
return self.__get('__cells').get_cell(key,value)
def set_cell(self,key,value=None):
"""
Set the value of a cell
:param key: the name of the cell
:param value: the value for the cell
:return: the current value cell
"""
return self.__get('__cells').set_cell(key,value)
def set_formula(self,key,formula):
"""
Sets the formula function for a cell.
:param name: the name of the cell as string
:param formula: a function that takes (key,value) where key=cell, value an optional value
:return: None
"""
return self.cells().set_formula(key,formula)
def set_guard(self,key,guard):
"""
Sets a guard function for a cell.
:param name: the name of the cell as string
:param guard: a function that takes (key,value) where key=cell, and value=value for the cell
:return: None
"""
return self.cells().set_guard(key,guard)
def set_prop(self,key,prop,value):
"""
Sets a cell's named property to a value
:param key: the name of the cell as a string
:param prop: the name of the property as a string
:param value: the current value of the property
:return:
"""
return self.cells().set_prop(key,prop,value)
def get_prop(self,key,prop):
"""
Returns the current value of a cell's property
:param key: the name of the cell as a string
:param prop: the name of the property as a string
:return: the current value of the property
"""
return self.cells().get_prop(key,prop)
def __delattr__(self,key):
del self.cells()[key]
def __getattr__(self, key):
return self.__get('__cells').get_cell(key)
def __setattr__(self, key, value):
return self.__get('__cells').set_cell(key, value)
def __getitem__(self, key):
"""
Returns the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: an optional value
:return: the value of the cell
"""
return self.__get('__cells').get_cell(key)
def __setitem__(self, key, value):
"""
Sets the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: the new value for the cell
:return: the value of the cell
"""
return self.__get('__cells').set_cell(key, value)
def __len__(self):
"""
Returns the number of cells in the cell engine.
"""
return len(self.cells())
def close(self):
pass
#synapse.cells = cell_engine
#synapse.help = synapse_help
#synapse_dict = base_dictionary
#synapse_cells = cell_engine
synapse_spreadsheet = get_cell_engine
#############################
import cherrypy
import json
import requests
import threading
#synapse.http = base_dictionary()
synapse_http_port = 8888
synapse_http_host = "127.0.0.1"
logger = None
protocol = 'http'
class http_root_service(object):
def __init__(self,title="synapse Web Service"):
self.__title = title
@cherrypy.expose
def index(self):
return self.__title
class http_rest_service(object):
exposed = True
name = 'rest'
vdir = '/rest'
conf = {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')]
}
__cells = get_cell_engine()
def __init__(self,name=None, conf=None, cells=None):
if cells:
self.__cells = cells
if name:
self.name = name
if conf:
self.conf = conf
self.vdir = '/' + self.name
def __get(self, data):
j = json.loads(data)
key = j['key']
prop = j.get('prop')
value = j.get('value')
context = j.get('context')
if not context or not context in synapse_sheets:
raise cherrypy.HTTPError("400 Bad Request", "Invalid Context specified (%s)" % context)
self.__cells = get_cell_engine(context)
if prop:
j['value'] = self.__cells.get_prop(key, prop)
else:
j['value'] = self.__cells.get_cell(key, value)
return json.dumps(j)
def __set(self, data):
j = json.loads(data)
key = j['key']
prop = j.get('prop')
value = j.get('value')
context = j.get('context')
if not context or not context in synapse_sheets:
raise cherrypy.HTTPError("400 Bad Request", "Invalid Context specified (%s)" % context)
self.__cells = get_cell_engine(context)
if prop:
j['value'] = self.__cells.set_prop(key, prop, value)
else:
j['value'] = self.__cells.set_cell(key, value)
return json.dumps(j)
@cherrypy.tools.accept(media='text/plain')
def GET(self, data='{}'):
return self.__get(data)
@cherrypy.tools.accept(media='text/plain')
def POST(self, data='{}'):
return self.__set(data)
@cherrypy.tools.accept(media='text/plain')
def PUT(self, data='{}'):
return self.__set(data)
@cherrypy.tools.accept(media='text/plain')
def DELETE(self, data='{}'):
jdata = json.loads(data)
return {"from":"delete", "data":jdata}
http_server_running = False
class http_server(object):
"""Starts a local synapse HTTP Web Service."""
thread = None
root = None
conf = None
rest = None
def __init__(self,port=synapse_http_port,title='synapse Web Service',log_screen=False,services=[]):
global logger
global http_server_running
logger = initialize_logger('http',file="synapse_%d_%d.log" % (port, os.getpid()))
self.root = http_root_service("%s on port %d" % (title, port))
self.rest = http_rest_service(name='rest')
self.root.__setattr__(self.rest.name, self.rest)
self.conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd())
},
self.rest.vdir: self.rest.conf
}
for svc in services:
self.root.__setattr__(svc.name, svc)
self.conf.__setitem__(svc.vdir, svc.conf)
def worker():
#cherrypy.log.access_file - 'synapse.http.log'
cherrypy.log.access_log.level = logging.INFO
cherrypy.log.error_log.level = logging.ERROR
cherrypy.log.screen = log_screen
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.server.socket_port = port
cherrypy.quickstart(self.root, '/', self.conf)
self.thread = threading.Thread(target=worker)
self.thread.daemon = True
self.thread.start()
http_server_running = True;
class http_client(client_interface):
"""Creates a new HTTP Client"""
__url__ = None
response = None
trust_env = False
context = 'root'
def __init__(self, port=synapse_http_port, host=synapse_http_host, context='root'):
self.context = context
if not ('NO_PROXY' in os.environ):
os.environ['NO_PROXY'] = "127.0.0.1,localhost,%s" % socket.gethostname()
self.__url__ = "http://%s:%d/rest" % (host, port)
def __response(self):
if self.response.status_code == 200:
null = None
return json.loads(self.response.text)['value']
else:
raise requests.exceptions.HTTPError(self.response.status_code)
def get_cell(self, key, value=None):
"""Returns the contents of the cell"""
data = {'action':'get_cell', 'key':key, 'value':value, 'context':self.context}
self.response = requests.get(self.__url__, params={'data':json.dumps(data)})
return self.__response()
def set_cell(self, key, value=None):
"""Set the contents of the cell"""
data = {'action':'set_cell', 'key':key, 'value':value, 'context':self.context}
self.response = requests.post(self.__url__, data={'data':json.dumps(data)})
return self.__response()
def get_prop(self, key, prop, value=None):
"""Returns the contents of the cell"""
data = {'action':'get_prop', 'key':key, 'prop':prop, 'value':value, 'context':self.context}
self.response = requests.get(self.__url__, params={'data':json.dumps(data)})
return self.__response()
def set_prop(self, key, prop, value=None):
"""Set the contents of the cell"""
data = {'action':'set_prop', 'key':key, 'prop':prop, 'value':value, 'context':self.context}
self.response = requests.post(self.__url__, data={'data':json.dumps(data)})
return self.__response()
def __getitem__(self, key):
"""
Returns the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: an optional value
:return: the value of the cell
"""
return self.get_cell(key)
def __setitem__(self, key, value):
"""
Sets the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: the new value for the cell
:return: the value of the cell
"""
return self.set_cell(key, value)
def RaiseError(self):
raise requests.exceptions.HTTPError(404)
def http_cell_engine(port=synapse_http_port, host=synapse_http_host, context='root'):
"""Returns a cell engine from a new HTTP_Client"""
return cell_engine(http_client(port=port, host=host, context=context))
def _server(port=synapse_http_port):
return http_server(port)
def _client(port=synapse_http_port, host=synapse_http_host,context='root'):
return http_cell_engine(port=port,host=host,context=context)
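# Rough end-to-end sketch of the HTTP transport (assumes the port is free;
# all names and values below are illustrative):
#
#   http_server(port=8888)              # starts CherryPy in a daemon thread
#   eng = http_cell_engine(port=8888)   # cell_engine backed by an http_client
#   eng.set_cell('answer', 42)
#   eng.get_cell('answer')              # -> 42, round-tripped through /rest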
def exit(status=1):
global logger
if http_server_running == True:
cherrypy.engine.exit()
if logger != None:
logging.shutdown()
print("Goodbye from Synapse")
sys.exit(status)
def main():
from . import exit
sys.ps1 = synapse_ps1
sys.ps2 = synapse_ps2
print synapse_title
print synapse_exit_prompt
sys.tracebacklimit = 0
try:
for file in sys.argv[1:]:
print "execfile(%s)" % file
execfile(file)
finally:
pass
import rpyc
from rpyc.utils.server import ThreadedServer
import threading
synapse_rpc_port = 9999
synapse_rpc_host = "127.0.0.1"
def rpc_server(port):
"""Create a new RPC Server"""
class SynapseRpcService(rpyc.Service):
def on_connect(self):
"""Called on connection"""
pass
def on_disconnect(self):
"""Called on disconnection"""
pass
def exposed_get_cell(self, key, value=None, context='root'): # Get Cell
x = get_cell_engine(context)
return x.get_cell(key, value)
def exposed_set_cell(self, key, value=None, context='root'): # Set Cell
x = get_cell_engine(context)
return x.set_cell(key, value)
def exposed_get_prop(self, key, prop, context='root'):
x = get_cell_engine(context)
return x.get_prop(key, prop)
def exposed_set_prop(self,key, prop, value=None, context='root'):
x = get_cell_engine(context)
return x.set_prop(key, prop, value)
def server_thread():
ts = ThreadedServer(SynapseRpcService,port=port)
ts.start()
t = threading.Thread(target=server_thread)
t.daemon = True
t.start()
return t
class rpc_client(client_interface):
"""Creates a new RPC Client"""
__url__ = None
response = None
context = 'root'
conn = None
host = None
port = None
def __connect(self):
self.conn = rpyc.connect(self.host,self.port)
def __init__(self, port=synapse_rpc_port, host=synapse_rpc_host, context='root'):
self.host = host
self.port = port
self.context = context
self.__url__ = "rpc://%s:%d/rest" % (host, port)
if not ('NO_PROXY' in os.environ):
os.environ['NO_PROXY'] = "127.0.0.1,localhost,%s" % socket.gethostname()
self.__connect()
def get_cell(self, key, value=None):
"""Returns the contents of the cell"""
try:
return self.conn.root.get_cell(key, value, context=self.context)
except EOFError:
self.__connect()
return self.conn.root.get_cell(key, value, context=self.context)
def set_cell(self, key, value=None):
"""Set the contents of the cell"""
try:
return self.conn.root.set_cell(key, value, context=self.context)
except EOFError:
self.__connect()
return self.conn.root.set_cell(key, value, context=self.context)
def get_prop(self, key, prop):
"""Returns the contents of the cell"""
try:
return self.conn.root.get_prop(key, prop, context=self.context)
except EOFError:
self.__connect()
return self.conn.root.get_prop(key, prop, context=self.context)
def set_prop(self, key, prop, value=None):
"""Set the contents of the cell"""
try:
return self.conn.root.set_prop(key, prop, value, context=self.context)
except EOFError:
self.__connect()
return self.conn.root.set_prop(key, prop, value, context=self.context)
def __getitem__(self, key):
"""
Returns the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: an optional value
:return: the value of the cell
"""
return self.get_cell(key)
def __setitem__(self, key, value):
"""
Sets the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: the new value for the cell
:return: the value of the cell
"""
return self.set_cell(key, value)
def rpc_cell_engine(port=synapse_rpc_port, host=synapse_rpc_host, context='root'):
"""Returns a cell engine from a new HTTP_Client"""
return cell_engine(rpc_client(port=port, host=host, context=context))
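# The rpyc transport can be exercised the same way as the HTTP one above
# (a sketch; the port is arbitrary):
#
#   rpc_server(9999)                    # ThreadedServer on a daemon thread
#   eng = rpc_cell_engine(port=9999)
#   eng.set_cell('answer', 42)
#   eng.get_cell('answer')              # -> 42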
class paramsdict(object):
hash = {}
def __init__(self, msg):
lines = msg.rstrip('\n').split('\n')
self.head = lines[0];
self.action = self.head.split('.').pop().lower()
self.body = lines[1:]
self.hash = {}
for i in range(0,len(self.body)):
pos = self.body[i].index(":")
key = self.__key(self.body[i][:pos])
val = self.body[i][pos+1:]
self.hash[key] = val
def ack(self):
if not self.head.endswith('.ack'):
self.head = self.head + '.ack'
def __key(self,key):
return key.strip().lower()
def __getitem__(self, key):
fkey = self.__key(key)
if fkey in self.hash:
return self.hash[fkey]
def __setitem__(self, key, value):
fkey = self.__key(key)
self.hash[fkey] = value
return value
def __repr__(self):
return self._string()
def _string(self):
result = self.head + '\n'
for key in self.hash:
result = result + "{}:{}\n".format(key,self.hash[key])
return result + '\n'
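# Wire format handled by paramsdict (a sketch reconstructed from the parsing
# code above): a header line "<appid>.<action>" followed by "key:value" lines
# and terminated by a blank line, e.g.
#
#   python.myhost.1234.set-cell
#   var:temperature
#   val:21.5
#   ctx:root
#   <blank line>
#
# The server replies with the same message, ".ack" appended to the header and
# an "@ans" (or "$err") entry added to the body.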
import SocketServer
synapse_stomp_port = 10000
synapse_stomp_host = "127.0.0.1"
class synapse_stomp_handler(SocketServer.StreamRequestHandler):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def cell_engine(self,pd):
context = pd['ctx']
if not context:
return get_cell_engine()
else:
return get_cell_engine(context)
def set_error(self,pd,msg):
pd['$err'] = msg
def set_answer(self,pd,msg):
pd['@ans'] = msg
def process_message(self, pd):
x = self.cell_engine(pd)
theVar = pd['var']
theVal = pd['val']
theProp = pd['prop']
action = pd.action
ACTION = lambda (x) : True if action == x else False
try:
if ACTION('get-cell'):
self.set_answer(pd, x.get_cell(theVar, theVal))
elif ACTION('set-cell'):
x.set_cell(theVar, theVal)
self.set_answer(pd, x.get_cell(theVar))
elif ACTION('get-prop'):
self.set_answer(pd, x.get_prop(theVar, theProp))
elif ACTION('set-prop'):
self.set_answer(pd, x.set_prop(theVar, theProp, theVal))
else:
self.set_error(pd, 'Unknown action {}'.format(action))
except Exception, e:
self.set_error(pd, e.message)
def handle(self):
# self.request is the TCP socket connected to the client
client = "%s:%s" % (self.client_address[0], self.client_address[1])
print "connect({})".format(client)
buf = ""
cnt = 0
while(True):
self.data = self.rfile.readline()
print self.data,
if not self.data: break
buf = buf + self.data
if buf.endswith('\n\n'):
try:
pd = paramsdict(buf)
self.process_message(pd)
pd.ack()
self.wfile.write(pd._string())
cnt = 0
except:
print 'invalid message:{}'.format(buf)
break;
finally:
cnt = cnt + 1
buf = ""
print "disconnect({}); {} messages".format(client,cnt)
def stomp_server(port):
HOST, PORT = "0.0.0.0", port
# Create the server, binding to localhost on port 9999
server = SocketServer.TCPServer((HOST, PORT), synapse_stomp_handler)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
def server_thread():
server.serve_forever()
t = threading.Thread(target=server_thread)
t.daemon = True
t.start()
return t
class stomp_client(client_interface):
"""Creates a new RPC Client"""
__url__ = None
response = None
context = 'root'
conn = None
host = None
port = None
appid = "{}.{}.{}".format('python', socket.gethostname(), os.getpid())
def __connect(self):
#self.conn = rpyc.connect(self.host,self.port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port on the server given by the caller
self.server_address = (self.host, self.port)
print >>sys.stderr, 'connecting to %s port %s' % self.server_address
self.sock.connect(self.server_address)
def __init__(self, port=synapse_stomp_port, host=synapse_stomp_host, context='root'):
self.host = host
self.port = port
self.context = context
self.__url__ = "rpc://%s:%d/rest" % (host, port)
if not ('NO_PROXY' in os.environ):
os.environ['NO_PROXY'] = "127.0.0.1,localhost,%s" % socket.gethostname()
self.__connect()
def __sendrecv(self,data):
self.sock.sendall(data)
msg = ''
while not msg.endswith('\n\n'):
data = self.sock.recv(4096)
if not data:
raise EOFError
msg = msg + data;
pd = paramsdict(msg)
if pd['$err']:
raise Exception(pd['$err'])
return pd['@ans']
def __sendmsg(self, action, args):
msg = self.__genmsg(action, args)
pd = paramsdict(msg)
return self.__sendrecv(pd._string())
def __genmsg(self, action, args):
msg = self.appid + "." + action + "\n"
args['ctx'] = self.context
for k in args:
msg = msg + "{}:{}\n".format(k, args[k])
msg = msg + "\n"
return msg
def get_cell(self, key, value=None):
"""Returns the contents of the cell"""
return self.__sendmsg("get-cell", {'var':key, 'val':value})
def set_cell(self, key, value=None):
"""Set the contents of the cell"""
return self.__sendmsg("set-cell", {'var':key, 'val':value})
def get_prop(self, key, prop):
"""Returns the contents of the cell"""
return self.__sendmsg("get-prop", {'var':key, 'prop':prop})
def set_prop(self, key, prop, value=None):
"""Set the contents of the cell"""
return self.__sendmsg("set-prop", {'var':key, 'prop':prop, 'value':value})
def __getitem__(self, key):
"""
Returns the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: an optional value
:return: the value of the cell
"""
return self.get_cell(key)
def __setitem__(self, key, value):
"""
Sets the value of a cell when referenced as an item
:param key: the name of the cell as a string
:param value: the new value for the cell
:return: the value of the cell
"""
return self.set_cell(key, value)
def stomp_cell_engine(port=synapse_stomp_port, host=synapse_stomp_host, context='root'):
"""Returns a cell engine from a new HTTP_Client"""
return cell_engine(stomp_client(port=port, host=host, context=context))
class stomp_wrapper_class(object):
def server(self, port):
return stomp_server(port)
def client(self, port=synapse_stomp_port, host=synapse_stomp_host, context='root'):
return cell_engine(stomp_client(port=port, host=host, context=context))
class rpc_wrapper_class(object):
def server(self, port):
return rpc_server(port)
def client(self, port=synapse_stomp_port, host=synapse_stomp_host, context='root'):
return cell_engine(rpc_client(port=port, host=host, context=context))
class http_wrapper_class(object):
def server(self, port):
return http_server(port)
def client(self, port=synapse_stomp_port, host=synapse_stomp_host, context='root'):
return cell_engine(http_client(port=port, host=host, context=context))
stomp = stomp_wrapper_class()
http = http_wrapper_class()
rpc = rpc_wrapper_class()
spreadsheet = synapse_spreadsheet
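# Typical use of the transport wrappers defined above (a sketch; ports are arbitrary):
#
#   import synapse
#   synapse.http.server(8888)             # or synapse.rpc.server(9999) / synapse.stomp.server(10000)
#   eng = synapse.http.client(port=8888)  # a cell_engine over that transport
#   eng['greeting'] = 'hello'
#   print eng['greeting']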
| mcornelio/synapse | synapse/__init__.py | Python | mit | 29,118 | 0.033141 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def _generate_unary_cwise_math_cases():
# TODO(rachelim): Consolidate tests with pfor when APIs are somewhat shared.
bitwise_cases = [("Invert", bitwise_ops.invert)]
logical_cases = [("LogicalNot", math_ops.logical_not)]
complex_cases = [
("Angle", math_ops.angle),
("ComplexAbs", math_ops.abs),
("Conj", math_ops.conj),
("Imag", math_ops.imag),
("Real", math_ops.real),
]
real_cases = [
("Abs", math_ops.abs),
("Acos", math_ops.acos),
("Acosh", lambda x: math_ops.acosh(1 + math_ops.square(x))),
("Asin", math_ops.asin),
("Asinh", math_ops.asinh),
("Atan", math_ops.atan),
("Atanh", math_ops.atanh),
("BesselI0e", math_ops.bessel_i0e),
("BesselI1e", math_ops.bessel_i1e),
("Ceil", math_ops.ceil),
("Cos", math_ops.cos),
("Cosh", math_ops.cosh),
("Digamma", math_ops.digamma),
("Elu", nn.elu),
("Erf", math_ops.erf),
("Erfc", math_ops.erfc),
("Exp", math_ops.exp),
("Expm1", math_ops.expm1),
("Floor", math_ops.floor),
("Inv", math_ops.inv),
("IsFinite", math_ops.is_finite),
("IsInf", math_ops.is_inf),
("Lgamma", math_ops.lgamma),
("Log", math_ops.log),
("Log1p", math_ops.log1p),
("Neg", math_ops.negative),
("Reciprocal", math_ops.reciprocal),
("Relu", nn.relu),
("Relu6", nn.relu6),
("Rint", math_ops.rint),
("Round", math_ops.round),
("Rsqrt", math_ops.rsqrt),
("Selu", nn.selu),
("Sigmoid", math_ops.sigmoid),
("Sign", math_ops.sign),
("Sin", math_ops.sin),
("Sinh", math_ops.sinh),
("Softplus", nn.softplus),
("Softsign", nn.softsign),
("Sqrt", math_ops.sqrt),
("Square", math_ops.square),
("Tan", math_ops.tan),
("Tanh", math_ops.tanh),
]
random_input = np.random.rand(3, 5)
complex_component = np.random.rand(3, 5)
random_int = np.random.randint(0, 10, (7, 3, 5))
def bitwise_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_int)
def logical_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_input > 0)
def random_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_input)
def complex_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(
math_ops.complex(random_input, complex_component))
case_factory_pairs = [
(bitwise_cases, bitwise_dataset_factory),
(logical_cases, logical_dataset_factory),
(complex_cases, complex_dataset_factory),
(real_cases, random_dataset_factory),
]
return [(case[0], case[1], factory)
for cases, factory in case_factory_pairs
for case in cases]
def _generate_binary_cwise_math_cases():
bitwise_cases = [("BitwiseAnd", bitwise_ops.bitwise_and),
("BitwiseOr", bitwise_ops.bitwise_or),
("BitwiseXor", bitwise_ops.bitwise_xor),
("LeftShift", bitwise_ops.left_shift),
("RightShift", bitwise_ops.right_shift)]
logical_cases = [("LogicalAnd", math_ops.logical_and),
("LogicalOr", math_ops.logical_or)]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
real_cases = [
("Add", math_ops.add),
("AddV2", math_ops.add_v2),
("Atan2", math_ops.atan2),
("Complex", math_ops.complex),
("DivNoNan", math_ops.div_no_nan),
("Equal", math_ops.equal),
("FloorDiv", math_ops.floor_div),
("FloorMod", math_ops.floor_mod),
("Greater", math_ops.greater),
("GreaterEqual", math_ops.greater_equal),
("Igamma", math_ops.igamma),
("Igammac", math_ops.igammac),
("IgammaGradA", math_ops.igamma_grad_a),
("Less", math_ops.less),
("LessEqual", math_ops.less_equal),
("Maximum", math_ops.maximum),
("Minimum", math_ops.minimum),
("Mod", math_ops.mod),
("Mul", math_ops.multiply),
("NotEqual", math_ops.not_equal),
("Polygamma", safe_polygamma),
("Pow", math_ops.pow),
("RealDiv", math_ops.divide),
("SquareDifference", math_ops.squared_difference),
("Sub", math_ops.subtract),
("TruncateMod", math_ops.truncate_mod),
("Zeta", safe_zeta),
]
# Exercises broadcasting capabilities
x = np.random.rand(7, 3, 5)
y = np.random.rand(3, 5)
x_int = np.random.randint(0, 10, (7, 3, 5))
y_int = np.random.randint(0, 10, (3, 5))
def bitwise_dataset_factory():
return dataset_ops.Dataset.from_tensors((x_int, y_int))
def logical_dataset_factory():
return dataset_ops.Dataset.from_tensors((x > 0, y > 0))
def random_dataset_factory():
return dataset_ops.Dataset.from_tensors((x, y))
case_factory_pairs = [
(bitwise_cases, bitwise_dataset_factory),
(logical_cases, logical_dataset_factory),
(real_cases, random_dataset_factory),
]
return [(case[0], case[1], factory)
for cases, factory in case_factory_pairs
for case in cases]
def _generate_cwise_test_cases():
return _generate_unary_cwise_math_cases() + _generate_binary_cwise_math_cases(
)
def _generate_csv_test_case():
def csv_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
return decode_csv_fn, csv_factory
def _generate_parse_single_example_test_case():
def parse_example_factory():
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
]))
def parse_single_example_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
return parsing_ops.parse_single_example(x, features)
return parse_single_example_fn, parse_example_factory
def _generate_optimization_test_cases():
def base_dataset_factory():
return dataset_ops.Dataset.from_tensors(np.random.rand(10, 3)).repeat(5)
rand_val = np.random.rand(1, 1, 1, 1, 1, 1)
csv_test_case = _generate_csv_test_case()
parse_fn, parse_base = _generate_parse_single_example_test_case()
def dense_output_only_parse_fn(x):
# Since we haven't implemented a vectorizer for SerializeSparse, any
# function with sparse outputs will only be naively vectorized.
parse_result = parse_fn(x)
return [
y for y in parse_result if not isinstance(y, sparse_tensor.SparseTensor)
]
def map_fn_with_cycle(x):
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
return control_flow_ops.while_loop(c, b, [x])
# Misc test cases
test_cases = [
("Basic", lambda x: (x, x + 1), base_dataset_factory),
("Broadcast", lambda x: x + rand_val, base_dataset_factory),
("Cycle", map_fn_with_cycle, lambda: dataset_ops.Dataset.from_tensors(1)),
("Const", lambda x: 2, base_dataset_factory),
("Cast", lambda x: math_ops.cast(x, dtypes.float64),
base_dataset_factory),
("Reshape", lambda x: array_ops.reshape(x, (-1, 30)),
base_dataset_factory),
("Transpose", array_ops.transpose, base_dataset_factory),
("Unpack", array_ops.unstack, base_dataset_factory),
("UnpackNegativeAxis", lambda x: array_ops.unstack(x, axis=-1),
base_dataset_factory),
# Parsing ops
("DecodeCSV", csv_test_case[0], csv_test_case[1]),
("ParseSingleExample", parse_fn, parse_base),
("ParseSingleExampleDenseOutputOnly", dense_output_only_parse_fn,
parse_base),
] + _generate_cwise_test_cases()
return [{
"testcase_name":
x[0] + "Parallel" if num_parallel_calls is not None else x[0],
"map_fn":
x[1],
"base_dataset_factory":
x[2],
"num_parallel_calls":
num_parallel_calls
} for x in test_cases for num_parallel_calls in (None, 12)]
@test_util.run_all_in_graph_and_eager_modes
class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
def _get_test_datasets(self,
base_dataset,
map_fn,
num_parallel_calls=None,
expect_optimized=True):
"""Given base dataset and map fn, creates test datasets.
Returns a tuple of (unoptimized dataset, optimized dataset). The
unoptimized dataset has the assertion that Batch follows Map. The optimized
dataset has the assertion that Map follows Batch, and has the
"map_vectorization" optimization applied.
Args:
base_dataset: Input dataset to map->batch
map_fn: Map function to use
num_parallel_calls: (Optional.) num_parallel_calls argument for map
expect_optimized: (Optional.) Whether we expect the optimization to take
place, in which case we will assert that Batch is followed by Map,
otherwise Map followed by Batch. Defaults to True.
Returns:
Tuple of (unoptimized dataset, optimized dataset).
"""
map_node_name = "Map" if num_parallel_calls is None else "ParallelMap"
batch_size = 100
def _make_dataset(node_names):
return base_dataset.apply(optimization.assert_next(node_names)).map(
map_fn, num_parallel_calls=num_parallel_calls).batch(batch_size)
unoptimized = _make_dataset([map_node_name, "Batch"])
optimized = _make_dataset(["Batch", map_node_name]
if expect_optimized else [map_node_name, "Batch"])
options = dataset_ops.Options()
options.experimental_map_vectorization = True
optimized = optimized.with_options(options)
return unoptimized, optimized
@parameterized.named_parameters(_generate_optimization_test_cases())
def testOptimization(self, map_fn, base_dataset_factory, num_parallel_calls):
base_dataset = base_dataset_factory()
unoptimized, optimized = self._get_test_datasets(base_dataset, map_fn,
num_parallel_calls)
self.assertDatasetsEqual(unoptimized, optimized)
# TODO(b/117581999): Add eager coverage for the following tests.
def testSkipEagerOptimizationBadMapFn(self):
# Test map functions that give an error
def map_fn(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch(
5, drop_remainder=True)
_, optimized = self._get_test_datasets(base_dataset, map_fn)
nxt = optimized.make_one_shot_iterator().get_next()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(nxt)
def testOptimizationWithCapturedInputs(self):
# Tests that vectorization works with captured inputs
y = constant_op.constant(1, shape=(2,))
z = constant_op.constant(2, shape=(2,))
def map_fn(x):
return x, y, z
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=True)
self.assertDatasetsEqual(optimized, unoptimized)
# TODO(b/117581999): Add eager coverage for the following tests.
def testSkipEagerOptimizationIgnoreStateful(self):
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsRaiseSameError(
unoptimized, optimized, errors.InvalidArgumentError,
[("OneShotIterator", "OneShotIterator_1", 1),
("IteratorGetNext", "IteratorGetNext_1", 1)])
def testOptimizationIgnoreRagged(self):
# Make sure we ignore inputs that might not be uniformly sized
def map_fn(x):
return array_ops.gather(x, 0)
# output_shape = (?,)
base_dataset = dataset_ops.Dataset.range(20).batch(3, drop_remainder=False)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsEqual(unoptimized, optimized)
# TODO(b/117581999): Add eager coverage for the following tests.
def testSkipEagerOptimizationIgnoreRaggedMap(self):
# Don't optimize when the output of the map fn shapes are unknown.
def map_fn(x):
return array_ops.tile(x, x)
base_dataset = dataset_ops.Dataset.range(20).batch(1, drop_remainder=True)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsRaiseSameError(
unoptimized, optimized, errors.InvalidArgumentError,
[("OneShotIterator", "OneShotIterator_1", 1),
("IteratorGetNext", "IteratorGetNext_1", 1)])
class MapVectorizationBenchmark(test.Benchmark):
# TODO(rachelim): Add a benchmark for more expensive transformations, such as
# vgg_preprocessing.
def _run(self, x, num_iters=100, name=None):
deltas = []
with session.Session() as sess:
for _ in range(5):
# Warm up session...
sess.run(x)
for _ in range(num_iters):
start = time.time()
sess.run(x)
end = time.time()
deltas.append(end - start)
median_time = np.median(deltas)
self.report_benchmark(iters=num_iters, wall_time=median_time, name=name)
return median_time
def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
num_elems = int(np.sum([np.prod(x) for x in input_size]))
name_template = "{}__batch_size_{}_input_element_size_{}_{}"
unoptimized = input_dataset.map(map_fn).batch(batch_size)
unoptimized_op = unoptimized.make_one_shot_iterator().get_next()
optimized = input_dataset.map(map_fn).batch(batch_size)
options = dataset_ops.Options()
options.experimental_map_vectorization = True
optimized = optimized.with_options(options)
optimized_op = optimized.make_one_shot_iterator().get_next()
unoptimized_time = self._run(
unoptimized_op,
name=name_template.format(str_id, batch_size, num_elems, "unoptimized"))
optimized_time = self._run(
optimized_op,
name=name_template.format(str_id, batch_size, num_elems, "optimized"))
print("Batch size: {}\n"
"Input element size: {}\n"
"Transformation: {}\n"
"Speedup: {}\n".format(batch_size, input_size, str_id,
(unoptimized_time / optimized_time)))
# Known cheap functions
def benchmarkIdentity(self):
self._benchmark_helper(lambda *args: [array_ops.identity(x) for x in args],
"identity")
def benchmarkAddConst(self):
self._benchmark_helper(lambda *args: [x + 1 for x in args], "add_const")
def benchmarkReturnConst(self):
self._benchmark_helper(lambda *args: [constant_op.constant(2)], "ret_const")
def benchmarkSelect(self):
self._benchmark_helper(lambda *args: args[0], "select")
def benchmarkCast(self):
self._benchmark_helper(
lambda *args: [math_ops.cast(x, dtypes.float64) for x in args], "cast")
def benchmarkReshape(self):
self._benchmark_helper(
lambda *args: [array_ops.reshape(x, (-1, 30)) for x in args], "reshape")
def benchmarkDecodeCSV(self):
csv_fn, csv_factory = _generate_csv_test_case()
self._benchmark_helper(csv_fn, "decode_csv", lambda: [csv_factory()])
def benchmarkParseSingleExample(self):
# NOTE: Since we haven't implemented a vectorizer for "SerializeSparse",
# this function is only naively vectorized.
parse_fn, parse_factory = _generate_parse_single_example_test_case()
self._benchmark_helper(parse_fn, "parse_single_example",
lambda: [parse_factory()])
def _default_dataset_factory(self):
input_sizes = [(10, 10, 3), (10, 100, 300)]
for sz in input_sizes:
yield dataset_ops.Dataset.from_tensor_slices(np.random.rand(*sz))
def _benchmark_helper(self, map_fn, str_id, base_dataset_factory=None):
if base_dataset_factory is None:
base_dataset_factory = self._default_dataset_factory
batch_size = 1000
for base_dataset in base_dataset_factory():
base_dataset = base_dataset.repeat()
input_size = [
tuple(shape.as_list())
for shape in nest.flatten(base_dataset.output_shapes)
]
self._compare(base_dataset, map_fn, batch_size, input_size, str_id)
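# Illustrative sketch (not part of the original test file): how the benchmark
# above opts a pipeline into the vectorization pass. It reuses this module's
# dataset_ops import; the attribute name `experimental_map_vectorization` is
# taken from the code above and may differ in other TensorFlow versions.
def _example_enable_map_vectorization(base_dataset, map_fn, batch_size=128):
  unoptimized = base_dataset.map(map_fn).batch(batch_size)
  optimized = base_dataset.map(map_fn).batch(batch_size)
  options = dataset_ops.Options()
  options.experimental_map_vectorization = True
  optimized = optimized.with_options(options)
  return unoptimized, optimized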
if __name__ == "__main__":
test.main()
|
brchiu/tensorflow
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
|
Python
|
apache-2.0
| 20,093 | 0.004977 |
"""
Script to fetch historical data (since 2011) for public matches, globally.
Results are returned in ascending chronological order, as the matches happened.
"""
from __future__ import print_function
from dota2py import api
from time import sleep as wait_for_next_fetch
def public_match_history(start_at_match_seq_num=None, matches_requested=500,
fetch_delay=1, debug=True, **kwargs):
"""
    Fetches up to `matches_requested` public matches starting from
    `start_at_match_seq_num`, passing any extra kwargs through to the API.
    Rate limits the API requests according to `fetch_delay` (in seconds).
    Returns a dict with the keys 'status', 'statusDetail' and 'matches'.
"""
# tracking variables
matches_fetched = 0
last_match_seq_num = start_at_match_seq_num
last_response_status = 1
match_history = []
last_response_detail = "Fetch successful"
while last_response_status == 1 and matches_fetched < matches_requested:
cur_response = api.get_match_history_by_sequence_num(
start_at_match_seq_num=last_match_seq_num, **kwargs
)
last_response_status = cur_response['result']['status']
if not last_response_status == 1:
# unsuccessful query
if not 'statusDetail' in cur_response['result']:
last_response_detail = "Unknown error"
else:
last_response_detail = cur_response['result']['statusDetail']
break
else:
# successful data fetch
cur_matches = cur_response['result']['matches']
if len(cur_response['result']['matches']) >= 1:
if not match_history:
# very first fetch
match_history.extend(cur_matches)
matches_fetched += len(cur_matches)
else:
# 2nd fetch onwards, ignore the first common match
match_history.extend(cur_matches[1:])
matches_fetched += len(cur_matches) - 1
                    # stop if only the already-fetched match came back
                    if len(cur_matches) == 1 and cur_matches[0]['match_seq_num'] == last_match_seq_num:
break
else:
break
last_match_seq_num = cur_matches[-1]['match_seq_num']
if debug:
print("Matches fetched - #{}...".format(matches_fetched))
wait_for_next_fetch(fetch_delay)
if debug:
print("{0}: {1}".format(last_response_status, last_response_detail))
return {'status':last_response_status, 'statusDetail':last_response_detail,
'matches':match_history}
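# Illustrative usage sketch (not part of the original script). It assumes the
# Steam API key is registered via dota2py's api.set_api_key(); the key string
# below is a placeholder, and the printed fields are the standard match keys
# returned by the Web API.
def example_fetch(api_key="YOUR_STEAM_API_KEY"):
    api.set_api_key(api_key)
    result = public_match_history(matches_requested=100, fetch_delay=1)
    if result['status'] == 1:
        for match in result['matches']:
            print(match['match_id'], match['start_time'])
    else:
        print("Fetch failed: {0}".format(result['statusDetail']))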
|
ashishnitinpatil/dota2api_scripts
|
dota2api_scripts/historical_data.py
|
Python
|
bsd-2-clause
| 2,542 | 0.00236 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
caio2k/RIDE
|
src/robotide/searchtests/__init__.py
|
Python
|
apache-2.0
| 608 | 0.001645 |
# -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) 2001-2004 Divmod Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POP3 client protocol implementation
Don't use this module directly. Use twisted.mail.pop3 instead.
@author: Jp Calderone
"""
import re
from hashlib import md5
from twisted.python import log
from twisted.internet import defer
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import error
from twisted.internet import interfaces
OK = '+OK'
ERR = '-ERR'
class POP3ClientError(Exception):
"""Base class for all exceptions raised by POP3Client.
"""
class InsecureAuthenticationDisallowed(POP3ClientError):
"""Secure authentication was required but no mechanism could be found.
"""
class TLSError(POP3ClientError):
"""
Secure authentication was required but either the transport does
not support TLS or no TLS context factory was supplied.
"""
class TLSNotSupportedError(POP3ClientError):
"""
Secure authentication was required but the server does not support
TLS.
"""
class ServerErrorResponse(POP3ClientError):
"""The server returned an error response to a request.
"""
def __init__(self, reason, consumer=None):
POP3ClientError.__init__(self, reason)
self.consumer = consumer
class LineTooLong(POP3ClientError):
"""The server sent an extremely long line.
"""
class _ListSetter:
# Internal helper. POP3 responses sometimes occur in the
# form of a list of lines containing two pieces of data,
# a message index and a value of some sort. When a message
# is deleted, it is omitted from these responses. The
# setitem method of this class is meant to be called with
# these two values. In the cases where indexes are skipped,
# it takes care of padding out the missing values with None.
def __init__(self, L):
self.L = L
def setitem(self, (item, value)):
diff = item - len(self.L) + 1
if diff > 0:
self.L.extend([None] * diff)
self.L[item] = value
def _statXform(line):
# Parse a STAT response
numMsgs, totalSize = line.split(None, 1)
return int(numMsgs), int(totalSize)
def _listXform(line):
# Parse a LIST response
index, size = line.split(None, 1)
return int(index) - 1, int(size)
def _uidXform(line):
# Parse a UIDL response
index, uid = line.split(None, 1)
return int(index) - 1, uid
def _codeStatusSplit(line):
# Parse an +OK or -ERR response
parts = line.split(' ', 1)
if len(parts) == 1:
return parts[0], ''
return parts
def _dotUnquoter(line):
"""
C{'.'} characters which begin a line of a message are doubled to avoid
confusing with the terminating C{'.\\r\\n'} sequence. This function
unquotes them.
"""
if line.startswith('..'):
return line[1:]
return line
class POP3Client(basic.LineOnlyReceiver, policies.TimeoutMixin):
"""POP3 client protocol implementation class
Instances of this class provide a convenient, efficient API for
retrieving and deleting messages from a POP3 server.
@type startedTLS: C{bool}
@ivar startedTLS: Whether TLS has been negotiated successfully.
@type allowInsecureLogin: C{bool}
@ivar allowInsecureLogin: Indicate whether login() should be
allowed if the server offers no authentication challenge and if
our transport does not offer any protection via encryption.
@type serverChallenge: C{str} or C{None}
@ivar serverChallenge: Challenge received from the server
@type timeout: C{int}
@ivar timeout: Number of seconds to wait before timing out a
connection. If the number is <= 0, no timeout checking will be
performed.
"""
startedTLS = False
allowInsecureLogin = False
timeout = 0
serverChallenge = None
# Capabilities are not allowed to change during the session
# (except when TLS is negotiated), so cache the first response and
# use that for all later lookups
_capCache = None
# Regular expression to search for in the challenge string in the server
# greeting line.
_challengeMagicRe = re.compile('(<[^>]+>)')
# List of pending calls.
# We are a pipelining API but don't actually
# support pipelining on the network yet.
_blockedQueue = None
# The Deferred to which the very next result will go.
_waiting = None
# Whether we dropped the connection because of a timeout
_timedOut = False
# If the server sends an initial -ERR, this is the message it sent
# with it.
_greetingError = None
def _blocked(self, f, *a):
# Internal helper. If commands are being blocked, append
# the given command and arguments to a list and return a Deferred
# that will be chained with the return value of the function
# when it eventually runs. Otherwise, set up for commands to be
# blocked and return None.
if self._blockedQueue is not None:
d = defer.Deferred()
self._blockedQueue.append((d, f, a))
return d
self._blockedQueue = []
return None
def _unblock(self):
# Internal helper. Indicate that a function has completed.
# If there are blocked commands, run the next one. If there
# are not, set up for the next command to not be blocked.
if self._blockedQueue == []:
self._blockedQueue = None
elif self._blockedQueue is not None:
_blockedQueue = self._blockedQueue
self._blockedQueue = None
d, f, a = _blockedQueue.pop(0)
d2 = f(*a)
d2.chainDeferred(d)
# f is a function which uses _blocked (otherwise it wouldn't
# have gotten into the blocked queue), which means it will have
# re-set _blockedQueue to an empty list, so we can put the rest
# of the blocked queue back into it now.
self._blockedQueue.extend(_blockedQueue)
def sendShort(self, cmd, args):
# Internal helper. Send a command to which a short response
# is expected. Return a Deferred that fires when the response
# is received. Block all further commands from being sent until
# the response is received. Transition the state to SHORT.
d = self._blocked(self.sendShort, cmd, args)
if d is not None:
return d
if args:
self.sendLine(cmd + ' ' + args)
else:
self.sendLine(cmd)
self.state = 'SHORT'
self._waiting = defer.Deferred()
return self._waiting
def sendLong(self, cmd, args, consumer, xform):
# Internal helper. Send a command to which a multiline
# response is expected. Return a Deferred that fires when
# the entire response is received. Block all further commands
# from being sent until the entire response is received.
# Transition the state to LONG_INITIAL.
d = self._blocked(self.sendLong, cmd, args, consumer, xform)
if d is not None:
return d
if args:
self.sendLine(cmd + ' ' + args)
else:
self.sendLine(cmd)
self.state = 'LONG_INITIAL'
self._xform = xform
self._consumer = consumer
self._waiting = defer.Deferred()
return self._waiting
# Twisted protocol callback
def connectionMade(self):
if self.timeout > 0:
self.setTimeout(self.timeout)
self.state = 'WELCOME'
self._blockedQueue = []
def timeoutConnection(self):
self._timedOut = True
self.transport.loseConnection()
def connectionLost(self, reason):
if self.timeout > 0:
self.setTimeout(None)
if self._timedOut:
reason = error.TimeoutError()
elif self._greetingError:
reason = ServerErrorResponse(self._greetingError)
d = []
if self._waiting is not None:
d.append(self._waiting)
self._waiting = None
if self._blockedQueue is not None:
d.extend([deferred for (deferred, f, a) in self._blockedQueue])
self._blockedQueue = None
for w in d:
w.errback(reason)
def lineReceived(self, line):
if self.timeout > 0:
self.resetTimeout()
state = self.state
self.state = None
state = getattr(self, 'state_' + state)(line) or state
if self.state is None:
self.state = state
def lineLengthExceeded(self, buffer):
# XXX - We need to be smarter about this
if self._waiting is not None:
waiting, self._waiting = self._waiting, None
waiting.errback(LineTooLong())
self.transport.loseConnection()
# POP3 Client state logic - don't touch this.
def state_WELCOME(self, line):
# WELCOME is the first state. The server sends one line of text
# greeting us, possibly with an APOP challenge. Transition the
# state to WAITING.
code, status = _codeStatusSplit(line)
if code != OK:
self._greetingError = status
self.transport.loseConnection()
else:
m = self._challengeMagicRe.search(status)
if m is not None:
self.serverChallenge = m.group(1)
self.serverGreeting(status)
self._unblock()
return 'WAITING'
def state_WAITING(self, line):
# The server isn't supposed to send us anything in this state.
log.msg("Illegal line from server: " + repr(line))
def state_SHORT(self, line):
# This is the state we are in when waiting for a single
# line response. Parse it and fire the appropriate callback
# or errback. Transition the state back to WAITING.
deferred, self._waiting = self._waiting, None
self._unblock()
code, status = _codeStatusSplit(line)
if code == OK:
deferred.callback(status)
else:
deferred.errback(ServerErrorResponse(status))
return 'WAITING'
def state_LONG_INITIAL(self, line):
# This is the state we are in when waiting for the first
# line of a long response. Parse it and transition the
# state to LONG if it is an okay response; if it is an
# error response, fire an errback, clean up the things
# waiting for a long response, and transition the state
# to WAITING.
code, status = _codeStatusSplit(line)
if code == OK:
return 'LONG'
consumer = self._consumer
deferred = self._waiting
self._consumer = self._waiting = self._xform = None
self._unblock()
deferred.errback(ServerErrorResponse(status, consumer))
return 'WAITING'
def state_LONG(self, line):
# This is the state for each line of a long response.
# If it is the last line, finish things, fire the
# Deferred, and transition the state to WAITING.
# Otherwise, pass the line to the consumer.
if line == '.':
consumer = self._consumer
deferred = self._waiting
self._consumer = self._waiting = self._xform = None
self._unblock()
deferred.callback(consumer)
return 'WAITING'
else:
if self._xform is not None:
self._consumer(self._xform(line))
else:
self._consumer(line)
return 'LONG'
# Callbacks - override these
def serverGreeting(self, greeting):
"""Called when the server has sent us a greeting.
@type greeting: C{str} or C{None}
@param greeting: The status message sent with the server
greeting. For servers implementing APOP authentication, this
        will be a challenge string.
"""
# External API - call these (most of 'em anyway)
def startTLS(self, contextFactory=None):
"""
Initiates a 'STLS' request and negotiates the TLS / SSL
Handshake.
@type contextFactory: C{ssl.ClientContextFactory} @param
contextFactory: The context factory with which to negotiate
TLS. If C{None}, try to create a new one.
@return: A Deferred which fires when the transport has been
secured according to the given contextFactory, or which fails
if the transport cannot be secured.
"""
tls = interfaces.ITLSTransport(self.transport, None)
if tls is None:
return defer.fail(TLSError(
"POP3Client transport does not implement "
"interfaces.ITLSTransport"))
if contextFactory is None:
contextFactory = self._getContextFactory()
if contextFactory is None:
return defer.fail(TLSError(
"POP3Client requires a TLS context to "
"initiate the STLS handshake"))
d = self.capabilities()
d.addCallback(self._startTLS, contextFactory, tls)
return d
def _startTLS(self, caps, contextFactory, tls):
assert not self.startedTLS, "Client and Server are currently communicating via TLS"
if 'STLS' not in caps:
return defer.fail(TLSNotSupportedError(
"Server does not support secure communication "
"via TLS / SSL"))
d = self.sendShort('STLS', None)
d.addCallback(self._startedTLS, contextFactory, tls)
d.addCallback(lambda _: self.capabilities())
return d
def _startedTLS(self, result, context, tls):
self.transport = tls
self.transport.startTLS(context)
self._capCache = None
self.startedTLS = True
return result
def _getContextFactory(self):
try:
from twisted.internet import ssl
except ImportError:
return None
else:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
def login(self, username, password):
"""Log into the server.
If APOP is available it will be used. Otherwise, if TLS is
available an 'STLS' session will be started and plaintext
login will proceed. Otherwise, if the instance attribute
allowInsecureLogin is set to True, insecure plaintext login
will proceed. Otherwise, InsecureAuthenticationDisallowed
will be raised (asynchronously).
@param username: The username with which to log in.
@param password: The password with which to log in.
@rtype: C{Deferred}
@return: A deferred which fires when login has
completed.
"""
d = self.capabilities()
d.addCallback(self._login, username, password)
return d
def _login(self, caps, username, password):
if self.serverChallenge is not None:
return self._apop(username, password, self.serverChallenge)
tryTLS = 'STLS' in caps
#If our transport supports switching to TLS, we might want to try to switch to TLS.
tlsableTransport = interfaces.ITLSTransport(self.transport, None) is not None
# If our transport is not already using TLS, we might want to try to switch to TLS.
nontlsTransport = interfaces.ISSLTransport(self.transport, None) is None
if not self.startedTLS and tryTLS and tlsableTransport and nontlsTransport:
d = self.startTLS()
d.addCallback(self._loginTLS, username, password)
return d
elif self.startedTLS or not nontlsTransport or self.allowInsecureLogin:
return self._plaintext(username, password)
else:
return defer.fail(InsecureAuthenticationDisallowed())
def _loginTLS(self, res, username, password):
return self._plaintext(username, password)
def _plaintext(self, username, password):
# Internal helper. Send a username/password pair, returning a Deferred
# that fires when both have succeeded or fails when the server rejects
# either.
return self.user(username).addCallback(lambda r: self.password(password))
def _apop(self, username, password, challenge):
# Internal helper. Computes and sends an APOP response. Returns
# a Deferred that fires when the server responds to the response.
digest = md5(challenge + password).hexdigest()
return self.apop(username, digest)
def apop(self, username, digest):
"""Perform APOP login.
This should be used in special circumstances only, when it is
known that the server supports APOP authentication, and APOP
authentication is absolutely required. For the common case,
use L{login} instead.
@param username: The username with which to log in.
@param digest: The challenge response to authenticate with.
"""
return self.sendShort('APOP', username + ' ' + digest)
def user(self, username):
"""Send the user command.
This performs the first half of plaintext login. Unless this
is absolutely required, use the L{login} method instead.
@param username: The username with which to log in.
"""
return self.sendShort('USER', username)
def password(self, password):
"""Send the password command.
This performs the second half of plaintext login. Unless this
is absolutely required, use the L{login} method instead.
@param password: The plaintext password with which to authenticate.
"""
return self.sendShort('PASS', password)
def delete(self, index):
"""Delete a message from the server.
@type index: C{int}
@param index: The index of the message to delete.
This is 0-based.
@rtype: C{Deferred}
@return: A deferred which fires when the delete command
is successful, or fails if the server returns an error.
"""
return self.sendShort('DELE', str(index + 1))
def _consumeOrSetItem(self, cmd, args, consumer, xform):
# Internal helper. Send a long command. If no consumer is
# provided, create a consumer that puts results into a list
# and return a Deferred that fires with that list when it
# is complete.
if consumer is None:
L = []
consumer = _ListSetter(L).setitem
return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
return self.sendLong(cmd, args, consumer, xform)
def _consumeOrAppend(self, cmd, args, consumer, xform):
# Internal helper. Send a long command. If no consumer is
# provided, create a consumer that appends results to a list
# and return a Deferred that fires with that list when it is
# complete.
if consumer is None:
L = []
consumer = L.append
return self.sendLong(cmd, args, consumer, xform).addCallback(lambda r: L)
return self.sendLong(cmd, args, consumer, xform)
def capabilities(self, useCache=True):
"""Retrieve the capabilities supported by this server.
Not all servers support this command. If the server does not
support this, it is treated as though it returned a successful
response listing no capabilities. At some future time, this may be
changed to instead seek out information about a server's
capabilities in some other fashion (only if it proves useful to do
so, and only if there are servers still in use which do not support
CAPA but which do support POP3 extensions that are useful).
@type useCache: C{bool}
@param useCache: If set, and if capabilities have been
retrieved previously, just return the previously retrieved
results.
@return: A Deferred which fires with a C{dict} mapping C{str}
to C{None} or C{list}s of C{str}. For example::
C: CAPA
S: +OK Capability list follows
S: TOP
S: USER
S: SASL CRAM-MD5 KERBEROS_V4
S: RESP-CODES
S: LOGIN-DELAY 900
S: PIPELINING
S: EXPIRE 60
S: UIDL
S: IMPLEMENTATION Shlemazle-Plotz-v302
S: .
        will lead to a result of::
| {'TOP': None,
| 'USER': None,
| 'SASL': ['CRAM-MD5', 'KERBEROS_V4'],
| 'RESP-CODES': None,
| 'LOGIN-DELAY': ['900'],
| 'PIPELINING': None,
| 'EXPIRE': ['60'],
| 'UIDL': None,
| 'IMPLEMENTATION': ['Shlemazle-Plotz-v302']}
"""
if useCache and self._capCache is not None:
return defer.succeed(self._capCache)
cache = {}
def consume(line):
tmp = line.split()
if len(tmp) == 1:
cache[tmp[0]] = None
elif len(tmp) > 1:
cache[tmp[0]] = tmp[1:]
def capaNotSupported(err):
err.trap(ServerErrorResponse)
return None
def gotCapabilities(result):
self._capCache = cache
return cache
d = self._consumeOrAppend('CAPA', None, consume, None)
d.addErrback(capaNotSupported).addCallback(gotCapabilities)
return d
def noop(self):
"""Do nothing, with the help of the server.
No operation is performed. The returned Deferred fires when
the server responds.
"""
return self.sendShort("NOOP", None)
def reset(self):
"""Remove the deleted flag from any messages which have it.
The returned Deferred fires when the server responds.
"""
return self.sendShort("RSET", None)
def retrieve(self, index, consumer=None, lines=None):
"""Retrieve a message from the server.
If L{consumer} is not None, it will be called with
each line of the message as it is received. Otherwise,
the returned Deferred will be fired with a list of all
the lines when the message has been completely received.
"""
idx = str(index + 1)
if lines is None:
return self._consumeOrAppend('RETR', idx, consumer, _dotUnquoter)
return self._consumeOrAppend('TOP', '%s %d' % (idx, lines), consumer, _dotUnquoter)
def stat(self):
"""Get information about the size of this mailbox.
The returned Deferred will be fired with a tuple containing
the number or messages in the mailbox and the size (in bytes)
of the mailbox.
"""
return self.sendShort('STAT', None).addCallback(_statXform)
def listSize(self, consumer=None):
"""Retrieve a list of the size of all messages on the server.
If L{consumer} is not None, it will be called with two-tuples
of message index number and message size as they are received.
Otherwise, a Deferred which will fire with a list of B{only}
message sizes will be returned. For messages which have been
deleted, None will be used in place of the message size.
"""
return self._consumeOrSetItem('LIST', None, consumer, _listXform)
def listUID(self, consumer=None):
"""Retrieve a list of the UIDs of all messages on the server.
If L{consumer} is not None, it will be called with two-tuples
of message index number and message UID as they are received.
Otherwise, a Deferred which will fire with of list of B{only}
message UIDs will be returned. For messages which have been
deleted, None will be used in place of the message UID.
"""
return self._consumeOrSetItem('UIDL', None, consumer, _uidXform)
def quit(self):
"""Disconnect from the server.
"""
return self.sendShort('QUIT', None)
__all__ = [
# Exceptions
'InsecureAuthenticationDisallowed', 'LineTooLong', 'POP3ClientError',
'ServerErrorResponse', 'TLSError', 'TLSNotSupportedError',
# Protocol classes
'POP3Client']
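# Illustrative usage sketch (not part of the original module): connect, log in
# and fetch the first message. Host, port and credentials are placeholders, and
# the ClientCreator/reactor wiring is the conventional Twisted client setup.
def _exampleRetrieveFirstMessage(host='pop3.example.com', username='alice',
                                 password='secret'):
    from twisted.internet import reactor
    from twisted.internet.protocol import ClientCreator
    def cbConnected(proto):
        proto.allowInsecureLogin = True  # demo only; prefer TLS in real use
        return proto.login(username, password).addCallback(lambda _: proto)
    def cbLoggedIn(proto):
        d = proto.retrieve(0)  # list of lines of the first message
        d.addCallback(lambda lines: '\r\n'.join(lines))
        d.addBoth(lambda result: proto.quit().addCallback(lambda _: result))
        return d
    d = ClientCreator(reactor, POP3Client).connectTCP(host, 110)
    return d.addCallback(cbConnected).addCallback(cbLoggedIn)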
|
ecolitan/fatics
|
venv/lib/python2.7/site-packages/twisted/mail/pop3client.py
|
Python
|
agpl-3.0
| 24,412 | 0.00168 |
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import absolute_import
from __future__ import unicode_literals
from dnfpluginscore import _, logger
import dnf
import dnf.cli
import dnf.pycomp
import dnfpluginscore
import dnfpluginscore.lib
import os
import re
import shutil
class ConfigManager(dnf.Plugin):
name = 'config-manager'
def __init__(self, base, cli):
super(ConfigManager, self).__init__(base, cli)
self.base = base
self.cli = cli
if self.cli is not None:
self.cli.register_command(ConfigManagerCommand)
class ConfigManagerCommand(dnf.cli.Command):
aliases = ['config-manager']
summary = _('manage dnf configuration options and repositories')
usage = '[%s] [%s]' % (_('OPTIONS'), _('KEYWORDS'))
def __init__(self, cli):
super(ConfigManagerCommand, self).__init__(cli)
self.opts = None
self.parser = None
def configure(self, args):
# setup sack and populate it with enabled repos
demands = self.cli.demands
demands.available_repos = True
self.parser = dnfpluginscore.ArgumentParser(self.aliases[0])
self.parser.add_argument(
'repo', nargs='*',
help=_('repo to modify'))
self.parser.add_argument(
'--save', default=False, action='store_true',
help=_('save the current options (useful with --setopt)'))
self.parser.add_argument(
'--set-enabled', default=False, action='store_true',
help=_('enable the specified repos (automatically saves)'))
self.parser.add_argument(
'--set-disabled', default=False, action='store_true',
help=_('disable the specified repos (automatically saves)'))
self.parser.add_argument(
'--add-repo', default=[], action='append', metavar='URL',
help=_('add (and enable) the repo from the specified file or url'))
self.parser.add_argument(
'--dump', default=False, action='store_true',
help=_('print current configuration values to stdout'))
self.opts = self.parser.parse_args(args)
if self.opts.help_cmd:
print(self.parser.format_help())
return
if (self.opts.save or self.opts.set_enabled or
self.opts.set_disabled or self.opts.add_repo):
demands.root_user = True
def run(self, _args):
"""Execute the util action here."""
if self.opts.help_cmd:
return
if self.opts.set_enabled and self.opts.set_disabled:
logger.error(
_("Error: Trying to enable and disable repos at the same time."))
self.opts.set_enabled = self.opts.set_disabled = False
if self.opts.set_enabled and not self.opts.repo:
logger.error(_("Error: Trying to enable already enabled repos."))
self.opts.set_enabled = False
if self.opts.add_repo:
self.add_repo()
else:
self.modify_repo()
def modify_repo(self):
""" process --set-enabled, --set-disabled and --setopt options """
sbc = self.base.conf
modify = []
if hasattr(self.cli, 'main_setopts') and self.cli.main_setopts:
modify = self.cli.main_setopts.items
if not self.opts.repo or 'main' in self.opts.repo:
if self.opts.dump:
print(self.base.output.fmtSection('main'))
print(self.base.conf.dump())
if self.opts.save and modify:
# modify [main] in dnf.conf
dnfpluginscore.lib.write_raw_configfile(dnf.const.CONF_FILENAME,
'main', sbc.substitutions,
sbc.cfg.options,
sbc.iteritems,
sbc.optionobj,
modify)
if self.opts.set_enabled or self.opts.set_disabled:
self.opts.save = True
modify.append('enabled')
if self.opts.repo:
matched = []
for name in self.opts.repo:
matched.extend(self.base.repos.get_matching(name))
else:
matched = self.base.repos.iter_enabled()
if not matched:
raise dnf.exceptions.Error(_("No matching repo to modify: %s.")
% ', '.join(self.opts.repo))
for repo in sorted(matched):
if self.opts.dump:
print(self.base.output.fmtSection('repo: ' + repo.id))
if self.opts.set_enabled and not repo.enabled:
repo.enable()
elif self.opts.set_disabled and repo.enabled:
repo.disable()
if self.opts.dump:
print(repo.dump())
repo_modify = modify[:]
if (hasattr(self.cli, 'repo_setopts')
and repo.id in self.cli.repo_setopts):
repo_modify.extend(self.cli.repo_setopts[repo.id].items)
if self.opts.save and modify:
dnfpluginscore.lib.write_raw_configfile(repo.repofile,
repo.id,
sbc.substitutions,
repo.cfg.options,
repo.iteritems,
repo.optionobj,
repo_modify)
def add_repo(self):
""" process --add-repo option """
# put repo file into first reposdir which exists or create it
myrepodir = None
for rdir in self.base.conf.reposdir:
if os.path.exists(rdir):
myrepodir = rdir
break
if not myrepodir:
myrepodir = self.base.conf.reposdir[0]
dnf.util.ensure_dir(myrepodir)
for url in self.opts.add_repo:
if dnf.pycomp.urlparse.urlparse(url).scheme == '':
url = 'file://' + os.path.abspath(url)
logger.info(_('Adding repo from: %s'), url)
if url.endswith('.repo'):
# .repo file - download, put into reposdir and enable it
destname = os.path.basename(url)
destname = os.path.join(myrepodir, destname)
try:
f = dnfpluginscore.lib.urlopen(self, None, url, 'w+')
shutil.copy2(f.name, destname)
os.chmod(destname, 0o644)
f.close()
except IOError as e:
logger.error(e)
continue
else:
# just url to repo, create .repo file on our own
repoid = sanitize_url_to_fs(url)
reponame = 'created by dnf config-manager from %s' % url
destname = os.path.join(myrepodir, "%s.repo" % repoid)
content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % \
(repoid, reponame, url)
if not save_to_file(destname, content):
continue
def save_to_file(filename, content):
try:
with open(filename, 'w+') as fd:
dnf.pycomp.write_to_file(fd, content)
os.chmod(filename, 0o644)
except (IOError, OSError) as e:
logger.error(_('Could not save repo to repofile %s: %s'),
filename, e)
return False
return True
# Regular expressions to sanitise cache filenames
RE_SCHEME = re.compile(r'^\w+:/*(\w+:|www\.)?')
RE_SLASH = re.compile(r'[?/:&#|]+')
RE_BEGIN = re.compile(r'^[,.]*')
RE_FINAL = re.compile(r'[,.]*$')
def sanitize_url_to_fs(url):
"""Return a filename suitable for the filesystem
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if RE_SCHEME.match(url):
if dnf.pycomp.PY3:
url = url.encode('idna').decode('utf-8')
else:
if isinstance(url, str):
url = url.decode('utf-8').encode('idna')
else:
url = url.encode('idna')
if isinstance(url, unicode):
url = url.encode('utf-8')
except (UnicodeDecodeError, UnicodeEncodeError, UnicodeError, TypeError):
pass
url = RE_SCHEME.sub("", url)
url = RE_SLASH.sub("_", url)
url = RE_BEGIN.sub("", url)
url = RE_FINAL.sub("", url)
# limit length of url
if len(url) > 250:
parts = url[:185].split('_')
lastindex = 185-len(parts[-1])
csum = dnf.yum.misc.Checksums(['sha256'])
csum.update(url[lastindex:])
url = url[:lastindex] + '_' + csum.hexdigest()
return url
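# Illustrative sketch (not part of the plugin): how a bare URL becomes a repo
# id and a minimal .repo stanza, mirroring add_repo() above. The URL and the
# target directory below are placeholders.
def example_repo_from_url(url='http://example.com/repos/myrepo/',
                          repodir='/etc/yum.repos.d'):
    repoid = sanitize_url_to_fs(url)
    reponame = 'created by dnf config-manager from %s' % url
    destname = os.path.join(repodir, '%s.repo' % repoid)
    content = "[%s]\nname=%s\nbaseurl=%s\nenabled=1\n" % (repoid, reponame, url)
    return save_to_file(destname, content)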
|
mineo/dnf-plugins-core
|
plugins/config_manager.py
|
Python
|
gpl-2.0
| 9,979 | 0.000701 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Cd',
new_name='Release',
),
]
|
ThreeDRadio/playlists
|
backend/catalogue/migrations/0002_auto_20160628_1024.py
|
Python
|
mit
| 342 | 0 |
import abc
import asyncio
import keyword
import collections
import mimetypes
import re
import os
import sys
import inspect
import warnings
from collections.abc import Sized, Iterable, Container
from pathlib import Path
from urllib.parse import urlencode, unquote
from types import MappingProxyType
from . import hdrs
from .abc import AbstractRouter, AbstractMatchInfo, AbstractView
from .protocol import HttpVersion11
from .web_exceptions import (HTTPMethodNotAllowed, HTTPNotFound,
HTTPNotModified, HTTPExpectationFailed)
from .web_reqrep import StreamResponse
from .multidict import upstr
__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
'AbstractResource', 'Resource', 'PlainResource', 'DynamicResource',
'ResourceAdapter',
'AbstractRoute', 'ResourceRoute',
'Route', 'PlainRoute', 'DynamicRoute', 'StaticRoute', 'View')
PY_35 = sys.version_info >= (3, 5)
class AbstractResource(Sized, Iterable):
def __init__(self, *, name=None):
self._name = name
@property
def name(self):
return self._name
@abc.abstractmethod # pragma: no branch
def url(self, **kwargs):
"""Construct url for resource with additional params."""
@asyncio.coroutine
@abc.abstractmethod # pragma: no branch
def resolve(self, method, path):
"""Resolve resource
Return (UrlMappingMatchInfo, allowed_methods) pair."""
@abc.abstractmethod
def get_info(self):
"""Return a dict with additional info useful for introspection"""
@staticmethod
def _append_query(url, query):
if query is not None:
return url + "?" + urlencode(query)
else:
return url
class AbstractRoute(abc.ABC):
METHODS = hdrs.METH_ALL | {hdrs.METH_ANY}
def __init__(self, method, handler, *,
expect_handler=None,
resource=None):
if expect_handler is None:
expect_handler = _defaultExpectHandler
assert asyncio.iscoroutinefunction(expect_handler), \
'Coroutine is expected, got {!r}'.format(expect_handler)
method = upstr(method)
if method not in self.METHODS:
raise ValueError("{} is not allowed HTTP method".format(method))
assert callable(handler), handler
if asyncio.iscoroutinefunction(handler):
pass
elif inspect.isgeneratorfunction(handler):
warnings.warn("Bare generators are deprecated, "
"use @coroutine wrapper", DeprecationWarning)
elif (isinstance(handler, type) and
issubclass(handler, AbstractView)):
pass
else:
@asyncio.coroutine
def handler_wrapper(*args, **kwargs):
result = old_handler(*args, **kwargs)
if asyncio.iscoroutine(result):
result = yield from result
return result
old_handler = handler
handler = handler_wrapper
self._method = method
self._handler = handler
self._expect_handler = expect_handler
self._resource = resource
@property
def method(self):
return self._method
@property
def handler(self):
return self._handler
@property
@abc.abstractmethod
def name(self):
"""Optional route's name, always equals to resource's name."""
@property
def resource(self):
return self._resource
@abc.abstractmethod
def get_info(self):
"""Return a dict with additional info useful for introspection"""
@abc.abstractmethod # pragma: no branch
def url(self, **kwargs):
"""Construct url for route with additional params."""
@asyncio.coroutine
def handle_expect_header(self, request):
return (yield from self._expect_handler(request))
class UrlMappingMatchInfo(dict, AbstractMatchInfo):
def __init__(self, match_dict, route):
super().__init__(match_dict)
self._route = route
@property
def handler(self):
return self._route.handler
@property
def route(self):
return self._route
@property
def expect_handler(self):
return self._route.handle_expect_header
@property
def http_exception(self):
return None
def get_info(self):
return self._route.get_info()
def __repr__(self):
return "<MatchInfo {}: {}>".format(super().__repr__(), self._route)
class MatchInfoError(UrlMappingMatchInfo):
def __init__(self, http_exception):
self._exception = http_exception
super().__init__({}, SystemRoute(self._exception))
@property
def http_exception(self):
return self._exception
def __repr__(self):
return "<MatchInfoError {}: {}>".format(self._exception.status,
self._exception.reason)
@asyncio.coroutine
def _defaultExpectHandler(request):
"""Default handler for Except header.
Just send "100 Continue" to client.
raise HTTPExpectationFailed if value of header is not "100-continue"
"""
expect = request.headers.get(hdrs.EXPECT)
if request.version == HttpVersion11:
if expect.lower() == "100-continue":
request.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
else:
raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
class ResourceAdapter(AbstractResource):
def __init__(self, route):
assert isinstance(route, Route), \
'Instance of Route class is required, got {!r}'.format(route)
super().__init__(name=route.name)
self._route = route
route._resource = self
def url(self, **kwargs):
return self._route.url(**kwargs)
@asyncio.coroutine
def resolve(self, method, path):
route_method = self._route.method
allowed_methods = set()
match_dict = self._route.match(path)
if match_dict is not None:
allowed_methods.add(route_method)
if route_method == hdrs.METH_ANY or route_method == method:
return (UrlMappingMatchInfo(match_dict, self._route),
allowed_methods)
return None, allowed_methods
def get_info(self):
return self._route.get_info()
def __len__(self):
return 1
def __iter__(self):
yield self._route
class Resource(AbstractResource):
def __init__(self, *, name=None):
super().__init__(name=name)
self._routes = []
def add_route(self, method, handler, *,
expect_handler=None):
for route in self._routes:
if route.method == method or route.method == hdrs.METH_ANY:
raise RuntimeError("Added route will never be executed, "
"method {route.method} is "
"already registered".format(route=route))
route = ResourceRoute(method, handler, self,
expect_handler=expect_handler)
self.register_route(route)
return route
def register_route(self, route):
assert isinstance(route, ResourceRoute), \
'Instance of Route class is required, got {!r}'.format(route)
self._routes.append(route)
@asyncio.coroutine
def resolve(self, method, path):
allowed_methods = set()
match_dict = self._match(path)
if match_dict is None:
return None, allowed_methods
for route in self._routes:
route_method = route.method
allowed_methods.add(route_method)
if route_method == method or route_method == hdrs.METH_ANY:
return UrlMappingMatchInfo(match_dict, route), allowed_methods
else:
return None, allowed_methods
def __len__(self):
return len(self._routes)
def __iter__(self):
return iter(self._routes)
class PlainResource(Resource):
def __init__(self, path, *, name=None):
super().__init__(name=name)
self._path = path
def _match(self, path):
# string comparison is about 10 times faster than regexp matching
if self._path == path:
return {}
else:
return None
def get_info(self):
return {'path': self._path}
def url(self, *, query=None):
return self._append_query(self._path, query)
def __repr__(self):
name = "'" + self.name + "' " if self.name is not None else ""
return "<PlainResource {name} {path}".format(name=name,
path=self._path)
class DynamicResource(Resource):
def __init__(self, pattern, formatter, *, name=None):
super().__init__(name=name)
self._pattern = pattern
self._formatter = formatter
def _match(self, path):
match = self._pattern.match(path)
if match is None:
return None
else:
return {key: unquote(value) for key, value in
match.groupdict().items()}
def get_info(self):
return {'formatter': self._formatter,
'pattern': self._pattern}
def url(self, *, parts, query=None):
url = self._formatter.format_map(parts)
return self._append_query(url, query)
def __repr__(self):
name = "'" + self.name + "' " if self.name is not None else ""
return ("<DynamicResource {name} {formatter}"
.format(name=name, formatter=self._formatter))
class ResourceRoute(AbstractRoute):
"""A route with resource"""
def __init__(self, method, handler, resource, *,
expect_handler=None):
super().__init__(method, handler, expect_handler=expect_handler,
resource=resource)
def __repr__(self):
return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
method=self.method, resource=self._resource,
handler=self.handler)
@property
def name(self):
return self._resource.name
def url(self, **kwargs):
"""Construct url for route with additional params."""
return self._resource.url(**kwargs)
def get_info(self):
return self._resource.get_info()
_append_query = staticmethod(Resource._append_query)
class Route(AbstractRoute):
"""Old fashion route"""
def __init__(self, method, handler, name, *, expect_handler=None):
super().__init__(method, handler, expect_handler=expect_handler)
self._name = name
@property
def name(self):
return self._name
@abc.abstractmethod
def match(self, path):
"""Return dict with info for given path or
None if route cannot process path."""
_append_query = staticmethod(Resource._append_query)
class PlainRoute(Route):
def __init__(self, method, handler, name, path, *, expect_handler=None):
super().__init__(method, handler, name, expect_handler=expect_handler)
self._path = path
def match(self, path):
# string comparison is about 10 times faster than regexp matching
if self._path == path:
return {}
else:
return None
def url(self, *, query=None):
return self._append_query(self._path, query)
def get_info(self):
return {'path': self._path}
def __repr__(self):
name = "'" + self.name + "' " if self.name is not None else ""
return "<PlainRoute {name}[{method}] {path} -> {handler!r}".format(
name=name, method=self.method, path=self._path,
handler=self.handler)
class DynamicRoute(Route):
def __init__(self, method, handler, name, pattern, formatter, *,
expect_handler=None):
super().__init__(method, handler, name, expect_handler=expect_handler)
self._pattern = pattern
self._formatter = formatter
def match(self, path):
match = self._pattern.match(path)
if match is None:
return None
else:
return match.groupdict()
def url(self, *, parts, query=None):
url = self._formatter.format_map(parts)
return self._append_query(url, query)
def get_info(self):
return {'formatter': self._formatter,
'pattern': self._pattern}
def __repr__(self):
name = "'" + self.name + "' " if self.name is not None else ""
return ("<DynamicRoute {name}[{method}] {formatter} -> {handler!r}"
.format(name=name, method=self.method,
formatter=self._formatter, handler=self.handler))
class StaticRoute(Route):
def __init__(self, name, prefix, directory, *,
expect_handler=None, chunk_size=256*1024,
response_factory=StreamResponse):
assert prefix.startswith('/'), prefix
assert prefix.endswith('/'), prefix
super().__init__(
'GET', self.handle, name, expect_handler=expect_handler)
self._prefix = prefix
self._prefix_len = len(self._prefix)
try:
directory = Path(directory)
if str(directory).startswith('~'):
directory = Path(os.path.expanduser(str(directory)))
directory = directory.resolve()
if not directory.is_dir():
raise ValueError('Not a directory')
except (FileNotFoundError, ValueError) as error:
raise ValueError(
"No directory exists at '{}'".format(directory)) from error
self._directory = directory
self._chunk_size = chunk_size
self._response_factory = response_factory
if bool(os.environ.get("AIOHTTP_NOSENDFILE")):
self._sendfile = self._sendfile_fallback
def match(self, path):
if not path.startswith(self._prefix):
return None
return {'filename': path[self._prefix_len:]}
def url(self, *, filename, query=None):
if isinstance(filename, Path):
filename = str(filename)
while filename.startswith('/'):
filename = filename[1:]
url = self._prefix + filename
return self._append_query(url, query)
def get_info(self):
return {'directory': self._directory,
'prefix': self._prefix}
def _sendfile_cb(self, fut, out_fd, in_fd, offset, count, loop,
registered):
if registered:
loop.remove_writer(out_fd)
try:
n = os.sendfile(out_fd, in_fd, offset, count)
if n == 0: # EOF reached
n = count
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
fut.set_exception(exc)
return
if n < count:
loop.add_writer(out_fd, self._sendfile_cb, fut, out_fd, in_fd,
offset + n, count - n, loop, True)
else:
fut.set_result(None)
@asyncio.coroutine
def _sendfile_system(self, req, resp, fobj, count):
"""
        Write `count` bytes of `fobj` to `resp` using the ``sendfile``
        system call.
        `req` should be a :obj:`aiohttp.web.Request` instance.
        `resp` should be a :obj:`aiohttp.web.StreamResponse` instance.
        `fobj` should be an open file object.
        `count` should be an integer > 0.
"""
transport = req.transport
if transport.get_extra_info("sslcontext"):
yield from self._sendfile_fallback(req, resp, fobj, count)
return
yield from resp.drain()
loop = req.app.loop
out_fd = transport.get_extra_info("socket").fileno()
in_fd = fobj.fileno()
fut = asyncio.Future(loop=loop)
self._sendfile_cb(fut, out_fd, in_fd, 0, count, loop, False)
yield from fut
@asyncio.coroutine
def _sendfile_fallback(self, req, resp, fobj, count):
"""
Mimic the :meth:`_sendfile_system` method, but without using the
``sendfile`` system call. This should be used on systems that don't
support the ``sendfile`` system call.
To avoid blocking the event loop & to keep memory usage low, `fobj` is
transferred in chunks controlled by the `chunk_size` argument to
:class:`StaticRoute`.
"""
chunk_size = self._chunk_size
chunk = fobj.read(chunk_size)
while chunk and count > chunk_size:
resp.write(chunk)
yield from resp.drain()
count = count - chunk_size
chunk = fobj.read(chunk_size)
if chunk:
resp.write(chunk[:count])
yield from resp.drain()
if hasattr(os, "sendfile"): # pragma: no cover
_sendfile = _sendfile_system
else: # pragma: no cover
_sendfile = _sendfile_fallback
@asyncio.coroutine
def handle(self, request):
filename = request.match_info['filename']
try:
filepath = self._directory.joinpath(filename).resolve()
filepath.relative_to(self._directory)
except (ValueError, FileNotFoundError) as error:
# relatively safe
raise HTTPNotFound() from error
except Exception as error:
# perm error or other kind!
request.logger.exception(error)
raise HTTPNotFound() from error
# Make sure that filepath is a file
if not filepath.is_file():
raise HTTPNotFound()
st = filepath.stat()
modsince = request.if_modified_since
if modsince is not None and st.st_mtime <= modsince.timestamp():
raise HTTPNotModified()
ct, encoding = mimetypes.guess_type(str(filepath))
if not ct:
ct = 'application/octet-stream'
resp = self._response_factory()
resp.content_type = ct
if encoding:
resp.headers[hdrs.CONTENT_ENCODING] = encoding
resp.last_modified = st.st_mtime
file_size = st.st_size
resp.content_length = file_size
resp.set_tcp_cork(True)
try:
yield from resp.prepare(request)
with filepath.open('rb') as f:
yield from self._sendfile(request, resp, f, file_size)
finally:
resp.set_tcp_nodelay(True)
return resp
def __repr__(self):
name = "'" + self.name + "' " if self.name is not None else ""
return "<StaticRoute {name}[{method}] {path} -> {directory!r}".format(
name=name, method=self.method, path=self._prefix,
directory=self._directory)
class SystemRoute(Route):
def __init__(self, http_exception):
super().__init__(hdrs.METH_ANY, self._handler, None)
self._http_exception = http_exception
def url(self, **kwargs):
raise RuntimeError(".url() is not allowed for SystemRoute")
def match(self, path):
return None
def get_info(self):
return {'http_exception': self._http_exception}
@asyncio.coroutine
def _handler(self, request):
raise self._http_exception
@property
def status(self):
return self._http_exception.status
@property
def reason(self):
return self._http_exception.reason
def __repr__(self):
return "<SystemRoute {self.status}: {self.reason}>".format(self=self)
class View(AbstractView):
@asyncio.coroutine
def __iter__(self):
if self.request.method not in hdrs.METH_ALL:
self._raise_allowed_methods()
method = getattr(self, self.request.method.lower(), None)
if method is None:
self._raise_allowed_methods()
resp = yield from method()
return resp
if PY_35:
def __await__(self):
return (yield from self.__iter__())
def _raise_allowed_methods(self):
allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m)}
raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
class ResourcesView(Sized, Iterable, Container):
def __init__(self, resources):
self._resources = resources
def __len__(self):
return len(self._resources)
def __iter__(self):
yield from self._resources
def __contains__(self, resource):
return resource in self._resources
class RoutesView(Sized, Iterable, Container):
def __init__(self, resources):
self._routes = []
for resource in resources:
for route in resource:
self._routes.append(route)
def __len__(self):
return len(self._routes)
def __iter__(self):
yield from self._routes
def __contains__(self, route):
return route in self._routes
class UrlDispatcher(AbstractRouter, collections.abc.Mapping):
DYN = re.compile(r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*)\}$')
DYN_WITH_RE = re.compile(
r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}$')
GOOD = r'[^{}/]+'
ROUTE_RE = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
NAME_SPLIT_RE = re.compile('[.:-]')
def __init__(self):
super().__init__()
self._resources = []
self._named_resources = {}
@asyncio.coroutine
def resolve(self, request):
path = request.raw_path
method = request.method
allowed_methods = set()
for resource in self._resources:
match_dict, allowed = yield from resource.resolve(method, path)
if match_dict is not None:
return match_dict
else:
allowed_methods |= allowed
else:
if allowed_methods:
return MatchInfoError(HTTPMethodNotAllowed(method,
allowed_methods))
else:
return MatchInfoError(HTTPNotFound())
def __iter__(self):
return iter(self._named_resources)
def __len__(self):
return len(self._named_resources)
def __contains__(self, name):
return name in self._named_resources
def __getitem__(self, name):
return self._named_resources[name]
def resources(self):
return ResourcesView(self._resources)
def routes(self):
return RoutesView(self._resources)
def named_resources(self):
return MappingProxyType(self._named_resources)
def named_routes(self):
# NB: it's ambiguous but it's really resources.
warnings.warn("Use .named_resources instead", DeprecationWarning)
return self.named_resources()
def register_route(self, route):
warnings.warn("Use resource-based interface", DeprecationWarning)
resource = ResourceAdapter(route)
self._reg_resource(resource)
def _reg_resource(self, resource):
assert isinstance(resource, AbstractResource), \
'Instance of AbstractResource class is required, got {!r}'.format(
resource)
name = resource.name
if name is not None:
parts = self.NAME_SPLIT_RE.split(name)
for part in parts:
if not part.isidentifier() or keyword.iskeyword(part):
raise ValueError('Incorrect route name {!r}, '
'the name should be a sequence of '
'python identifiers separated '
                                     'by dash, dot or colon'.format(name))
if name in self._named_resources:
raise ValueError('Duplicate {!r}, '
'already handled by {!r}'
.format(name, self._named_resources[name]))
self._named_resources[name] = resource
self._resources.append(resource)
def add_resource(self, path, *, name=None):
if not path.startswith('/'):
raise ValueError("path should be started with /")
if not ('{' in path or '}' in path or self.ROUTE_RE.search(path)):
resource = PlainResource(path, name=name)
self._reg_resource(resource)
return resource
pattern = ''
formatter = ''
for part in self.ROUTE_RE.split(path):
match = self.DYN.match(part)
if match:
pattern += '(?P<{}>{})'.format(match.group('var'), self.GOOD)
formatter += '{' + match.group('var') + '}'
continue
match = self.DYN_WITH_RE.match(part)
if match:
pattern += '(?P<{var}>{re})'.format(**match.groupdict())
formatter += '{' + match.group('var') + '}'
continue
if '{' in part or '}' in part:
raise ValueError("Invalid path '{}'['{}']".format(path, part))
formatter += part
pattern += re.escape(part)
try:
compiled = re.compile('^' + pattern + '$')
except re.error as exc:
raise ValueError(
"Bad pattern '{}': {}".format(pattern, exc)) from None
resource = DynamicResource(compiled, formatter, name=name)
self._reg_resource(resource)
return resource
def add_route(self, method, path, handler,
*, name=None, expect_handler=None):
resource = self.add_resource(path, name=name)
return resource.add_route(method, handler,
expect_handler=expect_handler)
def add_static(self, prefix, path, *, name=None, expect_handler=None,
chunk_size=256*1024, response_factory=StreamResponse):
"""
        Adds a static files view.
        :param prefix: url prefix
        :param path: folder with files
"""
assert prefix.startswith('/')
if not prefix.endswith('/'):
prefix += '/'
route = StaticRoute(name, prefix, path,
expect_handler=expect_handler,
chunk_size=chunk_size,
response_factory=response_factory)
self.register_route(route)
return route
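# Illustrative sketch (not part of the original module): registering plain,
# dynamic and static routes directly on a dispatcher. Handlers, paths and the
# static directory are placeholders; applications normally reach the dispatcher
# through web.Application().router instead of instantiating it directly.
def _example_routes():
    @asyncio.coroutine
    def index(request):
        return StreamResponse()
    @asyncio.coroutine
    def user(request):
        name = request.match_info['name']  # filled from the {name} pattern
        return StreamResponse()
    router = UrlDispatcher()
    router.add_route('GET', '/', index)
    router.add_route('GET', '/users/{name}', user, name='user')
    router.add_static('/static/', os.getcwd())  # any existing directory works
    # named resources can build URLs back from parts:
    return router['user'].url(parts={'name': 'alice'})  # '/users/alice'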
|
jashandeep-sohi/aiohttp
|
aiohttp/web_urldispatcher.py
|
Python
|
apache-2.0
| 26,407 | 0 |
# -*- coding: utf-8 -*-
"""Umgang mit mehrdimensionalen Arrays.
Im Folgenden wird der Umgang mit mehrdimensionalen Arrays
veranschaulicht. Die Beispiele zeigen zweidimensionale Arrays
(Matrizen), das Verhalten lässt sich jedoch auf Arrays höherer
Dimensionen übertragen.
"""
import numpy as np
# Definition zufälliger Matrizen
A = np.random.random_integers(0, 10, (3, 3))
B = np.random.random_integers(0, 10, (3, 3))
# Rechenarten
A + B # Addition
A - B # Subraktion
A * B # Multiplikation
A @ B # Matrixmultiplikation
np.cross(A, B) # Kreuzprodukt
A.T # Transponieren einer Matrix
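# Addition (not in the original script): np.random.random_integers is
# deprecated in current NumPy releases; the Generator API produces the same
# kind of matrix (note the exclusive upper bound):
rng = np.random.default_rng()
C = rng.integers(0, 11, (3, 3))  # random integers in [0, 10], like A and B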
|
lkluft/python-toolbox
|
scripts/matrixoperationen.py
|
Python
|
gpl-3.0
| 599 | 0 |
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTDefaultUnitInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"key": (str,), # noqa: E501
"value": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"key": "key", # noqa: E501
"value": "value", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_default_unit_info.BTDefaultUnitInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
key (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
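# Illustrative sketch (not part of the generated module): the model is a plain
# key/value pair, so constructing and reading it is straightforward. The unit
# key and value below are placeholders.
def _example_default_unit():
    info = BTDefaultUnitInfo(key="LENGTH", value="millimeter")
    return info.key, info.value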
|
onshape-public/onshape-clients
|
python/onshape_client/oas/models/bt_default_unit_info.py
|
Python
|
mit
| 4,695 | 0 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from calibre.ebooks.docx.names import XPath, get
class Field(object):
def __init__(self, start):
self.start = start
self.end = None
self.contents = []
self.instructions = []
def add_instr(self, elem):
raw = elem.text
if not raw:
return
name, rest = raw.strip().partition(' ')[0::2]
self.instructions.append((name, rest.strip()))
WORD, FLAG = 0, 1
scanner = re.Scanner([
(r'\\\S{1}', lambda s, t: (t, FLAG)), # A flag of the form \x
(r'"[^"]*"', lambda s, t: (t[1:-1], WORD)), # Quoted word
(r'[^\s\\"]\S*', lambda s, t: (t, WORD)), # A non-quoted word, must not start with a backslash or a space or a quote
(r'\s+', None),
], flags=re.DOTALL)
def parse_hyperlink(raw, log):
ans = {}
last_option = None
raw = raw.replace('\\\\', '\x01').replace('\\"', '\x02')
for token, token_type in scanner.scan(raw)[0]:
token = token.replace('\x01', '\\').replace('\x02', '"')
if token_type is FLAG:
last_option = {'l':'anchor', 'm':'image-map', 'n':'target', 'o':'title', 't':'target'}.get(token[1], None)
if last_option is not None:
ans[last_option] = None
elif token_type is WORD:
if last_option is None:
ans['url'] = token
else:
ans[last_option] = token
last_option = None
return ans
class Fields(object):
def __init__(self):
self.fields = []
def __call__(self, doc, log):
stack = []
for elem in XPath(
'//*[name()="w:p" or name()="w:r" or name()="w:instrText" or (name()="w:fldChar" and (@w:fldCharType="begin" or @w:fldCharType="end"))]')(doc):
if elem.tag.endswith('}fldChar'):
typ = get(elem, 'w:fldCharType')
if typ == 'begin':
stack.append(Field(elem))
self.fields.append(stack[-1])
else:
try:
stack.pop().end = elem
except IndexError:
pass
elif elem.tag.endswith('}instrText'):
if stack:
stack[-1].add_instr(elem)
else:
if stack:
stack[-1].contents.append(elem)
# Parse hyperlink fields
self.hyperlink_fields = []
for field in self.fields:
if len(field.instructions) == 1 and field.instructions[0][0] == 'HYPERLINK':
hl = parse_hyperlink(field.instructions[0][1], log)
if hl:
if 'target' in hl and hl['target'] is None:
hl['target'] = '_blank'
all_runs = []
current_runs = []
# We only handle spans in a single paragraph
# being wrapped in <a>
for x in field.contents:
if x.tag.endswith('}p'):
if current_runs:
all_runs.append(current_runs)
current_runs = []
elif x.tag.endswith('}r'):
current_runs.append(x)
if current_runs:
all_runs.append(current_runs)
for runs in all_runs:
self.hyperlink_fields.append((hl, runs))
def test_parse_hyperlink():
import unittest
class TestParseHyperLink(unittest.TestCase):
def test_parsing(self):
self.assertEqual(parse_hyperlink(
r'\l anchor1', None), {'anchor':'anchor1'})
self.assertEqual(parse_hyperlink(
r'www.calibre-ebook.com', None), {'url':'www.calibre-ebook.com'})
self.assertEqual(parse_hyperlink(
r'www.calibre-ebook.com \t target \o tt', None), {'url':'www.calibre-ebook.com', 'target':'target', 'title': 'tt'})
self.assertEqual(parse_hyperlink(
r'"c:\\Some Folder"', None), {'url': 'c:\\Some Folder'})
suite = unittest.TestLoader().loadTestsFromTestCase(TestParseHyperLink)
unittest.TextTestRunner(verbosity=4).run(suite)
|
insomnia-lab/calibre
|
src/calibre/ebooks/docx/fields.py
|
Python
|
gpl-3.0
| 4,524 | 0.0042 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Files API.
.. deprecated:: 1.8.1
Use Google Cloud Storage Client library instead.
Lightweight record format.
This format implements log file format from leveldb:
http://leveldb.googlecode.com/svn/trunk/doc/log_format.txt
Full specification of format follows in case leveldb decides to change it.
The log file contents are a sequence of 32KB blocks. The only
exception is that the tail of the file may contain a partial block.
Each block consists of a sequence of records:
block := record* trailer?
record :=
checksum: uint32 // masked crc32c of type and data[]
length: uint16
type: uint8 // One of FULL, FIRST, MIDDLE, LAST
data: uint8[length]
A record never starts within the last six bytes of a block (since it
won't fit). Any leftover bytes here form the trailer, which must
consist entirely of zero bytes and must be skipped by readers.
Aside: if exactly seven bytes are left in the current block, and a new
non-zero length record is added, the writer must emit a FIRST record
(which contains zero bytes of user data) to fill up the trailing seven
bytes of the block and then emit all of the user data in subsequent
blocks.
More types may be added in the future. Some Readers may skip record
types they do not understand, others may report that some data was
skipped.
FULL == 1
FIRST == 2
MIDDLE == 3
LAST == 4
The FULL record contains the contents of an entire user record.
FIRST, MIDDLE, LAST are types used for user records that have been
split into multiple fragments (typically because of block boundaries).
FIRST is the type of the first fragment of a user record, LAST is the
type of the last fragment of a user record, and MID is the type of all
interior fragments of a user record.
Example: consider a sequence of user records:
A: length 1000
B: length 97270
C: length 8000
A will be stored as a FULL record in the first block.
B will be split into three fragments: first fragment occupies the rest
of the first block, second fragment occupies the entirety of the
second block, and the third fragment occupies a prefix of the third
block. This will leave six bytes free in the third block, which will
be left empty as the trailer.
C will be stored as a FULL record in the fourth block.
"""
import logging
import struct
import google
from google.appengine.api.files import crc32c
BLOCK_SIZE = 32 * 1024
HEADER_FORMAT = '<IHB'
HEADER_LENGTH = struct.calcsize(HEADER_FORMAT)
RECORD_TYPE_NONE = 0
RECORD_TYPE_FULL = 1
RECORD_TYPE_FIRST = 2
RECORD_TYPE_MIDDLE = 3
RECORD_TYPE_LAST = 4
class Error(Exception):
"""Base class for exceptions in this module."""
class InvalidRecordError(Error):
"""Raised when invalid record encountered."""
class FileWriter(object):
"""Interface specification for writers to be used with records module."""
def write(self, data):
"""Write data to the file.
Args:
data: byte array, string or iterable over bytes.
"""
raise NotImplementedError()
class FileReader(object):
"""Interface specification for writers to be used with recordrecords module.
FileReader defines a reader with position and efficient seek/position
determining. All reads occur at current position.
"""
def read(self, size):
"""Read data from file.
Reads data from current position and advances position past the read data
block.
Args:
size: number of bytes to read.
Returns:
      iterable over bytes. If the number of bytes read is less than the 'size' argument,
it is assumed that end of file was reached.
"""
raise NotImplementedError()
def tell(self):
"""Get current file position.
Returns:
current position as a byte offset in the file as integer.
"""
raise NotImplementedError()
_CRC_MASK_DELTA = 0xa282ead8
def _mask_crc(crc):
"""Mask crc.
Args:
crc: integer crc.
Returns:
masked integer crc.
"""
return (((crc >> 15) | (crc << 17)) + _CRC_MASK_DELTA) & 0xFFFFFFFFL
def _unmask_crc(masked_crc):
"""Unmask crc.
Args:
masked_crc: masked integer crc.
  Returns:
    original crc.
"""
rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFFL
return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFFL
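# Masking is a 32-bit rotation plus a fixed offset, and unmasking reverses
# both steps, so _unmask_crc(_mask_crc(crc)) == crc for any 32-bit crc value.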
class RecordsWriter(object):
"""A writer for records format.
This writer should be used only inside with statement:
with records.RecordsWriter(file) as writer:
writer.write("record")
RecordsWriter will pad last block with 0 when exiting with statement scope.
"""
def __init__(self, writer, _pad_last_block=True):
"""Constructor.
Args:
writer: a writer to use. Should conform to FileWriter interface.
"""
self.__writer = writer
self.__position = 0
self.__entered = False
self.__pad_last_block = _pad_last_block
def __write_record(self, record_type, data):
"""Write single physical record."""
length = len(data)
crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
crc = crc32c.crc_update(crc, data)
crc = crc32c.crc_finalize(crc)
self.__writer.write(
struct.pack(HEADER_FORMAT, _mask_crc(crc), length, record_type))
self.__writer.write(data)
self.__position += HEADER_LENGTH + length
def write(self, data):
"""Write single record.
Args:
data: record data to write as string, byte array or byte sequence.
"""
if not self.__entered:
raise Exception("RecordWriter should be used only with 'with' statement.")
block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE
if block_remaining < HEADER_LENGTH:
self.__writer.write('\x00' * block_remaining)
self.__position += block_remaining
block_remaining = BLOCK_SIZE
if block_remaining < len(data) + HEADER_LENGTH:
first_chunk = data[:block_remaining - HEADER_LENGTH]
self.__write_record(RECORD_TYPE_FIRST, first_chunk)
data = data[len(first_chunk):]
while True:
block_remaining = BLOCK_SIZE - self.__position % BLOCK_SIZE
if block_remaining >= len(data) + HEADER_LENGTH:
self.__write_record(RECORD_TYPE_LAST, data)
break
else:
chunk = data[:block_remaining - HEADER_LENGTH]
self.__write_record(RECORD_TYPE_MIDDLE, chunk)
data = data[len(chunk):]
else:
self.__write_record(RECORD_TYPE_FULL, data)
def __enter__(self):
self.__entered = True
return self
def __exit__(self, atype, value, traceback):
self.close()
def close(self):
if self.__pad_last_block:
pad_length = BLOCK_SIZE - self.__position % BLOCK_SIZE
if pad_length and pad_length != BLOCK_SIZE:
self.__writer.write('\x00' * pad_length)
class RecordsReader(object):
"""A reader for records format."""
def __init__(self, reader):
self.__reader = reader
def __try_read_record(self):
"""Try reading a record.
Returns:
(data, record_type) tuple.
Raises:
EOFError: when end of file was reached.
InvalidRecordError: when valid record could not be read.
"""
block_remaining = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE
if block_remaining < HEADER_LENGTH:
return ('', RECORD_TYPE_NONE)
header = self.__reader.read(HEADER_LENGTH)
if len(header) != HEADER_LENGTH:
raise EOFError('Read %s bytes instead of %s' %
(len(header), HEADER_LENGTH))
(masked_crc, length, record_type) = struct.unpack(HEADER_FORMAT, header)
crc = _unmask_crc(masked_crc)
if length + HEADER_LENGTH > block_remaining:
raise InvalidRecordError('Length is too big')
data = self.__reader.read(length)
if len(data) != length:
raise EOFError('Not enough data read. Expected: %s but got %s' %
(length, len(data)))
if record_type == RECORD_TYPE_NONE:
return ('', record_type)
actual_crc = crc32c.crc_update(crc32c.CRC_INIT, [record_type])
actual_crc = crc32c.crc_update(actual_crc, data)
actual_crc = crc32c.crc_finalize(actual_crc)
if actual_crc != crc:
raise InvalidRecordError('Data crc does not match')
return (data, record_type)
def __sync(self):
"""Skip reader to the block boundary."""
pad_length = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE
if pad_length and pad_length != BLOCK_SIZE:
data = self.__reader.read(pad_length)
if len(data) != pad_length:
raise EOFError('Read %d bytes instead of %d' %
(len(data), pad_length))
def read(self):
"""Reads record from current position in reader."""
data = None
while True:
last_offset = self.tell()
try:
(chunk, record_type) = self.__try_read_record()
if record_type == RECORD_TYPE_NONE:
self.__sync()
elif record_type == RECORD_TYPE_FULL:
if data is not None:
logging.warning(
"Ordering corruption: Got FULL record while already "
"in a chunk at offset %d", last_offset)
return chunk
elif record_type == RECORD_TYPE_FIRST:
if data is not None:
logging.warning(
"Ordering corruption: Got FIRST record while already "
"in a chunk at offset %d", last_offset)
data = chunk
elif record_type == RECORD_TYPE_MIDDLE:
if data is None:
logging.warning(
"Ordering corruption: Got MIDDLE record before FIRST "
"record at offset %d", last_offset)
else:
data += chunk
elif record_type == RECORD_TYPE_LAST:
if data is None:
logging.warning(
"Ordering corruption: Got LAST record but no chunk is in "
"progress at offset %d", last_offset)
else:
result = data + chunk
data = None
return result
else:
raise InvalidRecordError("Unsupported record type: %s" % record_type)
except InvalidRecordError, e:
logging.warning("Invalid record encountered at %s (%s). Syncing to "
"the next block", last_offset, e)
data = None
self.__sync()
def __iter__(self):
try:
while True:
yield self.read()
except EOFError:
pass
def tell(self):
"""Return file's current position."""
return self.__reader.tell()
def seek(self, *args, **kwargs):
"""Set the file's current position.
Arguments are passed directly to the underlying reader.
"""
return self.__reader.seek(*args, **kwargs)
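# Illustrative round-trip sketch: FileWriter/FileReader can be satisfied by
# simple in-memory buffers, which lets RecordsWriter/RecordsReader be
# exercised without App Engine files. Running it assumes this module and its
# crc32c dependency import cleanly; the classes below exist only for the demo.
if __name__ == '__main__':
  import StringIO

  class _MemWriter(FileWriter):
    """Collects written bytes in an in-memory buffer."""

    def __init__(self):
      self.buf = StringIO.StringIO()

    def write(self, data):
      self.buf.write(data)

  class _MemReader(FileReader):
    """Reads back the bytes produced by _MemWriter."""

    def __init__(self, data):
      self.buf = StringIO.StringIO(data)

    def read(self, size):
      return self.buf.read(size)

    def tell(self):
      return self.buf.tell()

  sink = _MemWriter()
  with RecordsWriter(sink) as writer:
    writer.write('first record')
    writer.write('x' * (BLOCK_SIZE + 100))  # long enough to force a FIRST/LAST split
  for record in RecordsReader(_MemReader(sink.buf.getvalue())):
    print 'read record of %d bytes' % len(record)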
|
ychen820/microblog
|
y/google-cloud-sdk/platform/google_appengine/google/appengine/api/files/records.py
|
Python
|
bsd-3-clause
| 11,204 | 0.006426 |
from django.db.models import Min
from django.http import Http404
from django.utils.encoding import force_str
from django.utils.translation import gettext as _
from django.views.generic import DetailView, YearArchiveView
from django.views.generic.detail import SingleObjectMixin
from spectator.core import app_settings
from spectator.core.views import PaginatedListView
from .models import Event, Venue, Work
class EventListView(PaginatedListView):
"""
Includes context of counts of all different Event types,
plus the kind of event this page is for,
plus adding `event_list` (synonym for `object_list`).
Expects a `kind_slug` like 'movies', 'gigs', 'concerts', etc.
"""
model = Event
ordering = [
"-date",
]
def get(self, request, *args, **kwargs):
slug = self.kwargs.get("kind_slug", None)
if slug is not None and slug not in Event.get_valid_kind_slugs():
raise Http404("Invalid kind_slug: '%s'" % slug)
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self.get_event_counts())
# e.g. 'concert'
kind = self.get_event_kind()
context["event_kind"] = kind
if kind:
# e.g. 'Concert':
context["event_kind_name"] = Event.get_kind_name(kind)
# e.g. 'Concerts':
context["event_kind_name_plural"] = Event.get_kind_name_plural(kind)
context["event_list"] = context["object_list"]
return context
def get_event_counts(self):
"""
Returns a dict like:
{'counts': {
'all': 30,
'movie': 12,
'gig': 10,
}}
"""
counts = {
"all": Event.objects.count(),
}
for val in Event.Kind.values:
# e.g. 'movie_count':
counts[val] = Event.objects.filter(kind=val).count()
return {
"counts": counts,
}
def get_event_kind(self):
"""
Unless we're on the front page we'll have a kind_slug like 'movies'.
We need to translate that into an event `kind` like 'movie'.
"""
slug = self.kwargs.get("kind_slug", None)
if slug is None:
return None # Front page; showing all Event kinds.
else:
slugs_to_kinds = {v: k for k, v in Event.Kind.slugs().items()}
return slugs_to_kinds.get(slug, None)
def get_queryset(self):
"Restrict to a single kind of event, if any, and include Venue data."
qs = super().get_queryset()
kind = self.get_event_kind()
if kind is not None:
qs = qs.filter(kind=kind)
qs = qs.select_related("venue")
return qs
class EventDetailView(DetailView):
model = Event
class EventYearArchiveView(YearArchiveView):
allow_empty = True
date_field = "date"
make_object_list = True
model = Event
ordering = "date"
def get_queryset(self):
"Reduce the number of queries and speed things up."
qs = super().get_queryset()
qs = qs.select_related("venue")
return qs
def get_dated_items(self):
items, qs, info = super().get_dated_items()
if "year" in info and info["year"]:
# Get the earliest date we have an Event for:
date_min = Event.objects.aggregate(Min("date"))["date__min"]
# Make it a 'yyyy-01-01' date:
min_year_date = date_min.replace(month=1, day=1)
if info["year"] < min_year_date:
# The year we're viewing is before our minimum date, so 404.
raise Http404(
_("No %(verbose_name_plural)s available")
% {
"verbose_name_plural": force_str(
qs.model._meta.verbose_name_plural
)
}
)
elif info["year"] == min_year_date:
# This is the earliest year we have events for, so
# there is no previous year.
info["previous_year"] = None
return items, qs, info
# WORKS
class WorkMixin:
kind_slug = None
def get(self, request, *args, **kwargs):
slug = self.kwargs.get("kind_slug", None)
if slug is not None and slug not in Work.get_valid_kind_slugs():
raise Http404("Invalid kind_slug: '%s'" % slug)
else:
self.kind_slug = slug
return super().get(request, *args, **kwargs)
def get_work_kind(self):
"""
We'll have a kind_slug like 'movies'.
We need to translate that into a work `kind` like 'movie'.
"""
slugs_to_kinds = {v: k for k, v in Work.Kind.slugs().items()}
return slugs_to_kinds.get(self.kind_slug, None)
class WorkListView(WorkMixin, PaginatedListView):
model = Work
def get_queryset(self):
kind = self.get_work_kind()
qs = super().get_queryset()
qs = qs.filter(kind=kind)
qs = qs.prefetch_related("roles__creator")
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# 'movie', 'Movie', and 'Movies' respectively:
kind = self.get_work_kind()
kind_name = Work.get_kind_name(kind)
kind_name_plural = Work.get_kind_name_plural(kind)
context["page_title"] = kind_name_plural
context["breadcrumb_list_title"] = kind_name_plural
context["work_kind"] = kind
context["work_kind_name"] = kind_name
context["work_kind_name_plural"] = kind_name_plural
context["breadcrumb_list_url"] = self.model().get_list_url(
kind_slug=self.kind_slug
)
return context
class WorkDetailView(WorkMixin, DetailView):
model = Work
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
kind = self.get_work_kind()
context["breadcrumb_list_title"] = Work.get_kind_name_plural(kind)
context["breadcrumb_list_url"] = self.model().get_list_url(
kind_slug=self.kind_slug
)
return context
# VENUES
class VenueListView(PaginatedListView):
model = Venue
ordering = ["name_sort"]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["country_list"] = self.get_countries()
return context
def get_countries(self):
"""
Returns a list of dicts, one per country that has at least one Venue
in it.
Each dict has 'code' and 'name' elements.
The list is sorted by the country 'name's.
"""
qs = (
Venue.objects.values("country")
.exclude(country="")
.distinct()
.order_by("country")
)
countries = []
for c in qs:
countries.append(
{"code": c["country"], "name": Venue.get_country_name(c["country"])}
)
return sorted(countries, key=lambda k: k["name"])
class VenueDetailView(SingleObjectMixin, PaginatedListView):
template_name = "spectator_events/venue_detail.html"
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=Venue.objects.all())
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["venue"] = self.object
context["event_list"] = context["object_list"]
if self.object.latitude is None or self.object.longitude is None:
context["SPECTATOR_MAPS"] = {"enable": False}
else:
context["SPECTATOR_MAPS"] = app_settings.MAPS
return context
def get_queryset(self):
return self.object.event_set.order_by("-date")
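# Illustrative URLconf sketch (the route strings and names are assumptions,
# not this app's actual urls.py) showing how these class-based views are
# typically wired up:
#
#   from django.urls import path
#
#   urlpatterns = [
#       path("", EventListView.as_view(), name="event_list"),
#       path("types/<slug:kind_slug>/", EventListView.as_view(), name="event_list_kind"),
#       path("<int:year>/", EventYearArchiveView.as_view(), name="event_year_archive"),
#       path("venues/", VenueListView.as_view(), name="venue_list"),
#   ]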
|
philgyford/django-spectator
|
spectator/events/views.py
|
Python
|
mit
| 8,037 | 0.000249 |
"""
.. module:: CreateProfileForm
:synopsis: A form for completing a user's profile.
.. moduleauthor:: Dan Schlosser <dan@schlosser.io>
"""
from flask.ext.wtf import Form
from wtforms import StringField, HiddenField
from wtforms.validators import URL, Email, Required
EMAIL_ERROR = 'Please provide a valid email address.'
class CreateProfileForm(Form):
"""A form for completing a :class:`~app.models.User` profile after they
login to Eventum for the first time.
:ivar email: :class:`wtforms.fields.StringField` - The user's email
address.
:ivar name: :class:`wtforms.fields.StringField` - The user's name.
:ivar next: :class:`wtforms.fields.HiddenField` - The URL that they should
be redirected to after completing their profile.
"""
name = StringField('Full Name')
email = StringField('Email Address', [Email(message=EMAIL_ERROR),
Required(message=EMAIL_ERROR)])
next = HiddenField('hidden', [URL(require_tld=False)])
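# Illustrative usage sketch (assumes a Flask request context; the field values
# are made up): keyword arguments pre-populate the form, and validate() then
# applies the Required/Email/URL validators declared above.
#
#   form = CreateProfileForm(name="Ada Lovelace", email="ada@example.com",
#                            next="/home")
#   ok = form.validate()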
|
danrschlosser/eventum
|
eventum/forms/CreateProfileForm.py
|
Python
|
mit
| 1,023 | 0 |
import os
import json
import arcpy
import types
import general
from .._abstract import abstract
########################################################################
class SpatialReference(abstract.AbstractGeometry):
""" creates a spatial reference instance """
_wkid = None
#----------------------------------------------------------------------
def __init__(self, wkid):
"""Constructor"""
self._wkid = wkid
#----------------------------------------------------------------------
@property
def wkid(self):
""" get/set the wkid """
return self._wkid
@wkid.setter
def wkid(self, wkid):
""" get/set the wkid """
self._wkid = wkid
@property
def asDictionary(self):
"""returns the wkid id for use in json calls"""
return {"wkid": self._wkid}
#----------------------------------------------------------------------
@property
def value(self):
"""returns the wkid id for use in json calls"""
return {"wkid": self._wkid}
########################################################################
class Point(abstract.AbstractGeometry):
""" Point Geometry
Inputs:
coord - list of [X,Y] pair or arcpy.Point Object
           wkid - well-known id of the spatial reference
z - is the Z coordinate value
m - m value
"""
_x = None
_y = None
_z = None
_m = None
_wkid = None
_json = None
_geom = None
_dict = None
#----------------------------------------------------------------------
def __init__(self, coord, wkid, z=None, m=None):
"""Constructor"""
if isinstance(coord, list):
self._x = float(coord[0])
self._y = float(coord[1])
elif isinstance(coord, arcpy.Geometry):
self._x = coord.centroid.X
self._y = coord.centroid.Y
self._z = coord.centroid.Z
self._m = coord.centroid.M
self._geom = coord.centroid
self._wkid = wkid
if not z is None:
self._z = float(z)
if not m is None:
self._m = m
#----------------------------------------------------------------------
def __str__(self):
""" returns the object as a string """
return json.dumps(self.asDictionary,
default=general._date_handler)
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
return {'wkid' : self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPoint"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=general._date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.Point object """
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
#
template = {"x" : self._x,
"y" : self._y,
"spatialReference" : {"wkid" : self._wkid}
}
if not self._z is None:
template['z'] = self._z
if not self._m is None:
            template['m'] = self._m
return template
#----------------------------------------------------------------------
@property
def asList(self):
""" returns a Point value as a list of [x,y,<z>,<m>] """
base = [self._x, self._y]
if not self._z is None:
base.append(self._z)
elif not self._m is None:
base.append(self._m)
return base
#----------------------------------------------------------------------
@property
def X(self):
""" gets the X coordinate """
return self._x
#----------------------------------------------------------------------
@X.setter
def X(self, value):
"""sets the X coordinate"""
if isinstance(value, (int, float,
long, types.NoneType)):
self._x = value
#----------------------------------------------------------------------
@property
def Y(self):
""" gets the Y Coordinate """
return self._y
#----------------------------------------------------------------------
@Y.setter
def Y(self, value):
""" sets the Y coordinate """
if isinstance(value, (int, float,
long, types.NoneType)):
self._y = value
#----------------------------------------------------------------------
@property
def Z(self):
""" gets the Z Coordinate """
return self._z
#----------------------------------------------------------------------
@Z.setter
def Z(self, value):
""" sets the Z coordinate """
if isinstance(value, (int, float,
long, types.NoneType)):
self._z = value
#----------------------------------------------------------------------
@property
def wkid(self):
""" gets the wkid """
return self._wkid
#----------------------------------------------------------------------
@wkid.setter
def wkid(self, value):
""" sets the wkid """
if isinstance(value, (int,
long)):
self._wkid = value
########################################################################
class MultiPoint(abstract.AbstractGeometry):
""" Implements the ArcGIS JSON MultiPoint Geometry Object """
_geom = None
_json = None
_dict = None
_wkid = None
_points = None
_hasZ = False
_hasM = False
#----------------------------------------------------------------------
def __init__(self, points, wkid, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(points, list):
self._points = points
elif isinstance(points, arcpy.Geometry):
self._points = self.__geomToPointList(points)
self._wkid = wkid
self._hasZ = hasZ
self._hasM = hasM
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if isinstance(geom, arcpy.Multipoint):
feature_geom = []
fPart = []
for part in geom:
fPart = []
for pnt in part:
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=geom.spatialReference.factoryCode,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
return {'wkid' : self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryMultipoint"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=general._date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.MultiPoint object """
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
#
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"points" : [],
"spatialReference" : {"wkid" : self._wkid}
}
for pt in self._points:
template['points'].append(pt.asList)
self._dict = template
return self._dict
########################################################################
class Polyline(abstract.AbstractGeometry):
""" Implements the ArcGIS REST API Polyline Object
Inputs:
paths - list - list of lists of Point objects
           wkid - integer - well-known spatial reference id
hasZ - boolean -
hasM - boolean -
"""
_paths = None
_wkid = None
_json = None
_dict = None
_geom = None
_hasZ = None
_hasM = None
#----------------------------------------------------------------------
def __init__(self, paths, wkid, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(paths, list):
self._paths = paths
elif isinstance(paths, arcpy.Geometry):
self._paths = self.__geomToPointList(paths)
self._wkid = wkid
self._hasM = hasM
self._hasZ = hasZ
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
if isinstance(geom, arcpy.Polyline):
feature_geom = []
fPart = []
for part in geom:
fPart = []
for pnt in part:
if geom.spatialReference is None:
wkid = self._wkid
else:
wkid = geom.spatialReference.factoryCode
fPart.append(Point(coord=[pnt.X, pnt.Y],
wkid=wkid,
z=pnt.Z, m=pnt.M))
feature_geom.append(fPart)
return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
return {'wkid' : self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPolyline"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=general._date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Polyline as an ESRI arcpy.Polyline object """
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"paths" : [],
"spatialReference" : {"wkid" : self._wkid}
}
for part in self._paths:
lpart = []
for pt in part:
lpart.append(pt.asList)
template['paths'].append(lpart)
del lpart
self._dict = template
return self._dict
########################################################################
class Polygon(abstract.AbstractGeometry):
""" Implements the ArcGIS REST JSON for Polygon Object """
_rings = None
_wkid = None
_json = None
_dict = None
_geom = None
_hasZ = None
_hasM = None
#----------------------------------------------------------------------
def __init__(self, rings, wkid, hasZ=False, hasM=False):
"""Constructor"""
if isinstance(rings, list):
self._rings = rings
elif isinstance(rings, arcpy.Geometry):
self._rings = self.__geomToPointList(rings)
## self._json = rings.JSON
## self._dict = _unicode_convert(json.loads(self._json))
self._wkid = wkid
self._hasM = hasM
self._hasZ = hasZ
#----------------------------------------------------------------------
def __geomToPointList(self, geom):
""" converts a geometry object to a common.Geometry object """
sr = geom.spatialReference
if sr is None:
wkid = self._wkid
else:
wkid = sr.factoryCode
g = json.loads(geom.JSON)
top = []
for gring in g['rings']:
ring = []
for g in gring:
ring.append(Point(coord=g, wkid=wkid, z=None, m=None))
top.append(ring)
return top
#if isinstance(geom, arcpy.Polygon):
#feature_geom = []
#fPart = []
#for part in geom:
#fPart = []
#for pnt in part:
#if geom.spatialReference is None:
#wkid = self._wkid
#else:
#wkid = geom.spatialReference.factoryCode
#fPart.append(Point(coord=[pnt.X, pnt.Y],
#wkid=wkid,
#z=pnt.Z, m=pnt.M))
#feature_geom.append(fPart)
#return feature_geom
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
return {'wkid' : self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryPolygon"
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=general._date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Polyline as an ESRI arcpy.Polyline object """
return arcpy.AsShape(self.asDictionary, True)
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the object as a python dictionary """
value = self._dict
if value is None:
template = {
"hasM" : self._hasM,
"hasZ" : self._hasZ,
"rings" : [],
"spatialReference" : {"wkid" : self._wkid}
}
for part in self._rings:
lpart = []
for pt in part:
if isinstance(pt, list):
lpart.append(pt)
elif isinstance(pt, Point):
lpart.append(pt.asList)
template['rings'].append(lpart)
del lpart
self._dict = template
return self._dict
########################################################################
class Envelope(abstract.AbstractGeometry):
"""
An envelope is a rectangle defined by a range of values for each
coordinate and attribute. It also has a spatialReference field.
The fields for the z and m ranges are optional.
"""
_json = None
_dict = None
_geom = None
_xmin = None
_ymin = None
_zmin = None
_mmin = None
_xmax = None
_ymax = None
_zmax = None
_mmax = None
_wkid = None
#----------------------------------------------------------------------
def __init__(self, xmin, ymin, xmax, ymax, wkid,
zmin=None, zmax=None, mmin=None, mmax=None):
"""Constructor"""
self._xmin = xmin
self._ymin = ymin
self._zmin = zmin
self._mmin = mmin
self._xmax = xmax
self._ymax = ymax
self._zmax = zmax
self._mmax = mmax
self._wkid = wkid
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""returns the geometry spatial reference"""
return {'wkid' : self._wkid}
#----------------------------------------------------------------------
@property
def type(self):
""" returns the geometry type """
return "esriGeometryEnvelope"
#----------------------------------------------------------------------
@property
def asDictionary(self):
""" returns the envelope as a dictionary """
template = {
"xmin" : self._xmin,
"ymin" : self._ymin,
"xmax" : self._xmax,
"ymax" : self._ymax,
"spatialReference" : {"wkid" : self._wkid}
}
if self._zmax is not None and \
self._zmin is not None:
template['zmin'] = self._zmin
template['zmax'] = self._zmax
if self._mmin is not None and \
self._mmax is not None:
template['mmax'] = self._mmax
template['mmin'] = self._mmin
return template
#----------------------------------------------------------------------
@property
def value(self):
""" returns the envelope as a dictionary """
template = {
"xmin" : self._xmin,
"ymin" : self._ymin,
"xmax" : self._xmax,
"ymax" : self._ymax,
"spatialReference" : {"wkid" : self._wkid}
}
if self._zmax is not None and \
self._zmin is not None:
template['zmin'] = self._zmin
template['zmax'] = self._zmax
if self._mmin is not None and \
self._mmax is not None:
template['mmax'] = self._mmax
template['mmin'] = self._mmin
return template
#----------------------------------------------------------------------
def __str__(self):
"""returns object as string"""
return self.asJSON
#----------------------------------------------------------------------
@property
def asJSON(self):
""" returns a geometry as JSON """
value = self._json
if value is None:
value = json.dumps(self.asDictionary,
default=general._date_handler)
self._json = value
return self._json
#----------------------------------------------------------------------
@property
def asArcPyObject(self):
""" returns the Envelope as an ESRI arcpy.Polygon object """
env = self.asDictionary
ring = [[
            Point([env['xmin'], env['ymin']], self._wkid),
            Point([env['xmax'], env['ymin']], self._wkid),
            Point([env['xmax'], env['ymax']], self._wkid),
            Point([env['xmin'], env['ymax']], self._wkid)
]]
return Polygon(ring, self._wkid).asArcPyObject
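# Illustrative usage sketch exercising only the JSON/dictionary side of the
# API; importing this module at all (and using asArcPyObject) additionally
# requires a working arcpy installation. Coordinates and wkid are made up.
if __name__ == '__main__':
    pt = Point(coord=[-122.45, 37.75], wkid=4326)
    print pt.asDictionary
    ring = [[Point(coord=[0, 0], wkid=4326),
             Point(coord=[0, 1], wkid=4326),
             Point(coord=[1, 1], wkid=4326),
             Point(coord=[0, 0], wkid=4326)]]
    print Polygon(rings=ring, wkid=4326).asDictionary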
|
achapkowski/ArcREST
|
src/arcrest/common/geometry.py
|
Python
|
apache-2.0
| 20,189 | 0.009114 |
game_type = 'input_output'
parameter_list = [['$x1','int'], ['$y0','int'], ['$y1','int']]
tuple_list = [
['KnR_1-7b_',[-3,None,None]]
]
global_code_template = '''\
d #include <stdio.h>
x #include <stdio.h>
dx
dx /* power: raise base to n-th power; n >= 0 */
dx /* (old-style version) */
dx power(base, n)
dx int base, n;
dx {
dx int i, p;
dx
dx p = 1;
dx for (i = 1; i <= n; ++i)
dx p = p * base;
dx return p;
dx }
dx
dx /* test power function */
'''
main_code_template = '''\
dx int i;
dx
dx for (i = 0; i < 3 ; ++i)
dx printf("%d %d %d\\n", i, power(2,i), power($x1,i));
'''
argv_template = ''
stdin_template = ''
stdout_template = '''\
0 1 1
1 2 $y0
2 4 $y1
'''
|
stryder199/RyarkAssignments
|
Assignment2/ttt/archive/_old/KnR/KnR_1-7b.py
|
Python
|
mit
| 689 | 0.015965 |
__author__ = 'jtsreinaldo'
from radio_constants import *
from validation_constants import *
class TXConfigRadioGenerator(object):
"""
    A class for the transmission configuration of a radio.
"""
def __init__(self):
"""
CTOR
"""
pass
@staticmethod
def tx_generator(radio):
"""
        Receives a variable formatted in YAML file style, containing information about radio
        configurations, which will be used to generate a source file.
@param radio
"""
# Checks if the transmitter is OFDM or GMSK and creates the correct instance. If it is not either of them,
# raise an exception.
if OFDM in radio[TX][TYPE]:
tx_type = OFDM
elif GMSK in radio[TX][TYPE]:
tx_type = GMSK
else:
raise Exception("The type of the transmitter should be gmsk or ofdm!")
if tx_type == OFDM:
# The user may not given all the parameters (all of them have default values), so we have to be
# precautious and use the try/except statement.
try:
the_fft_length = radio[TX][FFT_LENGTH]
except:
# from DEFAULTS dict:
the_fft_length = DEFAULTS[TX][OFDM][FFT_LENGTH]
try:
the_cp_length = radio[TX][CP_LENGTH]
except:
# from DEFAULTS dict:
                the_cp_length = DEFAULTS[TX][OFDM][CP_LENGTH]
try:
occ_tones = radio[TX][OCC_TONES]
except:
# from DEFAULTS dict:
occ_tones = DEFAULTS[TX][OFDM][OCC_TONES]
try:
the_modulation = radio[TX][MODULATION]
except:
                # from DEFAULTS dict:
the_modulation = DEFAULTS[TX][OFDM][MODULATION]
tx_arch = "PacketOFDMTx(modulation={modulation}, cp_length={cp_length}, fft_length={fft_length}, " \
"occupied_tones={occupied_tones})"
# The modulation needs to be a string, so we have to format it.
str_modulation = "\"{modulation}\""
str_modulation = str_modulation.format(modulation=the_modulation)
the_modulation = str_modulation
tx_arch = tx_arch.format(fft_length=the_fft_length,
cp_length=the_cp_length,
modulation=the_modulation,
occupied_tones=occ_tones)
elif tx_type == GMSK:
try:
samples_per_symbol = radio[TX][SAMPLES_PER_SYMBOL]
except:
samples_per_symbol = DEFAULTS[TX][GMSK][SAMPLES_PER_SYMBOL]
try:
bt = radio[TX][BT]
except:
            bt = DEFAULTS[TX][GMSK][BT]
try:
modulator = "digital.gmsk_mod(samples_per_symbol={samples_per_symbol}, bt={bt})"
modulator = modulator.format(samples_per_symbol=samples_per_symbol, bt=bt)
except:
modulator = DEFAULTS[TX][GMSK][MODULATOR]
tx_arch = "PacketGMSKTx(modulator={modulator})"
tx_arch = tx_arch.format(modulator=modulator)
ip = radio[USRP][IP]
# Checks if the user passed the usrp ip address.
if ip is None:
uhd_sink = "UHDSink()"
else:
uhd_sink = "UHDSink(\"addr={ip}\")"
uhd_sink = uhd_sink.format(ip=ip)
return tx_type, tx_arch, uhd_sink
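# Illustrative call sketch; the exact constants from radio_constants and the
# YAML layout are not shown in this file, so the dictionary below only
# indicates the expected shape (TX/TYPE/MODULATION/USRP/IP are the imported
# constant keys, the values are made up):
#
#   radio = {TX: {TYPE: OFDM, MODULATION: 'bpsk'}, USRP: {IP: '192.168.10.2'}}
#   tx_type, tx_arch, uhd_sink = TXConfigRadioGenerator.tx_generator(radio)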
|
ComputerNetworks-UFRGS/OpERA
|
python/experiment_design/transmission_config.py
|
Python
|
apache-2.0
| 3,622 | 0.004694 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# See the COPYING file for license information.
#
# Copyright (c) 2007 Guillaume Chazarain <guichaz@gmail.com>
# Allow printing with same syntax in Python 2/3
from __future__ import print_function
import curses
import errno
import locale
import math
import optparse
import os
import select
import signal
import sys
import time
from iotop.data import find_uids, TaskStatsNetlink, ProcessList, Stats,find_pids
from iotop.data import ThreadInfo
from iotop.version import VERSION
from iotop import ioprio
from iotop.ioprio import IoprioSetError
#
# Utility functions for the UI
#
UNITS = ['B', 'K', 'M', 'G', 'T', 'P', 'E']
def human_size(size):
if size > 0:
sign = ''
elif size < 0:
sign = '-'
size = -size
else:
return '0.00 B'
expo = int(math.log(size / 2, 2) / 10)
return '%s%.2f %s' % (
sign, (float(size) / (1 << (10 * expo))), UNITS[expo])
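# For illustration: human_size(0) == '0.00 B', human_size(2048) == '2.00 K',
# and human_size(-3 * 1024 ** 2) == '-3.00 M'.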
def format_size(options, bytes):
if options.kilobytes:
return '%.2f K' % (bytes / 1024.0)
return human_size(bytes)
def format_bandwidth(options, size, duration):
return format_size(options, size and float(size) / duration) + '/s'
def format_stats(options, process, duration):
# Keep in sync with TaskStatsNetlink.members_offsets and
# IOTopUI.get_data(self)
def delay2percent(delay): # delay in ns, duration in s
return '%.2f %%' % min(99.99, delay / (duration * 10000000.0))
if options.accumulated:
stats = process.stats_accum
display_format = lambda size, duration: format_size(options, size)
duration = time.time() - process.stats_accum_timestamp
else:
stats = process.stats_delta
display_format = lambda size, duration: format_bandwidth(
options, size, duration)
io_delay = delay2percent(stats.blkio_delay_total)
swapin_delay = delay2percent(stats.swapin_delay_total)
read_bytes = display_format(stats.read_bytes, duration)
written_bytes = stats.write_bytes - stats.cancelled_write_bytes
written_bytes = max(0, written_bytes)
write_bytes = display_format(written_bytes, duration)
return io_delay, swapin_delay, read_bytes, write_bytes
def get_max_pid_width():
try:
return len(open('/proc/sys/kernel/pid_max').read().strip())
except Exception as e:
print(e)
# Reasonable default in case something fails
return 5
MAX_PID_WIDTH = get_max_pid_width()
#
# UI Exceptions
#
class CancelInput(Exception):
pass
class InvalidInt(Exception):
pass
class InvalidPid(Exception):
pass
class InvalidTid(Exception):
pass
class InvalidIoprioData(Exception):
pass
#
# The UI
#
class IOTopUI(object):
# key, reverse
sorting_keys = [
(lambda p, s: p.pid, False),
(lambda p, s: p.ioprio_sort_key(), False),
(lambda p, s: p.get_user(), False),
(lambda p, s: s.read_bytes, True),
(lambda p, s: s.write_bytes - s.cancelled_write_bytes, True),
(lambda p, s: s.swapin_delay_total, True),
# The default sorting (by I/O % time) should show processes doing
# only writes, without waiting on them
(lambda p, s: s.blkio_delay_total or
int(not(not(s.read_bytes or s.write_bytes))), True),
(lambda p, s: p.get_cmdline(), False),
]
def __init__(self, win, process_list, options):
self.process_list = process_list
self.options = options
self.sorting_key = 6
self.sorting_reverse = IOTopUI.sorting_keys[self.sorting_key][1]
if not self.options.batch:
self.win = win
self.resize()
try:
curses.use_default_colors()
curses.start_color()
curses.curs_set(0)
except curses.error:
# This call can fail with misconfigured terminals, for example
# TERM=xterm-color. This is harmless
pass
def resize(self):
self.height, self.width = self.win.getmaxyx()
def run(self):
iterations = 0
poll = select.poll()
if not self.options.batch:
poll.register(sys.stdin.fileno(), select.POLLIN | select.POLLPRI)
while self.options.iterations is None or \
iterations < self.options.iterations:
total, current = self.process_list.refresh_processes()
self.refresh_display(iterations == 0, total, current,
self.process_list.duration)
if self.options.iterations is not None:
iterations += 1
if iterations >= self.options.iterations:
break
elif iterations == 0:
iterations = 1
try:
events = poll.poll(self.options.delay_seconds * 1000.0)
except select.error as e:
if e.args and e.args[0] == errno.EINTR:
events = []
else:
raise
for (fd, event) in events:
if event & (select.POLLERR | select.POLLHUP):
sys.exit(1)
if not self.options.batch:
self.resize()
if events:
key = self.win.getch()
self.handle_key(key)
def reverse_sorting(self):
self.sorting_reverse = not self.sorting_reverse
def adjust_sorting_key(self, delta):
orig_sorting_key = self.sorting_key
self.sorting_key += delta
self.sorting_key = max(0, self.sorting_key)
self.sorting_key = min(len(IOTopUI.sorting_keys) - 1, self.sorting_key)
if orig_sorting_key != self.sorting_key:
self.sorting_reverse = IOTopUI.sorting_keys[self.sorting_key][1]
# I wonder if switching to urwid for the display would be better here
def prompt_str(self, prompt, default=None, empty_is_cancel=True):
self.win.hline(1, 0, ord(' ') | curses.A_NORMAL, self.width)
self.win.addstr(1, 0, prompt, curses.A_BOLD)
self.win.refresh()
curses.echo()
curses.curs_set(1)
inp = self.win.getstr(1, len(prompt))
curses.curs_set(0)
curses.noecho()
if inp not in (None, ''):
return inp
if empty_is_cancel:
raise CancelInput()
return default
def prompt_int(self, prompt, default=None, empty_is_cancel=True):
inp = self.prompt_str(prompt, default, empty_is_cancel)
try:
return int(inp)
except ValueError:
raise InvalidInt()
def prompt_pid(self):
try:
return self.prompt_int('PID to ionice: ')
except InvalidInt:
raise InvalidPid()
except CancelInput:
raise
def prompt_tid(self):
try:
return self.prompt_int('TID to ionice: ')
except InvalidInt:
raise InvalidTid()
except CancelInput:
raise
def prompt_data(self, ioprio_data):
try:
if ioprio_data is not None:
inp = self.prompt_int('I/O priority data (0-7, currently %s): '
% ioprio_data, ioprio_data, False)
else:
inp = self.prompt_int('I/O priority data (0-7): ', None, False)
except InvalidInt:
raise InvalidIoprioData()
if inp < 0 or inp > 7:
raise InvalidIoprioData()
return inp
def prompt_set(self, prompt, display_list, ret_list, selected):
try:
selected = ret_list.index(selected)
except ValueError:
selected = -1
set_len = len(display_list) - 1
while True:
self.win.hline(1, 0, ord(' ') | curses.A_NORMAL, self.width)
self.win.insstr(1, 0, prompt, curses.A_BOLD)
offset = len(prompt)
for i, item in enumerate(display_list):
display = ' %s ' % item
if i is selected:
attr = curses.A_REVERSE
else:
attr = curses.A_NORMAL
self.win.insstr(1, offset, display, attr)
offset += len(display)
while True:
key = self.win.getch()
if key in (curses.KEY_LEFT, ord('l')) and selected > 0:
selected -= 1
break
elif (key in (curses.KEY_RIGHT, ord('r')) and
selected < set_len):
selected += 1
break
elif key in (curses.KEY_ENTER, ord('\n'), ord('\r')):
return ret_list[selected]
elif key in (27, curses.KEY_CANCEL, curses.KEY_CLOSE,
curses.KEY_EXIT, ord('q'), ord('Q')):
raise CancelInput()
def prompt_class(self, ioprio_class=None):
prompt = 'I/O priority class: '
classes_prompt = ['Real-time', 'Best-effort', 'Idle']
classes_ret = ['rt', 'be', 'idle']
if ioprio_class is None:
ioprio_class = 2
inp = self.prompt_set(prompt, classes_prompt,
classes_ret, ioprio_class)
return inp
def prompt_error(self, error='Error!'):
self.win.hline(1, 0, ord(' ') | curses.A_NORMAL, self.width)
self.win.insstr(1, 0, ' %s ' % error, curses.A_REVERSE)
self.win.refresh()
time.sleep(1)
def prompt_clear(self):
self.win.hline(1, 0, ord(' ') | curses.A_NORMAL, self.width)
self.win.refresh()
def handle_key(self, key):
def toggle_accumulated():
self.options.accumulated ^= True
def toggle_only_io():
self.options.only ^= True
def toggle_processes():
self.options.processes ^= True
self.process_list.clear()
self.process_list.refresh_processes()
def ionice():
try:
if self.options.processes:
pid = self.prompt_pid()
exec_unit = self.process_list.get_process(pid)
else:
tid = self.prompt_tid()
exec_unit = ThreadInfo(tid,
self.process_list.taskstats_connection)
ioprio_value = exec_unit.get_ioprio()
(ioprio_class, ioprio_data) = \
ioprio.to_class_and_data(ioprio_value)
ioprio_class = self.prompt_class(ioprio_class)
if ioprio_class == 'idle':
ioprio_data = 0
else:
ioprio_data = self.prompt_data(ioprio_data)
exec_unit.set_ioprio(ioprio_class, ioprio_data)
self.process_list.clear()
self.process_list.refresh_processes()
except IoprioSetError as e:
self.prompt_error('Error setting I/O priority: %s' % e.err)
except InvalidPid:
self.prompt_error('Invalid process id!')
except InvalidTid:
self.prompt_error('Invalid thread id!')
except InvalidIoprioData:
self.prompt_error('Invalid I/O priority data!')
except InvalidInt:
self.prompt_error('Invalid integer!')
except CancelInput:
self.prompt_clear()
else:
self.prompt_clear()
key_bindings = {
ord('q'):
lambda: sys.exit(0),
ord('Q'):
lambda: sys.exit(0),
ord('r'):
lambda: self.reverse_sorting(),
ord('R'):
lambda: self.reverse_sorting(),
ord('a'):
toggle_accumulated,
ord('A'):
toggle_accumulated,
ord('o'):
toggle_only_io,
ord('O'):
toggle_only_io,
ord('p'):
toggle_processes,
ord('P'):
toggle_processes,
ord('i'):
ionice,
ord('I'):
ionice,
curses.KEY_LEFT:
lambda: self.adjust_sorting_key(-1),
curses.KEY_RIGHT:
lambda: self.adjust_sorting_key(1),
curses.KEY_HOME:
lambda: self.adjust_sorting_key(-len(IOTopUI.sorting_keys)),
curses.KEY_END:
lambda: self.adjust_sorting_key(len(IOTopUI.sorting_keys))
}
action = key_bindings.get(key, lambda: None)
action()
def get_data(self):
def format(p):
stats = format_stats(self.options, p, self.process_list.duration)
io_delay, swapin_delay, read_bytes, write_bytes = stats
if Stats.has_blkio_delay_total:
delay_stats = '%7s %7s ' % (swapin_delay, io_delay)
else:
delay_stats = ' ?unavailable? '
pid_format = '%%%dd' % MAX_PID_WIDTH
line = (pid_format + ' %4s %-8s %11s %11s %s') % (
p.pid, p.get_ioprio(), p.get_user()[:8], read_bytes,
write_bytes, delay_stats)
cmdline = p.get_cmdline()
if not self.options.batch:
remaining_length = self.width - len(line)
if 2 < remaining_length < len(cmdline):
len1 = (remaining_length - 1) // 2
offset2 = -(remaining_length - len1 - 1)
cmdline = cmdline[:len1] + '~' + cmdline[offset2:]
line += cmdline
if not self.options.batch:
line = line[:self.width]
return line
def should_format(p):
return not self.options.only or \
p.did_some_io(self.options.accumulated)
processes = list(filter(should_format,
self.process_list.processes.values()))
key = IOTopUI.sorting_keys[self.sorting_key][0]
if self.options.accumulated:
stats_lambda = lambda p: p.stats_accum
else:
stats_lambda = lambda p: p.stats_delta
processes.sort(key=lambda p: key(p, stats_lambda(p)),
reverse=self.sorting_reverse)
if not self.options.batch:
del processes[self.height - 2:]
return list(map(format, processes))
def refresh_display(self, first_time, total, current, duration):
summary = [
'Total DISK READ : %s | Total DISK WRITE : %s' % (
format_bandwidth(self.options, total[0], duration).rjust(14),
format_bandwidth(self.options, total[1], duration).rjust(14)),
'Current DISK READ: %s | Current DISK WRITE: %s' % (
format_bandwidth(self.options, current[0], duration).rjust(14),
format_bandwidth(self.options, current[1], duration).rjust(14))
]
pid = max(0, (MAX_PID_WIDTH - 3)) * ' '
if self.options.processes:
pid += 'PID'
else:
pid += 'TID'
titles = [pid, ' PRIO', ' USER', ' DISK READ', ' DISK WRITE',
' SWAPIN', ' IO', ' COMMAND']
lines = self.get_data()
if self.options.time:
titles = [' TIME'] + titles
current_time = time.strftime('%H:%M:%S ')
lines = [current_time + l for l in lines]
summary = [current_time + s for s in summary]
if self.options.batch:
if self.options.quiet <= 2:
for s in summary:
print(s)
if self.options.quiet <= int(first_time):
print(''.join(titles))
for l in lines:
print(l)
sys.stdout.flush()
else:
self.win.erase()
for i, s in enumerate(summary):
self.win.addstr(i, 0, s[:self.width])
self.win.hline(len(summary), 0, ord(' ') | curses.A_REVERSE,
self.width)
remaining_cols = self.width
for i in range(len(titles)):
attr = curses.A_REVERSE
title = titles[i]
if i == self.sorting_key:
title = title[1:]
if i == self.sorting_key:
attr |= curses.A_BOLD
title += self.sorting_reverse and '>' or '<'
title = title[:remaining_cols]
remaining_cols -= len(title)
self.win.addstr(title, attr)
if Stats.has_blkio_delay_total:
status_msg = None
else:
status_msg = ('CONFIG_TASK_DELAY_ACCT not enabled in kernel, '
'cannot determine SWAPIN and IO %')
num_lines = min(len(lines),
self.height - 2 - int(bool(status_msg)))
for i in range(num_lines):
try:
def print_line(line):
self.win.addstr(i + len(summary) + 1, 0, line)
try:
print_line(lines[i])
except UnicodeEncodeError:
# Python2: 'ascii' codec can't encode character ...
# http://bugs.debian.org/708252
print_line(lines[i].encode('utf-8'))
except curses.error:
pass
if status_msg:
self.win.insstr(self.height - len(summary), 0, status_msg,
curses.A_BOLD)
self.win.refresh()
def run_iotop_window(win, options):
if options.batch:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
else:
def clean_exit(*args, **kwargs):
sys.exit(0)
signal.signal(signal.SIGINT, clean_exit)
signal.signal(signal.SIGTERM, clean_exit)
taskstats_connection = TaskStatsNetlink(options)
process_list = ProcessList(taskstats_connection, options)
ui = IOTopUI(win, process_list, options)
ui.run()
def run_iotop(options):
try:
if options.batch:
return run_iotop_window(None, options)
else:
return curses.wrapper(run_iotop_window, options)
except OSError as e:
if e.errno == errno.EPERM:
print(e, file=sys.stderr)
print('''
The Linux kernel interfaces that iotop relies on now require root privileges
or the NET_ADMIN capability. This change occurred because a security issue
(CVE-2011-2494) was found that allows leakage of sensitive data across user
boundaries. If you require the ability to run iotop as a non-root user, please
configure sudo to allow you to run iotop as root.
Please do not file bugs on iotop about this.''', file=sys.stderr)
sys.exit(1)
else:
raise
#
# Profiling
#
def _profile(continuation):
prof_file = 'iotop.prof'
try:
import cProfile
import pstats
print('Profiling using cProfile')
cProfile.runctx('continuation()', globals(), locals(), prof_file)
stats = pstats.Stats(prof_file)
except ImportError:
import hotshot
import hotshot.stats
prof = hotshot.Profile(prof_file, lineevents=1)
print('Profiling using hotshot')
prof.runcall(continuation)
prof.close()
stats = hotshot.stats.load(prof_file)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(50)
stats.print_callees(50)
os.remove(prof_file)
#
# Main program
#
USAGE = '''%s [OPTIONS]
DISK READ and DISK WRITE are the block I/O bandwidth used during the sampling
period. SWAPIN and IO are the percentages of time the thread spent respectively
while swapping in and waiting on I/O more generally. PRIO is the I/O priority
at which the thread is running (set using the ionice command).
Controls: left and right arrows to change the sorting column, r to invert the
sorting order, o to toggle the --only option, p to toggle the --processes
option, a to toggle the --accumulated option, i to change I/O priority, q to
quit, any other key to force a refresh.''' % sys.argv[0]
def main():
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
print('unable to set locale, falling back to the default locale')
parser = optparse.OptionParser(usage=USAGE, version='iotop ' + VERSION)
parser.add_option('-o', '--only', action='store_true',
dest='only', default=False,
help='only show processes or threads actually doing I/O')
parser.add_option('-b', '--batch', action='store_true', dest='batch',
help='non-interactive mode')
parser.add_option('-n', '--iter', type='int', dest='iterations',
metavar='NUM',
help='number of iterations before ending [infinite]')
parser.add_option('-d', '--delay', type='float', dest='delay_seconds',
help='delay between iterations [1 second]',
metavar='SEC', default=1)
parser.add_option('-p', '--pid', type='str', dest='pids',
help='processes/threads to monitor [all]\n\
for example:-p python,chrome,2258', metavar='PID|PNAME')
parser.add_option('-u', '--user', type='str', dest='users',
help='users to monitor [all]\n\
                      if you need to monitor multiple users, you can\
                      use / to separate each user,\
                      for example: -u root/mysql/500', default="",
# action='append', help='users to monitor [all]',
metavar='USER')
parser.add_option('-P', '--processes', action='store_true',
dest='processes', default=False,
help='only show processes, not all threads')
parser.add_option('-a', '--accumulated', action='store_true',
dest='accumulated', default=False,
help='show accumulated I/O instead of bandwidth')
parser.add_option('-k', '--kilobytes', action='store_true',
dest='kilobytes', default=False,
help='use kilobytes instead of a human friendly unit')
parser.add_option('-t', '--time', action='store_true', dest='time',
help='add a timestamp on each line (implies --batch)')
parser.add_option('-q', '--quiet', action='count', dest='quiet', default=0,
help='suppress some lines of header (implies --batch)')
parser.add_option('--profile', action='store_true', dest='profile',
default=False, help=optparse.SUPPRESS_HELP)
options, args = parser.parse_args()
if args:
parser.error('Unexpected arguments: ' + ' '.join(args))
find_uids(options)
find_pids(options)
options.batch = options.batch or options.time or options.quiet
main_loop = lambda: run_iotop(options)
if options.profile:
def safe_main_loop():
try:
main_loop()
except Exception:
pass
_profile(safe_main_loop)
else:
main_loop()
|
lujinda/iotop
|
iotop/ui.py
|
Python
|
gpl-2.0
| 24,215 | 0.000991 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class HostRegistry:
""" Class stores registry with host tests and objects representing them
"""
    HOST_TESTS = {}  # host_test_name -> host_test_object
def register_host_test(self, ht_name, ht_object):
if ht_name not in self.HOST_TESTS:
self.HOST_TESTS[ht_name] = ht_object
    def unregister_host_test(self, ht_name):
        if ht_name in self.HOST_TESTS:
            del self.HOST_TESTS[ht_name]
def get_host_test(self, ht_name):
return self.HOST_TESTS[ht_name] if ht_name in self.HOST_TESTS else None
def is_host_test(self, ht_name):
return ht_name in self.HOST_TESTS
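if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: register a
    # dummy host test object, then query the registry for it.
    class _DemoHostTest(object):
        def test(self, *args, **kwargs):
            return True
    registry = HostRegistry()
    registry.register_host_test("demo_auto", _DemoHostTest())
    print(registry.is_host_test("demo_auto"))      # True
    print(registry.get_host_test("demo_auto"))     # the _DemoHostTest instance
    print(registry.get_host_test("missing_auto"))  # None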
|
jferreir/mbed
|
workspace_tools/host_tests/host_registry.py
|
Python
|
apache-2.0
| 1,214 | 0.004942 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (C) 2013-2019 British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Test isodatetime.datetimeoper functionalities."""
import os
import unittest
from unittest.mock import patch
from metomi.isodatetime.data import (
get_timepoint_from_seconds_since_unix_epoch as seconds2point)
import metomi.isodatetime.datetimeoper as idt_dtoper
class TestDateTimeOperator(unittest.TestCase):
"""Test isodatetime.datetimeoper.TestDateTimeOperator functionalities."""
@patch('metomi.isodatetime.datetimeoper.now2point')
def test_process_time_point_str_now_0(self, mock_now_func):
"""DateTimeOperator.process_time_point_str()"""
# 2009-02-13T23:31:30Z
mock_now = seconds2point(1234567890)
mock_now_func.return_value = mock_now
datetimeoper = idt_dtoper.DateTimeOperator()
self.assertEqual(str(mock_now), datetimeoper.process_time_point_str())
self.assertEqual(
str(mock_now),
datetimeoper.process_time_point_str(datetimeoper.STR_NOW))
@patch('metomi.isodatetime.datetimeoper.now2point')
def test_process_time_point_str_ref_0(self, mock_now_func):
"""DateTimeOperator.process_time_point_str('ref')
But without explicit reference time, so default to now.
"""
# 2009-02-13T23:31:30Z
mock_now = seconds2point(1234567890)
mock_now_func.return_value = mock_now
datetimeoper = idt_dtoper.DateTimeOperator()
        # Ensure that the ISODATETIMEREF environment variable is not set,
        # or the test may not work.
environ = os.environ.copy()
if datetimeoper.ENV_REF in environ:
del environ[datetimeoper.ENV_REF]
with patch.dict(os.environ, environ, clear=True):
self.assertEqual(
str(mock_now),
datetimeoper.process_time_point_str(datetimeoper.STR_REF))
def test_process_time_point_str_ref_1(self):
"""DateTimeOperator.process_time_point_str('ref')
With explicit reference time.
"""
# 2009-02-13T23:31:30Z
ref_point_str = str(seconds2point(1234567890))
datetimeoper = idt_dtoper.DateTimeOperator(
ref_point_str=ref_point_str)
self.assertEqual(
ref_point_str,
datetimeoper.process_time_point_str(datetimeoper.STR_REF))
def test_process_time_point_str_ref_2(self):
"""DateTimeOperator.process_time_point_str('ref')
With explicit reference time as ISODATETIMEREF environment variable.
"""
# 2009-02-13T23:31:30Z
ref_point_str = str(seconds2point(1234567890))
        # Set ISODATETIMEREF,
        # or the test may not work.
environ = os.environ.copy()
environ[idt_dtoper.DateTimeOperator.ENV_REF] = (
ref_point_str)
with patch.dict(os.environ, environ):
datetimeoper = idt_dtoper.DateTimeOperator()
self.assertEqual(
ref_point_str,
datetimeoper.process_time_point_str(datetimeoper.STR_REF))
def test_process_time_point_str_x(self):
"""DateTimeOperator.process_time_point_str(...)
Basic parse and dump of a time point string.
"""
# 2009-02-13T23:31:30Z
point_str = str(seconds2point(1234567890))
datetimeoper = idt_dtoper.DateTimeOperator()
# Unix time
self.assertEqual(
'2019-01-11T10:40:15Z',
datetimeoper.process_time_point_str(
'Fri 11 Jan 10:40:15 UTC 2019',
print_format=datetimeoper.CURRENT_TIME_DUMP_FORMAT_Z))
# Basic
self.assertEqual(
point_str,
datetimeoper.process_time_point_str(point_str))
# +ve offset
point_str_1 = str(seconds2point(1234567890 + 3600))
self.assertEqual(
point_str_1,
datetimeoper.process_time_point_str(point_str, ['PT1H']))
# +ve offset, time point like duration
point_str_1 = str(seconds2point(1234567890 + 3600))
self.assertEqual(
point_str_1,
datetimeoper.process_time_point_str(point_str, ['P0000-00-00T01']))
# -ve offset
point_str_2 = str(seconds2point(1234567890 - 86400))
self.assertEqual(
point_str_2,
datetimeoper.process_time_point_str(point_str, ['-P1D']))
# offsets that cancel out
self.assertEqual(
point_str,
datetimeoper.process_time_point_str(point_str, ['PT1H', '-PT60M']))
# Multiple offsets in 1 string
point_str_3 = str(seconds2point(1234567890 - 86400 - 3600))
self.assertEqual(
point_str_3,
datetimeoper.process_time_point_str(point_str, ['-P1DT1H']))
# Multiple offsets
self.assertEqual(
point_str_3,
datetimeoper.process_time_point_str(point_str, ['-P1D', '-PT1H']))
# Bad time point string
self.assertRaises(
ValueError,
datetimeoper.process_time_point_str, 'teatime')
# Bad offset string
with self.assertRaises(
idt_dtoper.OffsetValueError,
) as ctxmgr:
datetimeoper.process_time_point_str(point_str, ['ages'])
self.assertEqual('ages: bad offset value', str(ctxmgr.exception))
# Bad offset string, unsupported time point like duration
with self.assertRaises(
idt_dtoper.OffsetValueError,
) as ctxmgr:
datetimeoper.process_time_point_str(point_str, ['P0000-W01-1'])
self.assertEqual(
'P0000-W01-1: bad offset value',
str(ctxmgr.exception))
def test_process_time_point_str_calendar(self):
"""DateTimeOperator.process_time_point_str(...)
Alternate calendars.
"""
self.assertEqual(
'gregorian',
idt_dtoper.DateTimeOperator.get_calendar_mode())
self.assertRaises(
KeyError,
idt_dtoper.DateTimeOperator.set_calendar_mode,
'milkywaygalactic')
for cal, str_in, offsets, str_out in [
# 360day
('360day', '20130301', ['-P1D'], '20130230'),
('360day', '20130230', ['P1D'], '20130301'),
# 360_day
('360_day', '20130301', ['-P1D'], '20130230'),
('360_day', '20130230', ['P1D'], '20130301'),
# 365day
('365day', '20130301', ['-P1D'], '20130228'),
('365day', '20130228', ['P1D'], '20130301'),
# 365_day
('365_day', '20130301', ['-P1D'], '20130228'),
('365_day', '20130228', ['P1D'], '20130301'),
# 366day
('366day', '20130301', ['-P1D'], '20130229'),
('366day', '20130229', ['P1D'], '20130301'),
# 366_day
('366_day', '20130301', ['-P1D'], '20130229'),
('366_day', '20130229', ['P1D'], '20130301'),
]:
            # Calendar mode is, unfortunately, a global variable,
            # so its value needs to be reset on return.
calendar_mode = (
idt_dtoper.DateTimeOperator.get_calendar_mode())
# Calendar mode by constructor.
try:
datetimeoper = idt_dtoper.DateTimeOperator(
calendar_mode=cal)
self.assertEqual(
str_out,
datetimeoper.process_time_point_str(str_in, offsets))
finally:
idt_dtoper.DateTimeOperator.set_calendar_mode(
calendar_mode)
# Calendar mode by environment variable
try:
environ = os.environ.copy()
key = (
idt_dtoper.DateTimeOperator.ENV_CALENDAR_MODE
)
environ[key] = cal
with patch.dict(os.environ, environ, clear=True):
datetimeoper = idt_dtoper.DateTimeOperator()
self.assertEqual(
str_out,
datetimeoper.process_time_point_str(
str_in, offsets))
finally:
idt_dtoper.DateTimeOperator.set_calendar_mode(
calendar_mode)
def test_process_time_point_str_format(self):
"""DateTimeOperator.process_time_point_str(...)
With parse_format and print_format.
"""
for parse_format, print_format, point_str_in, point_str_out in [
('%d/%m/%Y %H:%M:%S', '%Y-%m-%dT%H:%M:%S',
'24/12/2012 06:00:00', '2012-12-24T06:00:00'),
('%Y,%M,%d,%H', '%Y%M%d%H', '2014,01,02,05', '2014010205'),
('%Y%m%d', '%y%m%d', '20141231', '141231'),
('%Y%m%d%H%M%S', '%s', '20140402100000', '1396432800'),
('%s', '%Y%m%dT%H%M%S%z', '1396429200', '20140402T090000+0000'),
('%d/%m/%Y %H:%M:%S', 'CCYY-MM-DDThh:mm',
'24/12/2012 06:00:00', '2012-12-24T06:00'),
(None, 'CCYY-MM-DDThh:mm+01:00',
'2014-091T15:14:03Z', '2014-04-01T16:14+01:00'),
(None, '%m', '2014-02-01T04:05:06', '02'),
(None, '%Y', '2014-02-01T04:05:06', '2014'),
(None, '%H', '2014-02-01T04:05:06', '04'),
(None, '%Y%m%d_%H%M%S', '2014-02-01T04:05:06', '20140201_040506'),
(None, '%Y.file', '2014-02-01T04:05:06', '2014.file'),
(None, 'y%Ym%md%d', '2014-02-01T04:05:06', 'y2014m02d01'),
(None, '%F', '2014-02-01T04:05:06', '2014-02-01'),
]:
datetimeoper = idt_dtoper.DateTimeOperator(
utc_mode=True,
parse_format=parse_format)
self.assertEqual(
point_str_out,
datetimeoper.process_time_point_str(
point_str_in, print_format=print_format))
# Bad parse format
datetimeoper = idt_dtoper.DateTimeOperator(
parse_format='%o')
with self.assertRaises(ValueError) as ctxmgr:
datetimeoper.process_time_point_str('0000')
self.assertEqual(
"'o' is a bad directive in format '%o'",
str(ctxmgr.exception))
def test_format_duration_str_x(self):
"""DateTimeOperator.format_duration_str(...)"""
datetimeoper = idt_dtoper.DateTimeOperator()
# Good ones
for print_format, duration_str_in, duration_out in [
('s', 'PT1M', 60.0),
('s', 'P1DT1H1M1S', 90061.0),
('m', 'PT1S', 0.0166666666667),
('h', 'P832DT23H12M45S', 19991.2125),
('S', '-PT1M1S', -61.0),
]:
self.assertAlmostEqual(
duration_out,
datetimeoper.format_duration_str(
duration_str_in, print_format))
# Bad ones
for print_format, duration_str_in in [
('y', 'PT1M'),
('s', 'quickquick'),
]:
self.assertRaises(
ValueError,
datetimeoper.format_duration_str,
duration_str_in, print_format)
def test_diff_time_point_strs(self):
"""DateTimeOperator.diff_time_point_strs(...)"""
datetimeoper = idt_dtoper.DateTimeOperator(
ref_point_str='20150106')
for (
time_point_str1,
time_point_str2,
offsets1, offsets2,
print_format,
duration_print_format,
duration_out,
) in [
( # Positive
'20130101T12',
'20130301',
None,
None,
None,
None,
'P58DT12H',
),
( # Positive, non integer seconds
# Use (3.1 - 3.0) to bypass str(float) precision issue
'20190101T010203',
'20190101T010203.1',
None,
None,
None,
None,
'PT%sS' % (str(3.1 - 3.0).replace('.', ',')),
),
( # Positive, non integer seconds, print format
# Use (3.1 - 3.0) to bypass str(float) precision issue
'20190101T010203',
'20190101T010203.1',
None,
None,
's',
None,
str(3.1 - 3.0),
),
( # Offset 1, reference time 2, positive
'20140101',
'ref',
['P11M24D'],
None,
None,
None,
'P12D',
),
( # Offset 2, positive
'20100101T00',
'20100201T00',
None,
['P1D'],
None,
None,
'P32D',
),
( # Neutral
'20151225T00',
'20151225',
None,
None,
None,
None,
'P0Y',
),
( # Negative
'20150101T12',
'20130301',
None,
None,
None,
None,
'-P671DT12H',
),
( # Alternate format
'20130101T12',
'20130301',
None,
None,
'y,m,d,h,M,s',
None,
'0,0,58,12,0,0',
),
( # Offset 2, alternate format
'0000',
'0000',
['-PT2M'],
None,
'y,m,d,h,M,s',
None,
'0,0,0,0,2,0',
),
( # As seconds, positive
'2000-01-01T00:00:00',
'2000-01-01T01:00:00',
None,
None,
None,
's',
3600.0,
),
( # As seconds, neutral
'2000-01-01T00:00:00',
'2000-01-01T00:00:00',
None,
None,
None,
's',
0.0,
),
( # As seconds, negative
'2000-01-01T00:00:00',
'1999-12-31T23:00:00',
None,
None,
None,
's',
-3600.0,
),
]:
self.assertEqual(
duration_out,
datetimeoper.diff_time_point_strs(
time_point_str1,
time_point_str2,
offsets1,
offsets2,
print_format,
duration_print_format))
if __name__ == '__main__':
unittest.main()
|
matthewrmshin/isodatetime
|
metomi/isodatetime/tests/test_datetimeoper.py
|
Python
|
lgpl-3.0
| 15,759 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Duracloud'
db.create_table(
u"locations_duracloud",
(
(u"id", self.gf("django.db.models.fields.AutoField")(primary_key=True)),
(
"space",
self.gf("django.db.models.fields.related.OneToOneField")(
to=orm["locations.Space"], to_field="uuid", unique=True
),
),
("host", self.gf("django.db.models.fields.CharField")(max_length=256)),
("user", self.gf("django.db.models.fields.CharField")(max_length=64)),
(
"password",
self.gf("django.db.models.fields.CharField")(max_length=64),
),
(
"duraspace",
self.gf("django.db.models.fields.CharField")(max_length=64),
),
),
)
db.send_create_signal("locations", ["Duracloud"])
def backwards(self, orm):
# Deleting model 'Duracloud'
db.delete_table(u"locations_duracloud")
models = {
u"auth.group": {
"Meta": {"object_name": "Group"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "80"},
),
"permissions": (
"django.db.models.fields.related.ManyToManyField",
[],
{
"to": u"orm['auth.Permission']",
"symmetrical": "False",
"blank": "True",
},
),
},
u"auth.permission": {
"Meta": {
"ordering": "(u'content_type__app_label', u'content_type__model', u'codename')",
"unique_together": "((u'content_type', u'codename'),)",
"object_name": "Permission",
},
"codename": (
"django.db.models.fields.CharField",
[],
{"max_length": "100"},
),
"content_type": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": u"orm['contenttypes.ContentType']"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "50"}),
},
u"auth.user": {
"Meta": {"object_name": "User"},
"date_joined": (
"django.db.models.fields.DateTimeField",
[],
{"default": "datetime.datetime.now"},
),
"email": (
"django.db.models.fields.EmailField",
[],
{"max_length": "75", "blank": "True"},
),
"first_name": (
"django.db.models.fields.CharField",
[],
{"max_length": "30", "blank": "True"},
),
"groups": (
"django.db.models.fields.related.ManyToManyField",
[],
{"to": u"orm['auth.Group']", "symmetrical": "False", "blank": "True"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"is_active": (
"django.db.models.fields.BooleanField",
[],
{"default": "True"},
),
"is_staff": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"is_superuser": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"last_login": (
"django.db.models.fields.DateTimeField",
[],
{"default": "datetime.datetime.now"},
),
"last_name": (
"django.db.models.fields.CharField",
[],
{"max_length": "30", "blank": "True"},
),
"password": (
"django.db.models.fields.CharField",
[],
{"max_length": "128"},
),
"user_permissions": (
"django.db.models.fields.related.ManyToManyField",
[],
{
"to": u"orm['auth.Permission']",
"symmetrical": "False",
"blank": "True",
},
),
"username": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "30"},
),
},
u"contenttypes.contenttype": {
"Meta": {
"ordering": "('name',)",
"unique_together": "(('app_label', 'model'),)",
"object_name": "ContentType",
"db_table": "'django_content_type'",
},
"app_label": (
"django.db.models.fields.CharField",
[],
{"max_length": "100"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"model": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
},
"locations.callback": {
"Meta": {"object_name": "Callback"},
"enabled": (
"django.db.models.fields.BooleanField",
[],
{"default": "True"},
),
"event": ("django.db.models.fields.CharField", [], {"max_length": "15"}),
"expected_status": (
"django.db.models.fields.IntegerField",
[],
{"default": "200"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"method": ("django.db.models.fields.CharField", [], {"max_length": "10"}),
"uri": ("django.db.models.fields.CharField", [], {"max_length": "1024"}),
"uuid": (
"django.db.models.fields.CharField",
[],
{"max_length": "36", "blank": "True"},
),
},
"locations.duracloud": {
"Meta": {"object_name": "Duracloud"},
"duraspace": (
"django.db.models.fields.CharField",
[],
{"max_length": "64"},
),
"host": ("django.db.models.fields.CharField", [], {"max_length": "256"}),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"password": ("django.db.models.fields.CharField", [], {"max_length": "64"}),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
"user": ("django.db.models.fields.CharField", [], {"max_length": "64"}),
},
"locations.event": {
"Meta": {"object_name": "Event"},
"admin_id": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": u"orm['auth.User']", "null": "True", "blank": "True"},
),
"event_reason": ("django.db.models.fields.TextField", [], {}),
"event_type": (
"django.db.models.fields.CharField",
[],
{"max_length": "8"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"package": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Package']", "to_field": "'uuid'"},
),
"pipeline": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Pipeline']", "to_field": "'uuid'"},
),
"status": ("django.db.models.fields.CharField", [], {"max_length": "8"}),
"status_reason": (
"django.db.models.fields.TextField",
[],
{"null": "True", "blank": "True"},
),
"status_time": (
"django.db.models.fields.DateTimeField",
[],
{"auto_now": "True", "blank": "True"},
),
"store_data": (
"django.db.models.fields.TextField",
[],
{"null": "True", "blank": "True"},
),
"user_email": (
"django.db.models.fields.EmailField",
[],
{"max_length": "254"},
),
"user_id": ("django.db.models.fields.PositiveIntegerField", [], {}),
},
"locations.fedora": {
"Meta": {"object_name": "Fedora"},
"fedora_name": (
"django.db.models.fields.CharField",
[],
{"max_length": "256"},
),
"fedora_password": (
"django.db.models.fields.CharField",
[],
{"max_length": "256"},
),
"fedora_user": (
"django.db.models.fields.CharField",
[],
{"max_length": "64"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
},
"locations.file": {
"Meta": {"object_name": "File"},
"checksum": (
"django.db.models.fields.TextField",
[],
{"max_length": "128"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.TextField", [], {"max_length": "1000"}),
"source_id": (
"django.db.models.fields.TextField",
[],
{"max_length": "128"},
),
"stored": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
},
"locations.localfilesystem": {
"Meta": {"object_name": "LocalFilesystem"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
},
"locations.location": {
"Meta": {"object_name": "Location"},
"description": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
"enabled": (
"django.db.models.fields.BooleanField",
[],
{"default": "True"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"pipeline": (
"django.db.models.fields.related.ManyToManyField",
[],
{
"symmetrical": "False",
"to": "orm['locations.Pipeline']",
"null": "True",
"through": "orm['locations.LocationPipeline']",
"blank": "True",
},
),
"purpose": ("django.db.models.fields.CharField", [], {"max_length": "2"}),
"quota": (
"django.db.models.fields.BigIntegerField",
[],
{"default": "None", "null": "True", "blank": "True"},
),
"relative_path": ("django.db.models.fields.TextField", [], {}),
"space": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Space']", "to_field": "'uuid'"},
),
"used": ("django.db.models.fields.BigIntegerField", [], {"default": "0"}),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
},
"locations.locationpipeline": {
"Meta": {"object_name": "LocationPipeline"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"location": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Location']", "to_field": "'uuid'"},
),
"pipeline": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Pipeline']", "to_field": "'uuid'"},
),
},
"locations.lockssomatic": {
"Meta": {"object_name": "Lockssomatic"},
"au_size": (
"django.db.models.fields.BigIntegerField",
[],
{"null": "True", "blank": "True"},
),
"checksum_type": (
"django.db.models.fields.CharField",
[],
{"max_length": "64", "null": "True", "blank": "True"},
),
"collection_iri": (
"django.db.models.fields.CharField",
[],
{"max_length": "256", "null": "True", "blank": "True"},
),
"content_provider_id": (
"django.db.models.fields.CharField",
[],
{"max_length": "32"},
),
"external_domain": (
"django.db.models.fields.URLField",
[],
{"max_length": "200"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"keep_local": (
"django.db.models.fields.BooleanField",
[],
{"default": "True"},
),
"sd_iri": ("django.db.models.fields.URLField", [], {"max_length": "256"}),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
},
"locations.nfs": {
"Meta": {"object_name": "NFS"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"manually_mounted": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"remote_name": (
"django.db.models.fields.CharField",
[],
{"max_length": "256"},
),
"remote_path": ("django.db.models.fields.TextField", [], {}),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
"version": (
"django.db.models.fields.CharField",
[],
{"default": "'nfs4'", "max_length": "64"},
),
},
"locations.package": {
"Meta": {"object_name": "Package"},
"current_location": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Location']", "to_field": "'uuid'"},
),
"current_path": ("django.db.models.fields.TextField", [], {}),
"description": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"misc_attributes": (
"jsonfield.fields.JSONField",
[],
{"default": "{}", "null": "True", "blank": "True"},
),
"origin_pipeline": (
"django.db.models.fields.related.ForeignKey",
[],
{
"to": "orm['locations.Pipeline']",
"to_field": "'uuid'",
"null": "True",
"blank": "True",
},
),
"package_type": (
"django.db.models.fields.CharField",
[],
{"max_length": "8"},
),
"pointer_file_location": (
"django.db.models.fields.related.ForeignKey",
[],
{
"blank": "True",
"related_name": "'+'",
"to_field": "'uuid'",
"null": "True",
"to": "orm['locations.Location']",
},
),
"pointer_file_path": (
"django.db.models.fields.TextField",
[],
{"null": "True", "blank": "True"},
),
"size": ("django.db.models.fields.IntegerField", [], {"default": "0"}),
"status": (
"django.db.models.fields.CharField",
[],
{"default": "'FAIL'", "max_length": "8"},
),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
},
"locations.packagedownloadtask": {
"Meta": {"object_name": "PackageDownloadTask"},
"download_completion_time": (
"django.db.models.fields.DateTimeField",
[],
{"default": "None", "null": "True", "blank": "True"},
),
"downloads_attempted": (
"django.db.models.fields.IntegerField",
[],
{"default": "0"},
),
"downloads_completed": (
"django.db.models.fields.IntegerField",
[],
{"default": "0"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"package": (
"django.db.models.fields.related.ForeignKey",
[],
{"to": "orm['locations.Package']", "to_field": "'uuid'"},
),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
},
"locations.packagedownloadtaskfile": {
"Meta": {"object_name": "PackageDownloadTaskFile"},
"completed": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"failed": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
"filename": (
"django.db.models.fields.CharField",
[],
{"max_length": "256"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"task": (
"django.db.models.fields.related.ForeignKey",
[],
{
"related_name": "'download_file_set'",
"to_field": "'uuid'",
"to": "orm['locations.PackageDownloadTask']",
},
),
"url": ("django.db.models.fields.TextField", [], {}),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
},
"locations.pipeline": {
"Meta": {"object_name": "Pipeline"},
"api_key": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
"api_username": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
"description": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
"enabled": (
"django.db.models.fields.BooleanField",
[],
{"default": "True"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"remote_name": (
"django.db.models.fields.CharField",
[],
{
"default": "None",
"max_length": "256",
"null": "True",
"blank": "True",
},
),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36"},
),
},
"locations.pipelinelocalfs": {
"Meta": {"object_name": "PipelineLocalFS"},
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"remote_name": (
"django.db.models.fields.CharField",
[],
{"max_length": "256"},
),
"remote_user": (
"django.db.models.fields.CharField",
[],
{"max_length": "64"},
),
"space": (
"django.db.models.fields.related.OneToOneField",
[],
{
"to": "orm['locations.Space']",
"to_field": "'uuid'",
"unique": "True",
},
),
},
"locations.space": {
"Meta": {"object_name": "Space"},
"access_protocol": (
"django.db.models.fields.CharField",
[],
{"max_length": "8"},
),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"last_verified": (
"django.db.models.fields.DateTimeField",
[],
{"default": "None", "null": "True", "blank": "True"},
),
"path": (
"django.db.models.fields.TextField",
[],
{"default": "''", "blank": "True"},
),
"size": (
"django.db.models.fields.BigIntegerField",
[],
{"default": "None", "null": "True", "blank": "True"},
),
"staging_path": ("django.db.models.fields.TextField", [], {}),
"used": ("django.db.models.fields.BigIntegerField", [], {"default": "0"}),
"uuid": (
"django.db.models.fields.CharField",
[],
{"unique": "True", "max_length": "36", "blank": "True"},
),
"verified": (
"django.db.models.fields.BooleanField",
[],
{"default": "False"},
),
},
}
complete_apps = ["locations"]
|
artefactual/archivematica-storage-service
|
storage_service/locations/south_migrations/0006_duracloud.py
|
Python
|
agpl-3.0
| 25,555 | 0.001722 |
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from itertools import count, cycle
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import CREATED
from contrib.performance.httpauth import AuthHandlerAgent
from contrib.performance.httpclient import StringProducer
from contrib.performance.benchlib import initialize, sample
from contrib.performance.benchmarks.event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
organizerSequence = 1
user = password = "user%02d" % (organizerSequence,)
root = "/"
principal = "/"
# Two calendars between which to move the event.
fooCalendar = "event-move-foo-benchmark"
barCalendar = "event-move-bar-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendars first
for calendar in [fooCalendar, barCalendar]:
yield initialize(
agent, host, port, user, password, root, principal, calendar)
fooURI = 'http://%s:%d/calendars/__uids__/%s/%s/some-event.ics' % (
host, port, user, fooCalendar)
barURI = 'http://%s:%d/calendars/__uids__/%s/%s/some-event.ics' % (
host, port, user, barCalendar)
# Create the event that will move around
headers = Headers({"content-type": ["text/calendar"]})
yield agent.request(
'PUT', fooURI, headers,
StringProducer(makeEvent(1, organizerSequence, attendeeCount)))
# Move it around sooo much
source = cycle([fooURI, barURI])
dest = cycle([barURI, fooURI])
params = (
('MOVE', source.next(),
Headers({"destination": [dest.next()], "overwrite": ["F"]}))
for i in count(1))
samples = yield sample(dtrace, samples, agent, params.next, CREATED)
returnValue(samples)
|
macosforge/ccs-calendarserver
|
contrib/performance/benchmarks/event_move.py
|
Python
|
apache-2.0
| 2,708 | 0 |
"""Output formatters using shell syntax.
"""
from .base import SingleFormatter
import argparse
import six
class ShellFormatter(SingleFormatter):
def add_argument_group(self, parser):
group = parser.add_argument_group(
title='shell formatter',
description='a format a UNIX shell can parse (variable="value")',
)
group.add_argument(
'--variable',
action='append',
default=[],
dest='variables',
metavar='VARIABLE',
help=argparse.SUPPRESS,
)
group.add_argument(
'--prefix',
action='store',
default='',
dest='prefix',
help='add a prefix to all variable names',
)
def emit_one(self, column_names, data, stdout, parsed_args):
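        # For example, column_names ['Name', 'Status'] and data ['demo', 'ACTIVE']
        # with --prefix os_ and no --variable filter produce (illustratively):
        #   os_name="demo"
        #   os_status="ACTIVE"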
variable_names = [c.lower().replace(' ', '_')
for c in column_names
]
desired_columns = parsed_args.variables
for name, value in zip(variable_names, data):
if name in desired_columns or not desired_columns:
if isinstance(value, six.string_types):
value = value.replace('"', '\\"')
stdout.write('%s%s="%s"\n' % (parsed_args.prefix, name, value))
return
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/cliff/formatters/shell.py
|
Python
|
mit
| 1,337 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test suite for language_check."""
from __future__ import unicode_literals
import unittest
import warnings
from collections import namedtuple
import language_check
class TestLanguageTool(unittest.TestCase):
CheckTest = namedtuple('CheckTest', ('text', 'matches'))
Match = namedtuple('Match', ('fromy', 'fromx', 'ruleId'))
check_tests = {
'en': [
CheckTest(
('Paste your own text here... or check this text too see '
'a few of the problems that that LanguageTool can detect. '
'Did you notice that their is no spelcheckin included?'),
[
Match(0, 47, 'TOO_TO'),
Match(0, 132, 'THEIR_IS'),
]
),
],
'fr': [
CheckTest(
('Se texte est un exemple pour pour vous montrer '
'le fonctionnement de LanguageTool. '
'notez que LanguageTool ne comporte pas '
'de correcteur orthographique.'),
[
Match(0, 0, 'SE_CE'),
Match(0, 3, 'TE_NV'),
Match(0, 24, 'FRENCH_WORD_REPEAT_RULE'),
Match(0, 82, 'UPPERCASE_SENTENCE_START'),
]
),
CheckTest(
'je me rappelle de tout sans aucun soucis!',
[
Match(0, 0, 'UPPERCASE_SENTENCE_START'),
Match(0, 6, 'RAPPELER_DE'),
Match(0, 28, 'ACCORD_NOMBRE'),
Match(0, 34, 'FRENCH_WHITESPACE'),
]
),
],
}
correct_tests = {
'en-US': {
'that would of been to impressive.':
'That would have been too impressive.',
},
'fr': {
'il monte en haut si il veut.':
'Il monte s’il veut.',
},
}
def test_check(self):
lang_check = language_check.LanguageTool()
for language, tests in self.check_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, expected_matches in tests:
matches = lang_check.check(text)
for expected_match in expected_matches:
for match in matches:
if (
(match.fromy, match.fromx, match.ruleId) ==
(expected_match.fromy, expected_match.fromx,
expected_match.ruleId)
):
break
else:
raise IndexError(
'can’t find {!r}'.format(expected_match))
def test_correct(self):
lang_check = language_check.LanguageTool()
for language, tests in self.correct_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, result in tests.items():
self.assertEqual(lang_check.correct(text), result)
def test_languages(self):
self.assertIn('en', language_check.get_languages())
def test_version(self):
self.assertTrue(language_check.get_version())
def test_get_build_date(self):
self.assertTrue(language_check.get_build_date())
def test_get_directory(self):
path = language_check.get_directory()
language_check.set_directory(path)
self.assertEqual(path, language_check.get_directory())
def test_disable_spellcheck(self):
sentence_with_misspelling = 'This is baad.'
lang_check = language_check.LanguageTool()
self.assertTrue(lang_check.check(sentence_with_misspelling))
lang_check.disable_spellchecking()
self.assertFalse(lang_check.check(sentence_with_misspelling))
lang_check.enable_spellchecking()
self.assertTrue(lang_check.check(sentence_with_misspelling))
def test_README_with_unicode(self):
tool = language_check.LanguageTool('en-US')
text = ('A sentence with a error in the '
'Hitchhiker’s Guide tot he Galaxy')
matches = tool.check(text)
self.assertEqual(len(matches), 2)
self.assertEqual((matches[0].fromy, matches[0].fromx),
(0, 16))
self.assertEqual((matches[0].ruleId, matches[0].replacements),
('EN_A_VS_AN', ['an']))
self.assertEqual((matches[1].fromy, matches[1].fromx),
(0, 50))
self.assertEqual((matches[1].ruleId, matches[1].replacements),
('TOT_HE', ['to the']))
corrected = language_check.correct(text, matches)
self.assertEqual(corrected, 'A sentence with an error in the '
'Hitchhiker’s Guide to the Galaxy')
if __name__ == '__main__':
unittest.main()
|
myint/language-check
|
test.py
|
Python
|
lgpl-3.0
| 5,483 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import re
import unicodedata
import datetime
import subprocess
from py3compat import string_types, text_type
from django.utils import timezone
from django.conf import settings
from uninond.models.SMSMessages import SMSMessage
# default country prefix
COUNTRY_PREFIX = getattr(settings, 'COUNTRY_PREFIX', 223)
ALL_COUNTRY_CODES = [1242, 1246, 1264, 1268, 1284, 1340, 1345, 1441, 1473,
1599, 1649, 1664, 1670, 1671, 1684, 1758, 1767, 1784,
1809, 1868, 1869, 1876, 1, 20, 212, 213, 216, 218, 220,
221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 248, 249, 250, 251, 252, 253, 254, 255,
256, 257, 258, 260, 261, 262, 263, 264, 265, 266, 267,
268, 269, 27, 290, 291, 297, 298, 299, 30, 31, 32, 33,
34, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
36, 370, 371, 372, 373, 374, 375, 376, 377, 378, 380,
381, 382, 385, 386, 387, 389, 39, 40, 41, 420, 421, 423,
43, 44, 45, 46, 47, 48, 49, 500, 501, 502, 503, 504,
505, 506, 507, 508, 509, 51, 52, 53, 54, 55, 56, 57, 58,
590, 591, 592, 593, 595, 597, 598, 599, 60, 61, 62, 63,
64, 65, 66, 670, 672, 673, 674, 675, 676, 677, 678, 679,
680, 681, 682, 683, 685, 686, 687, 688, 689, 690, 691,
692, 7, 81, 82, 84, 850, 852, 853, 855, 856, 86, 870,
880, 886, 90, 91, 92, 93, 94, 95, 960, 961, 962, 963,
964, 965, 966, 967, 968, 970, 971, 972, 973, 974, 975,
976, 977, 98, 992, 993, 994, 995, 996, 998]
MONTHS = ['J', 'F', 'M', 'A', 'Y', 'U', 'L', 'G', 'S', 'O', 'N', 'D']
ALPHA = 'abcdefghijklmnopqrstuvwxyz'
def phonenumber_isint(number):
''' whether number is in international format '''
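    # e.g. phonenumber_isint("+22370123456") -> True,
    #      phonenumber_isint("70123456") -> False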
if re.match(r'^[+|(]', number):
return True
if re.match(r'^\d{1,4}\.\d+$', number):
return True
return False
def phonenumber_indicator(number):
''' extract indicator from number or "" '''
for indic in ALL_COUNTRY_CODES:
if number.startswith("%{}".format(indic)) \
or number.startswith("+{}".format(indic)):
return str(indic)
return ""
def phonenumber_cleaned(number):
''' return (indicator, number) cleaned of space and other '''
# clean up
if not isinstance(number, string_types):
number = number.__str__()
# cleanup markup
clean_number = re.sub(r'[^\d\+]', '', number)
if phonenumber_isint(clean_number):
h, indicator, clean_number = \
clean_number.partition(phonenumber_indicator(clean_number))
return (indicator, clean_number)
return (None, clean_number)
def join_phonenumber(prefix, number, force_intl=True):
if not number:
return None
if not prefix and force_intl:
prefix = COUNTRY_PREFIX
return "+{prefix}{number}".format(prefix=prefix, number=number)
def phonenumber_repr(number, skip_indicator=str(COUNTRY_PREFIX)):
''' properly formated for visualization: (xxx) xx xx xx xx '''
def format(number):
if len(number) % 2 == 0:
span = 2
else:
span = 3
# use NBSP
return " ".join(["".join(number[i:i + span])
for i in range(0, len(number), span)])
indicator, clean_number = phonenumber_cleaned(number)
# string-only identity goes into indicator
if indicator is None and not clean_number:
return number.strip()
if indicator and indicator != skip_indicator:
return "(%(ind)s) %(num)s" \
% {'ind': indicator,
'num': format(clean_number)}
return format(clean_number)
def normalized_phonenumber(number_text):
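    # e.g. normalized_phonenumber("70 12 34 56") -> "+22370123456" (with the
    # default country prefix) and "+33 6 12 34 56 78" -> "+33612345678".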
if number_text is None or not number_text.strip():
return None
return join_phonenumber(*phonenumber_cleaned(number_text))
def operator_from_malinumber(number, default=settings.FOREIGN):
''' ORANGE or MALITEL based on the number prefix '''
indicator, clean_number = phonenumber_cleaned(
normalized_phonenumber(number))
if indicator is not None and indicator != str(COUNTRY_PREFIX):
return default
for operator, opt in settings.OPERATORS.items():
for prefix in opt[1]:
if clean_number.startswith(str(prefix)):
return operator
return default
def send_sms(to, text):
return SMSMessage.objects.create(
direction=SMSMessage.OUTGOING,
identity=to,
event_on=timezone.now(),
text=text)
def fake_message(to, text):
message = send_sms(to, text)
message.handled = True
message.save()
return message
def to_ascii(text):
    return unicodedata.normalize('NFKD', text_type(text)) \
        .encode('ASCII', 'ignore').strip()
def date_to_ident(adate):
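    # Encode a date as a short ident: last digit of the year, a month letter
    # from ALPHA and the day in hex; for days after the 16th the day is halved
    # and the month index doubled, so the day still fits in one hex digit.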
year, month, day = adate.timetuple()[0:3]
hyear = text_type(year)[-1]
if day > 16:
hmonth = ALPHA[month * 2]
hday = hex(day // 2)[2:]
else:
hmonth = ALPHA[month]
hday = hex(day)[2:]
return "{y}{m}{d}".format(m=hmonth, d=hday, y=hyear)
def ident_to_date(ident):
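    # Best-effort inverse of date_to_ident; the halving above makes odd days
    # late in the month unrecoverable.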
hyear, hmonth, hday = ident[0], ident[1], ident[2:]
year = int('201{}'.format(hyear))
day = int(hday, 16)
month = ALPHA.index(hmonth)
if month > 12:
month //= 2
day *= 2
return datetime.date(year, month, day)
def dispatch_sms(text, roles, root):
sent_messages = []
for identity in root.ancestors_contacts(roles, identies_only=True):
sent_messages.append(send_sms(identity, text))
return sent_messages
def datetime_repr(adatetime):
return ("{date} à {time}"
.format(date=adatetime.strftime("%A %-d"),
time=adatetime.strftime("%Hh%M")).lower())
def exec_cmd(command):
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
process.wait()
return process.returncode
|
yeleman/uninond
|
uninond/tools.py
|
Python
|
mit
| 6,367 | 0 |
import csv
from numpy import histogram
def review_stats(count_ratings, length):
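    # Tally the number of reviews per star rating (1-5) and record the text
    # length of every review found in the exported CSV.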
# print "in extract_rows"
ip_csv = "data\input\yelp_academic_dataset_review_ext.csv"
with open(ip_csv, "rb") as source:
rdr = csv.reader(source)
firstline = True
for r in rdr:
if firstline: # skip first line
firstline = False
continue
count_ratings[int(r[2])] += 1
length.append(len(r[0]))
def business_stats(categories, category_count):
ip_csv = "data\input\yelp_academic_dataset_business_ext.csv"
with open(ip_csv, "rb") as source:
rdr = csv.reader(source)
next(rdr)
# c = 0
for r in rdr:
cat = r[0]
items = cat.split(',')
for i in items:
i = i.lstrip()
                if i in category_count:
category_count[i] = category_count[i] + 1
else:
category_count[i] = 1
categories.append(i)
# print categories
# print category_count
if __name__ == '__main__':
count_ratings = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
length = []
review_stats(count_ratings, length)
print "Review Stats"
print ('-' * 100)
print "total reviews", count_ratings[1] + count_ratings[2] + count_ratings[3] + count_ratings[4] + count_ratings[5]
print "Review breakup per ratings"
print "Review 1 star", count_ratings[1]
print "Review 2 star", count_ratings[2]
print "Review 3 star", count_ratings[3]
print "Review 4 star", count_ratings[4]
print "Review 5 star", count_ratings[5]
length.sort()
sum = 0.0
for i in length:
sum += i
print "Min length: ", min(length), "Max length: ", max(length)
print "Avg length: ", sum / len(length), "Median: ", length[len(length) / 2]
hist,bin_edges = histogram(a=length,bins=20)
print hist
print bin_edges
'''
print "Business Stats"
print ('-' * 100)
categories = []
category_count = {}
business_stats(categories, category_count)
print "Number of categories", len(categories)
print "Reviews per category:"
for c in categories:
print c + "?" + str(category_count[c])
'''
|
abhirevan/Yelp-Rate-my-Review
|
src/review_stats.py
|
Python
|
mit
| 2,318 | 0.004745 |
from code_intelligence import graphql
import fire
import github3
import json
import logging
import os
import numpy as np
import pprint
import retrying
TOKEN_NAME_PREFERENCE = ["INPUT_GITHUB_PERSONAL_ACCESS_TOKEN", "GITHUB_PERSONAL_ACCESS_TOKEN", "GITHUB_TOKEN"]
TOKEN_NAME = None
for token in TOKEN_NAME_PREFERENCE:
    if os.getenv(token):
        TOKEN_NAME = token
        break
assert TOKEN_NAME, f"You must supply one of the following environment variables: {', '.join(TOKEN_NAME_PREFERENCE)}"
PULL_REQUEST_TYPE = "PullRequest"
# TODO(jlewi): Rewrite this code to use:
# i) graphql.unpack_and_split_nodes
# ii) graphql.shard_writer
def process_notification(n):
# Mark as read anything that isn't an explicit mention.
# For PRs there doesn't seem like a simple way to detect if the notice
# is because the state changed
#
# We exclude mentions on PR because that gets overwhelmed by "/assign"
# statements. We should potentially be more discerning and not mark the
# notification as read for PRs which aren't assigned to the user.
if n.reason == "mention":
if n.subject.get("type") != "PullRequest":
return
title = n.subject.get("title")
logging.info("Marking as read: type: %s reason: %s title: %s",
n.subject.get("type"), n.reason, title)
n.mark()
def process_issue_results(data):
"""Process the data returned by the issues GraphQL request.
Args:
data: The data returned
Returns:
issues: A list of dicts; each dict is the data for some of
the results
"""
edges = data.get("data").get("repository").get("issues").get("edges")
issues = []
for e in edges:
issues.append(e["node"])
return issues
class NotificationManager(object):
def mark_read(self, user):
token = os.getenv(TOKEN_NAME)
if not token:
raise ValueError(("Environment variable {0} needs to be set to a GitHub "
"token.").format(token))
client = github3.GitHub(username=user, token=token)
notifications = client.notifications()
# https://developer.github.com/v3/activity/notifications/
#
# How do we identify closed pull requests?
for n in notifications:
process_notification(n)
def write_notifications(self, user, output):
"""Write all notifications to a file.
Args:
user: Name of the user to get notifications for
output: The file to write notifications to.
Fetches all notifications, including ones marked read,
and writes them to the supplied file.
"""
token = os.getenv(TOKEN_NAME)
if not token:
raise ValueError(("Environment variable {0} needs to be set to a GitHub "
"token.").format(token))
client = github3.GitHub(username=user, token=token)
notifications = client.notifications(all=True)
# https://developer.github.com/v3/activity/notifications/
#
# How do we identify closed pull requests?
i = 0
with open(output, mode="w") as hf:
for n in notifications:
i += 1
hf.write(n.as_json())
hf.write("\n")
logging.info("Wrote %s notifications to %s", i, output)
def fetch_issues(self, org, repo, output):
"""Fetch issues for a repository
Args:
org: The org that owns the repository
repo: The directory for the repository
output: The directory to write the results
    Writes the issues along with their first comments to shard files in the
    output directory.
"""
client = graphql.GraphQLClient()
num_issues_per_page = 100
query_template = """{{
repository(owner: "{org}", name: "{repo}") {{
issues(first:{num_issues_per_page} {issues_cursor}) {{
totalCount
pageInfo {{
endCursor
hasNextPage
}}
edges{{
node {{
author {{
__typename
... on User {{
login
}}
... on Bot{{
login
}}
}}
title
body
comments(first:20, ){{
totalCount
edges {{
node {{
author {{
__typename
... on User {{
login
}}
... on Bot{{
login
}}
}}
body
createdAt
}}
}}
}}
}}
}}
}}
}}
}}
"""
shard = 0
num_pages = None
if not os.path.exists(output):
os.makedirs(output)
total_issues = None
has_next_issues_page = True
# TODO(jlewi): We should persist the cursors to disk so we can resume
# after errors
issues_cursor = None
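    # Page through the issues with the GraphQL cursor, writing each page of
    # results to its own shard file under `output`.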
while has_next_issues_page:
issues_cursor_text = ""
if issues_cursor:
issues_cursor_text = "after:\"{0}\"".format(issues_cursor)
query = query_template.format(org=org, repo=repo,
num_issues_per_page=num_issues_per_page,
issues_cursor=issues_cursor_text)
results = client.run_query(query)
if results.get("errors"):
logging.error("There was a problem issuing the query; errors:\n%s",
"\n".join(results.get("errors")))
return
if not total_issues:
total_issues = results["data"]["repository"]["issues"]["totalCount"]
num_pages = int(np.ceil(total_issues/float(num_issues_per_page)))
logging.info("%s/%s has a total of %s issues", org, repo, total_issues)
shard_file = os.path.join(
output, "issues-{0}-{1}-{2:03d}-of-{3:03d}.json".format(org, repo, shard,
num_pages))
issues = process_issue_results(results)
with open(shard_file, "w") as hf:
for i in issues:
json.dump(i, hf)
hf.write("\n")
logging.info("Wrote shard %s to %s", shard, shard_file)
shard += 1
page_info = results["data"]["repository"]["issues"]["pageInfo"]
issues_cursor = page_info["endCursor"]
has_next_issues_page = page_info["hasNextPage"]
def _create_client(self, user):
token = os.getenv(TOKEN_NAME)
if not token:
raise ValueError(("Environment variable {0} needs to be set to a GitHub "
"token.").format(token))
client = github3.GitHub(username=user, token=token)
return client
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(message)s|%(pathname)s|%(lineno)d|'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
fire.Fire(NotificationManager)
|
kubeflow/code-intelligence
|
py/notifications/notifications.py
|
Python
|
mit
| 6,630 | 0.008748 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
urlpatterns = [
url(r"^/android/setup$", views.android_setup_view, name="notif_android_setup"),
url(r"^/chrome/setup$", views.chrome_setup_view, name="notif_chrome_setup"),
url(r"^/chrome/getdata$", views.chrome_getdata_view, name="notif_chrome_getdata"),
url(r"^/gcm/post$", views.gcm_post_view, name="notif_gcm_post"), url(r"^/gcm/list$", views.gcm_list_view, name="notif_gcm_list")
]
|
jacobajit/ion
|
intranet/apps/notifications/urls.py
|
Python
|
gpl-2.0
| 483 | 0.008282 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-04 09:25
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='GalaxyInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(help_text='Instance URL', null=True)),
('title', models.CharField(help_text='The name / title of the instance. E.g. GalaxyP', max_length=256, null=True)),
('description', models.TextField(help_text='Any extra description you wish to add.', null=True)),
('users_recent', models.IntegerField(default=0)),
('users_total', models.IntegerField(default=0)),
('jobs_run', models.IntegerField(default=0)),
('api_key', models.UUIDField(default=uuid.uuid4, editable=False)),
('last_import', models.FloatField(default=-1)),
('owners', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_job_id', models.IntegerField(default=-1)),
('tool_id', models.CharField(max_length=255)),
('tool_version', models.TextField()),
('state', models.CharField(max_length=16)),
('create_time', models.DateTimeField(blank=True, null=True)),
('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')),
],
),
migrations.CreateModel(
name='JobParam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_job_id', models.IntegerField(default=-1)),
('name', models.CharField(max_length=256)),
('value', models.TextField()),
('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')),
],
),
migrations.CreateModel(
name='MetricNumeric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_job_id', models.IntegerField(default=-1)),
('plugin', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('value', models.DecimalField(decimal_places=7, max_digits=22)),
('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')),
],
),
migrations.CreateModel(
name='MetricText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_job_id', models.IntegerField(default=-1)),
('plugin', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('value', models.CharField(max_length=256)),
('instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.GalaxyInstance')),
],
),
migrations.AlterUniqueTogether(
name='job',
unique_together=set([('instance', 'external_job_id')]),
),
]
|
erasche/galactic-radio-telescope
|
api/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 3,931 | 0.003561 |
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
from .tests_mechanism import AbstractTestFixture, dataset
from .check_utils import *
@dataset({"main_stif_test": {}})
class TestStif(AbstractTestFixture):
"""
Test the stif scenario responses
Possible journeys from A to B:
1/ 8h00 ====(line A)====> 10h00
2/ 9h00 ==(line B + C)==> 11h00
3/ 10h00 ====(line A)====> 12h00
"""
def test_stif_simple(self):
"""
Test of simple request :
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we don't want 2 journeys using the same line and changing at same points
So here we want journeys 1 and 2
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=true&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 2
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
assert response['journeys'][1]['arrival_date_time'] == '20140614T110000'
def test_stif_override_min_journeys_calls(self):
"""
Test of simple request :
* we only want 1 journey calls (no next call)
So here we only want journeys 1
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=1&_final_line_filter=true&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
def test_stif_override_final_line_filter(self):
"""
Test of simple request :
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we deactivate the filter on journeys using the same line and changing at same points
So here we want journeys 1, 2 and 3
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=false&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 3
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
assert response['journeys'][1]['arrival_date_time'] == '20140614T110000'
assert response['journeys'][2]['arrival_date_time'] == '20140614T120000'
def test_stif_max_successive_buses(self):
"""
BUS Bus Bus Bus
stopP ----> stopQ ----> stopR ----> stopS ----> stopT
15:00 16:00 17:00 18:00 19:00
Bus
stopP ----------------------------------------> stopT
15:00 20:00
Test of request with parameter "_max_successive_physical_mode":
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we don't want the journey using more than 3 Buses
So here we want journey1
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=3&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
#As we modify the value of _max_successive_physical_mode to 5 we want two journeys
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 2
def test_stif_max_successive_buses_with_tram_in_between(self):
"""
BUS Bus Bus Bus Tram Bus Bus
stopP ----> stopQ ----> stopR ----> stopS ----> stopT ----> stopU ----> stopV ----> stopW
15:00 16:00 17:00 18:00 19:00 19:30 20:00 20:30
Bus
stopP ----------------------------------------------------------------------------> stopW
15:00 21:00
Test of request with parameter "_max_successive_physical_mode":
* we want to make at least 2 journey calls (not only the best journey, but also try next)
        * we don't want the journey to use more than 3 successive buses
* we have "Bus" and "Tram" as means of transport
"""
        # As there are 4 successive buses to be used from stopP to stopW and _max_successive_physical_mode = 3
# we have 1 journey
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default"\
"&_max_successive_physical_mode=3&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
#As we modify the value of _max_successive_physical_mode to 5 we want two journeys
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 2
# As we modify the value of _max_additional_connections to 2 we delete the second journey because
        # it contains more than nb_connections + 2 connections
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=2"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
|
kadhikari/navitia
|
source/jormungandr/tests/stif_tests.py
|
Python
|
agpl-3.0
| 7,989 | 0.005007 |
# The MIT License (MIT)
# Copyright (c) 2009 Max Polk
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import errno
import fcntl
import os
class FLock:
'''
Ensures application is running only once, by using a lock file.
Ensure call to lock works. Then call unlock at program exit.
You cannot read or write to the lock file, but for some reason you can
remove it. Once removed, it is still in a locked state somehow. Another
application attempting to lock against the file will fail, even though
the directory listing does not show the file. Mysterious, but we are glad
the lock integrity is upheld in such a case.
Instance variables:
lockfile -- Full path to lock file
lockfd -- File descriptor of lock file exclusively locked
'''
def __init__(self, lockfile):
self.lockfile = lockfile
self.lockfd = None
def lock(self):
'''
Creates and holds on to the lock file with exclusive access.
Returns True if lock successful, False if it is not, and raises
an exception upon operating system errors encountered creating the
lock file.
'''
try:
#
            # Create, or else open and truncate, the lock file in read-write mode.
#
# A crashed app might not delete the lock file, so the
# os.O_CREAT | os.O_EXCL combination that guarantees
# atomic create isn't useful here. That is, we don't want to
# fail locking just because the file exists.
#
# Could use os.O_EXLOCK, but that doesn't exist yet in my Python
#
self.lockfd = os.open(self.lockfile,
os.O_TRUNC | os.O_CREAT | os.O_RDWR)
# Acquire exclusive lock on the file,
# but don't block waiting for it
fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Writing to file is pointless, nobody can see it
os.write(self.lockfd, "lockfile")
return True
except (OSError, IOError), e:
# Lock cannot be acquired is okay,
# everything else reraise exception
if e.errno in (errno.EACCES, errno.EAGAIN):
return False
else:
raise
def unlock(self):
try:
# FIRST unlink file, then close it. This way, we avoid file
# existence in an unlocked state
os.unlink(self.lockfile)
# Just in case, let's not leak file descriptors
os.close(self.lockfd)
except (OSError, IOError):
# Ignore error destroying lock file. See class doc about how
# lockfile can be erased and everything still works normally.
pass
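# Illustrative usage sketch (not part of the original module): guard a
# script so only one instance runs at a time.  The lock file path below is
# an assumption for the example.
if __name__ == '__main__':
    flock = FLock('/tmp/example_app.lock')
    if not flock.lock():
        raise SystemExit('another instance already holds the lock')
    try:
        pass  # do the real, single-instance work here
    finally:
        flock.unlock()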
|
toha10/python-cudet
|
cudet/flock.py
|
Python
|
gpl-2.0
| 3,884 | 0 |
#!/usr/bin/python
# AUTHOR : liuxu-0703@163.com
# used to extract keyword sets from xml
# used by aplog_helper.sh and adblogcat.sh
import os
import sys
import getopt
from xml.dom.minidom import parse, parseString
#=======================================
class KeywordSet:
def __init__(self, xml_node):
self.name = self.getText(xml_node.getElementsByTagName('name')[0])
self.type = self.getText(xml_node.getElementsByTagName('type')[0])
active = self.getText(xml_node.getElementsByTagName('active')[0])
if active == 'true':
self.active = True
else:
self.active = False
try:
self.project = self.getText(xml_node.getElementsByTagName('project')[0])
except:
self.project = 'None'
self.keywords = []
self.readKeywords(xml_node.getElementsByTagName('keywords')[0])
def getText(self, text_node):
'''get text from xml node
$text_node should be a node with type NODE_TEXT
return str of the text
'''
ret = ''
for n in text_node.childNodes:
ret = ret + n.nodeValue
return ret
def readKeywords(self, keywords_node):
'''read keywords and store them in self.keywords
$keywords_node should be xml node with name of <keywords>
return none
'''
for n in keywords_node.getElementsByTagName('k'):
self.keywords.append(self.getText(n))
def printKeywords(self):
'''print all keywords in self.keywords
return none
'''
for k in self.keywords:
print k
def printAllInfo(self):
print 'name: ' + self.name
print 'type: ' + self.type
print 'proj: ' + self.project
print 'acti: ' + str(self.active)
word_str = ''
for k in self.keywords:
word_str = word_str + k + '; '
print 'keywords:'
print word_str
print ' '
#=======================================
class KeywordManager:
def __init__(self, path):
if not os.path.isfile(path):
print '*. cannot find keywordset.xml file !'
return
self.path = path
self.xml_doc = parse(self.path)
self.xml_ksm = self.xml_doc.getElementsByTagName('KeywordSetManager')[0]
self.xml_ks_list = self.xml_ksm.getElementsByTagName('keywordset')
self.keywordset_list = []
self.print_inactive = False
for node in self.xml_ks_list:
#print self.getText(node.getElementsByTagName('name')[0])
self.readKeywordSet(node)
self.keywordset_list.sort(lambda x,y: self.compare(x, y))
def compare(self, a, b):
'''compare between two KeywordSet instance
$a and $b should be instance of KeywordSet
return -1, 0, 1
'''
if a.type != b.type:
if a.type == 'include':
return -1
if a.type == 'exclude':
return 1
if a.project != b.project:
if a.project == 'None':
return -1
if b.project == 'None':
return 1
cmp_result = cmp(a.project, b.project)
if cmp_result != 0:
return cmp_result
return cmp(a.name, b.name)
def getText(self, text_node):
'''get text from xml node
$text_node should be a node with type NODE_TEXT
return str of the text
'''
r = ''
for n in text_node.childNodes:
r = r + n.nodeValue
return r
#param $node should be a 'keywordset' node in xml file
def readKeywordSet(self, node):
'''read keywords and store them in self.keywordset_list
        $node should be an xml node with the name <keywordset>
return none
'''
ks = KeywordSet(node)
self.keywordset_list.append(ks)
#param should be true or false
def setPrintInactiveEnabled(self, inactive):
'''set self.print_inactive
'''
self.print_inactive = inactive
def listSets(self):
'''print all keywordsets
'''
for ks in self.keywordset_list:
if ks.active or self.print_inactive:
print ks.name
#param $set_type should be either include or exclude
def listSetsByType(self, set_type):
'''list keywordsets by include/exclude type
'''
for ks in self.keywordset_list:
if ks.type == set_type:
if ks.active or self.print_inactive:
print ks.name
#param $set_name should be name of a keywordset
def printKeywordsBySetName(self, set_name):
'''list keywords in a keywordset by name
if more than one keywordsets are with the same name, print them all
'''
for ks in self.keywordset_list:
if ks.name == set_name:
if ks.active or self.print_inactive:
ks.printKeywords()
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[2:], 't:n:d')
xml = sys.argv[1];
km = KeywordManager(xml)
for op, value in opts:
if op == '-t':
km.listSetsByType(value)
elif op == '-n':
km.printKeywordsBySetName(value)
elif op == '-d':
for ks in km.keywordset_list:
ks.printAllInfo()
|
liuxu0703/lx_bash_script
|
android_script/keyword_manager.py
|
Python
|
mit
| 5,470 | 0.006764 |
"""# Components.
You can adapt some component functions from the `gdsfactory.components` module.
Each function there returns a Component object
Here are two equivalent functions
"""
import gdsfactory as gf
def straight_wide1(width=10, **kwargs) -> gf.Component:
return gf.components.straight(width=width, **kwargs)
straight_wide2 = gf.partial(gf.components.straight, width=10)
if __name__ == "__main__":
# c = straight_wide1()
c = straight_wide2()
c.show()
|
gdsfactory/gdsfactory
|
gdsfactory/samples/20_components.py
|
Python
|
mit
| 482 | 0 |
from PyQt5 import QtCore, QtWidgets
import chigger
import peacock
from peacock.ExodusViewer.plugins.ExodusPlugin import ExodusPlugin
from MeshBlockSelectorWidget import MeshBlockSelectorWidget
class BlockHighlighterPlugin(peacock.base.PeacockCollapsibleWidget, ExodusPlugin):
"""
Widget for controlling the visible blocks/nodesets/sidesets of the mesh.
Mirrored off of peaocock.Exodus.plugins.BlockPlugin
"""
#: pyqtSignal: Emitted when window needs to change
windowRequiresUpdate = QtCore.pyqtSignal()
highlight = QtCore.pyqtSignal(object, object, object)
def __init__(self, collapsible_layout=QtWidgets.QHBoxLayout, **kwargs):
peacock.base.PeacockCollapsibleWidget.__init__(self, collapsible_layout=collapsible_layout)
ExodusPlugin.__init__(self, **kwargs)
self.setTitle('Highlight')
self.setEnabled(False)
self.MainLayout = self.collapsibleLayout()
# Block, nodeset, and sideset selector widgets
self.BlockSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.BLOCK, 'Blocks:')
self.SidesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.SIDESET, 'Boundaries:')
self.NodesetSelector = MeshBlockSelectorWidget(chigger.exodus.ExodusReader.NODESET, 'Nodesets:')
self.MainLayout.addWidget(self.BlockSelector)
self.MainLayout.addWidget(self.SidesetSelector)
self.MainLayout.addWidget(self.NodesetSelector)
self.BlockSelector.selectionChanged.connect(self.setBlock)
self.SidesetSelector.selectionChanged.connect(self.setSideset)
self.NodesetSelector.selectionChanged.connect(self.setNodeset)
self.setup()
def onWindowCreated(self, *args):
"""
Initializes the selector widgets for the supplied reader/results.
"""
super(BlockHighlighterPlugin, self).onWindowCreated(*args)
self.BlockSelector.updateBlocks(self._reader, True)
self.SidesetSelector.updateBlocks(self._reader, True)
self.NodesetSelector.updateBlocks(self._reader, True)
self.__updateVariableState()
def onWindowUpdated(self):
"""
Update boundary/nodeset visibility when window is updated.
"""
if self._reader:
self.blockSignals(True)
self.BlockSelector.updateBlocks(self._reader)
self.SidesetSelector.updateBlocks(self._reader)
self.NodesetSelector.updateBlocks(self._reader)
self.blockSignals(False)
self.__updateVariableState()
def setBlock(self):
"""
Highlights a block and resets nodesets/sidesets
"""
block = self.BlockSelector.getBlocks()
self.SidesetSelector.reset()
self.NodesetSelector.reset()
self.highlight.emit(block, None, None)
def setSideset(self):
"""
Highlights a sideset and resets nodesets/blocks
"""
sideset = self.SidesetSelector.getBlocks()
self.BlockSelector.reset()
self.NodesetSelector.reset()
self.highlight.emit(None, sideset, None)
def setNodeset(self):
"""
Highlights a nodeset and resets sidesets/blocks
"""
nodeset = self.NodesetSelector.getBlocks()
self.BlockSelector.reset()
self.SidesetSelector.reset()
self.highlight.emit(None, None, nodeset)
def __updateVariableState(self):
"""
Enable/disable the nodeset/sidest selection based on variable type.
"""
varinfo = self._result[0].getCurrentVariableInformation()
if varinfo:
if varinfo.object_type == chigger.exodus.ExodusReader.ELEMENTAL:
self.SidesetSelector.setEnabled(False)
self.NodesetSelector.setEnabled(False)
else:
self.SidesetSelector.setEnabled(True)
self.NodesetSelector.setEnabled(True)
|
yipenggao/moose
|
python/peacock/Input/BlockHighlighterPlugin.py
|
Python
|
lgpl-2.1
| 3,928 | 0.001527 |
"""
Handle logging in a Message Box?
"""
from PyQt4 import QtGui, QtCore
import logging
import sys
class MyQWidget(QtGui.QWidget):
def center(self):
frameGm = self.frameGeometry()
screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())
centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
class ConsoleWindowLogHandler(logging.Handler):
def __init__(self, sigEmitter):
super(ConsoleWindowLogHandler, self).__init__()
self.sigEmitter = sigEmitter
def emit(self, logRecord):
message = str(logRecord.getMessage())
self.sigEmitter.emit(QtCore.SIGNAL("logMsg(QString)"), message)
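# Illustrative wiring sketch (not part of the original module): route log
# records into a QTextBrowser through the handler's old-style Qt signal.
# The widget layout and logger configuration are assumptions for the example.
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    browser = QtGui.QTextBrowser()
    emitter = QtCore.QObject()
    QtCore.QObject.connect(emitter, QtCore.SIGNAL("logMsg(QString)"),
                           browser.append)
    logging.getLogger().addHandler(ConsoleWindowLogHandler(emitter))
    logging.getLogger().warning("hello from the logging module")
    browser.show()
    sys.exit(app.exec_())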
|
CNR-Engineering/ModelerTools
|
common/qt_log_in_textbrowser.py
|
Python
|
gpl-3.0
| 788 | 0.002538 |
"""
Parser for HTML forms, that fills in defaults and errors. See ``render``.
"""
from __future__ import absolute_import
import re
from formencode.rewritingparser import RewritingParser, html_quote
import six
__all__ = ['render', 'htmlliteral', 'default_formatter',
'none_formatter', 'escape_formatter',
'FillingParser']
def render(form, defaults=None, errors=None, use_all_keys=False,
error_formatters=None, add_attributes=None,
auto_insert_errors=True, auto_error_formatter=None,
text_as_default=False, checkbox_checked_if_present=False,
listener=None, encoding=None,
error_class='error', prefix_error=True,
force_defaults=True, skip_passwords=False,
data_formencode_form=None, data_formencode_ignore=None,
):
"""
Render the ``form`` (which should be a string) given the ``defaults``
and ``errors``. Defaults are the values that go in the input fields
(overwriting any values that are there) and errors are displayed
inline in the form (and also effect input classes). Returns the
rendered string.
If ``auto_insert_errors`` is true (the default) then any errors
for which ``<form:error>`` tags can't be found will be put just
above the associated input field, or at the top of the form if no
field can be found.
If ``use_all_keys`` is true, if there are any extra fields from
defaults or errors that couldn't be used in the form it will be an
error.
``error_formatters`` is a dictionary of formatter names to
one-argument functions that format an error into HTML. Some
default formatters are provided if you don't provide this.
``error_class`` is the class added to input fields when there is
an error for that field.
``add_attributes`` is a dictionary of field names to a dictionary
of attribute name/values. If the name starts with ``+`` then the
value will be appended to any existing attribute (e.g.,
``{'+class': ' important'}``).
``auto_error_formatter`` is used to create the HTML that goes
above the fields. By default it wraps the error message in a span
and adds a ``<br>``.
If ``text_as_default`` is true (default false) then ``<input
type="unknown">`` will be treated as text inputs.
If ``checkbox_checked_if_present`` is true (default false) then
``<input type="checkbox">`` will be set to ``checked`` if any
corresponding key is found in the ``defaults`` dictionary, even
a value that evaluates to False (like an empty string). This
can be used to support pre-filling of checkboxes that do not have
a ``value`` attribute, since browsers typically will only send
the name of the checkbox in the form submission if the checkbox
is checked, so simply the presence of the key would mean the box
should be checked.
``listener`` can be an object that watches fields pass; the only
one currently is in ``htmlfill_schemabuilder.SchemaBuilder``
``encoding`` specifies an encoding to assume when mixing str and
unicode text in the template.
``prefix_error`` specifies if the HTML created by auto_error_formatter is
put before the input control (default) or after the control.
``force_defaults`` specifies if a field default is not given in
the ``defaults`` dictionary then the control associated with the
field should be set as an unsuccessful control. So checkboxes will
be cleared, radio and select controls will have no value selected,
and textareas will be emptied. This defaults to ``True``, which is
appropriate the defaults are the result of a form submission.
``skip_passwords`` specifies if password fields should be skipped when
rendering form-content. If disabled the password fields will not be filled
with anything, which is useful when you don't want to return a user's
password in plain-text source.
``data_formencode_form`` if a string is passed in (default `None`) only
fields with the html attribute `data-formencode-form` that matches this
string will be processed. For example: if a HTML fragment has two forms they
can be differentiated to Formencode by decorating the input elements with
attributes such as `data-formencode-form="a"` or `data-formencode-form="b"`,
then instructing `render()` to only process the "a" or "b" fields.
``data_formencode_ignore`` if True (default `None`) fields with the html
attribute `data-formencode-ignore` will not be processed. This attribute
need only be present in the tag: `data-formencode-ignore="1"`,
`data-formencode-ignore=""` and `data-formencode-ignore` without a value are
all valid signifiers.
"""
if defaults is None:
defaults = {}
if auto_insert_errors and auto_error_formatter is None:
auto_error_formatter = default_formatter
p = FillingParser(
defaults=defaults, errors=errors,
use_all_keys=use_all_keys,
error_formatters=error_formatters,
add_attributes=add_attributes,
auto_error_formatter=auto_error_formatter,
text_as_default=text_as_default,
checkbox_checked_if_present=checkbox_checked_if_present,
listener=listener, encoding=encoding,
prefix_error=prefix_error,
error_class=error_class,
force_defaults=force_defaults,
skip_passwords=skip_passwords,
data_formencode_form=data_formencode_form,
data_formencode_ignore=data_formencode_ignore,
)
p.feed(form)
p.close()
return p.text()
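# Illustrative sketch (not part of the original module): a typical call to
# ``render`` fills field values from ``defaults`` and inserts formatted
# error messages from ``errors``.  The form snippet and the field name
# ``age`` are assumptions made up for the example.
def _example_render():  # pragma: no cover
    form = '<form:error name="age"><input type="text" name="age">'
    return render(form, defaults={'age': '42'}, errors={'age': 'Not a number'})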
class htmlliteral(object):
def __init__(self, html, text=None):
if text is None:
text = re.sub(r'<.*?>', '', html)
            text = text.replace('>', '&gt;')
            text = text.replace('<', '&lt;')
            text = text.replace('"', '&quot;')
# @@: Not very complete
self.html = html
self.text = text
def __str__(self):
return self.text
def __repr__(self):
return '<%s html=%r text=%r>' % (
self.__class__.__name__, self.html, self.text)
def __html__(self):
return self.html
def default_formatter(error):
"""
Formatter that escapes the error, wraps the error in a span with
class ``error-message``, and adds a ``<br>``
"""
return '<span class="error-message">%s</span><br />\n' % html_quote(error)
def none_formatter(error):
"""
Formatter that does nothing, no escaping HTML, nothin'
"""
return error
def escape_formatter(error):
"""
Formatter that escapes HTML, no more.
"""
return html_quote(error)
def escapenl_formatter(error):
"""
Formatter that escapes HTML, and translates newlines to ``<br>``
"""
error = html_quote(error)
error = error.replace('\n', '<br>\n')
return error
def ignore_formatter(error):
"""
Formatter that emits nothing, regardless of the error.
"""
return ''
class FillingParser(RewritingParser):
r"""
Fills HTML with default values, as in a form.
Examples::
>>> defaults = dict(name='Bob Jones',
... occupation='Crazy Cultist',
... address='14 W. Canal\nNew Guinea',
... living='no',
... nice_guy=0)
>>> parser = FillingParser(defaults)
>>> parser.feed('''<input type="text" name="name" value="fill">
... <select name="occupation"> <option value="">Default</option>
... <option value="Crazy Cultist">Crazy cultist</option> </select>
... <textarea cols="20" style="width: 100%" name="address">
... An address</textarea>
... <input type="radio" name="living" value="yes">
... <input type="radio" name="living" value="no">
... <input type="checkbox" name="nice_guy" checked="checked">''')
>>> parser.close()
>>> print (parser.text()) # doctest: +NORMALIZE_WHITESPACE
<input type="text" name="name" value="Bob Jones">
<select name="occupation">
<option value="">Default</option>
<option value="Crazy Cultist" selected="selected">Crazy cultist</option>
</select>
<textarea cols="20" style="width: 100%" name="address">14 W. Canal
New Guinea</textarea>
<input type="radio" name="living" value="yes">
<input type="radio" name="living" value="no" checked="checked">
<input type="checkbox" name="nice_guy">
"""
default_encoding = 'utf8'
text_input_types = set("text hidden search tel url email datetime date"
" month week time datetime-local number range color".split())
def __init__(self, defaults, errors=None, use_all_keys=False,
error_formatters=None, error_class='error',
add_attributes=None, listener=None,
auto_error_formatter=None,
text_as_default=False, checkbox_checked_if_present=False,
encoding=None, prefix_error=True,
force_defaults=True, skip_passwords=False,
data_formencode_form=None, data_formencode_ignore=None,
):
RewritingParser.__init__(self)
self.source = None
self.lines = None
self.source_pos = None
self.defaults = defaults
self.in_textarea = None
self.skip_textarea = False
self.last_textarea_name = None
self.in_select = None
self.skip_next = False
self.errors = errors or {}
if isinstance(self.errors, six.string_types):
self.errors = {None: self.errors}
self.in_error = None
self.skip_error = False
self.use_all_keys = use_all_keys
self.used_keys = set()
self.used_errors = set()
if error_formatters is None:
self.error_formatters = default_formatter_dict
else:
self.error_formatters = error_formatters
self.error_class = error_class
self.add_attributes = add_attributes or {}
self.listener = listener
self.auto_error_formatter = auto_error_formatter
self.text_as_default = text_as_default
self.checkbox_checked_if_present = checkbox_checked_if_present
self.encoding = encoding
self.prefix_error = prefix_error
self.force_defaults = force_defaults
self.skip_passwords = skip_passwords
self.data_formencode_form = data_formencode_form
self.data_formencode_ignore = data_formencode_ignore
def str_compare(self, str1, str2):
"""
Compare the two objects as strings (coercing to strings if necessary).
Also uses encoding to compare the strings.
"""
if not isinstance(str1, six.string_types):
if hasattr(str1, '__unicode__'):
str1 = six.text_type(str1)
else:
str1 = str(str1)
if type(str1) == type(str2):
return str1 == str2
if isinstance(str1, six.text_type):
str1 = str1.encode(self.encoding or self.default_encoding)
else:
str2 = str2.encode(self.encoding or self.default_encoding)
return str1 == str2
def close(self):
self.handle_misc(None)
RewritingParser.close(self)
unused_errors = self.errors.copy()
for key in self.used_errors:
if key in unused_errors:
del unused_errors[key]
if self.auto_error_formatter:
for key, value in six.iteritems(unused_errors):
error_message = self.auto_error_formatter(value)
error_message = '<!-- for: %s -->\n%s' % (key, error_message)
self.insert_at_marker(
key, error_message)
unused_errors = {}
if self.use_all_keys:
unused = self.defaults.copy()
for key in self.used_keys:
if key in unused:
del unused[key]
assert not unused, (
"These keys from defaults were not used in the form: %s"
% ', '.join(unused))
if unused_errors:
error_text = ['%s: %s' % (key, self.errors[key])
for key in sorted(unused_errors)]
assert False, (
"These errors were not used in the form: %s"
% ', '.join(error_text))
if self.encoding is not None:
new_content = []
for item in self._content:
if (six.text_type is not str # Python 2
and isinstance(item, str)):
item = item.decode(self.encoding)
new_content.append(item)
self._content = new_content
self._text = self._get_text()
def skip_output(self):
return (self.in_textarea and self.skip_textarea) or self.skip_error
def add_key(self, key):
self.used_keys.add(key)
def handle_starttag(self, tag, attrs, startend=False):
self.write_pos()
if self.data_formencode_form:
for a in attrs:
if a[0] == 'data-formencode-form':
if a[1] != self.data_formencode_form:
return
if self.data_formencode_ignore:
for a in attrs:
if a[0] == 'data-formencode-ignore':
return
if tag == 'input':
self.handle_input(attrs, startend)
elif tag == 'textarea':
self.handle_textarea(attrs)
elif tag == 'select':
self.handle_select(attrs)
elif tag == 'option':
self.handle_option(attrs)
return
elif tag == 'form:error':
self.handle_error(attrs)
return
elif tag == 'form:iferror':
self.handle_iferror(attrs)
return
else:
return
if self.listener:
self.listener.listen_input(self, tag, attrs)
def handle_endtag(self, tag):
self.write_pos()
if tag == 'textarea':
self.handle_end_textarea()
elif tag == 'select':
self.handle_end_select()
elif tag == 'form:error':
self.handle_end_error()
elif tag == 'form:iferror':
self.handle_end_iferror()
def handle_startendtag(self, tag, attrs):
return self.handle_starttag(tag, attrs, True)
def handle_iferror(self, attrs):
name = self.get_attr(attrs, 'name')
assert name, (
"Name attribute in <iferror> required at %i:%i" % self.getpos())
notted = name.startswith('not ')
if notted:
name = name.split(None, 1)[1]
self.in_error = name
ok = self.errors.get(name)
if notted:
ok = not ok
if not ok:
self.skip_error = True
self.skip_next = True
def handle_end_iferror(self):
self.in_error = None
self.skip_error = False
self.skip_next = True
def handle_error(self, attrs):
name = self.get_attr(attrs, 'name')
if name is None:
name = self.in_error
assert name is not None, (
"Name attribute in <form:error> required"
" if not contained in <form:iferror> at %i:%i" % self.getpos())
formatter = self.get_attr(attrs, 'format') or 'default'
error = self.errors.get(name, '')
if error:
error = self.error_formatters[formatter](error)
self.write_text(error)
self.skip_next = True
self.used_errors.add(name)
def handle_end_error(self):
self.skip_next = True
def handle_input(self, attrs, startend):
t = (self.get_attr(attrs, 'type') or 'text').lower()
name = self.get_attr(attrs, 'name')
if self.prefix_error:
self.write_marker(name)
value = self.defaults.get(name)
if (six.text_type is not str # Python 2
and isinstance(name, six.text_type) and isinstance(value, str)):
value = value.decode(self.encoding or self.default_encoding)
if name in self.add_attributes:
for attr_name, attr_value in six.iteritems(self.add_attributes[name]):
if attr_name.startswith('+'):
attr_name = attr_name[1:]
self.set_attr(attrs, attr_name,
self.get_attr(attrs, attr_name, '') + attr_value)
else:
self.set_attr(attrs, attr_name, attr_value)
if (self.error_class
and self.errors.get(self.get_attr(attrs, 'name'))):
self.add_class(attrs, self.error_class)
if t in self.text_input_types:
if value is None and not self.force_defaults:
value = self.get_attr(attrs, 'value', '')
self.set_attr(attrs, 'value', value)
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif t == 'checkbox':
if self.force_defaults:
selected = False
else:
selected = self.get_attr(attrs, 'checked')
if not self.get_attr(attrs, 'value'):
if self.checkbox_checked_if_present:
selected = name in self.defaults
else:
selected = value
elif self.selected_multiple(value,
self.get_attr(attrs, 'value', '')):
selected = True
if selected:
self.set_attr(attrs, 'checked', 'checked')
else:
self.del_attr(attrs, 'checked')
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif t == 'radio':
if self.str_compare(value, self.get_attr(attrs, 'value', '')):
self.set_attr(attrs, 'checked', 'checked')
elif self.force_defaults or name in self.defaults:
self.del_attr(attrs, 'checked')
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif t == 'password':
if self.skip_passwords:
return
if value is None and not self.force_defaults:
value = value or self.get_attr(attrs, 'value', '')
self.set_attr(attrs, 'value', value)
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif t in ('file', 'image'):
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif t in ('submit', 'reset', 'button'):
self.set_attr(attrs, 'value', value or
self.get_attr(attrs, 'value', ''))
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
elif self.text_as_default:
if value is None:
value = self.get_attr(attrs, 'value', '')
self.set_attr(attrs, 'value', value)
self.write_tag('input', attrs, startend)
self.skip_next = True
self.add_key(name)
else:
assert False, ("I don't know about this kind of <input>:"
" %s at %i:%i" % ((t,) + self.getpos()))
if not self.prefix_error:
self.write_marker(name)
def handle_textarea(self, attrs):
name = self.get_attr(attrs, 'name')
if self.prefix_error:
self.write_marker(name)
if (self.error_class
and self.errors.get(name)):
self.add_class(attrs, self.error_class)
value = self.defaults.get(name, '')
if value or self.force_defaults:
self.write_tag('textarea', attrs)
self.write_text(html_quote(value))
self.write_text('</textarea>')
self.skip_textarea = True
self.in_textarea = True
self.last_textarea_name = name
self.add_key(name)
def handle_end_textarea(self):
if self.skip_textarea:
self.skip_textarea = False
else:
self.write_text('</textarea>')
self.in_textarea = False
self.skip_next = True
if not self.prefix_error:
self.write_marker(self.last_textarea_name)
self.last_textarea_name = None
def handle_select(self, attrs):
name = self.get_attr(attrs, 'name', False)
if name and self.prefix_error:
self.write_marker(name)
if (self.error_class
and self.errors.get(name)):
self.add_class(attrs, self.error_class)
self.in_select = self.get_attr(attrs, 'name', False)
self.write_tag('select', attrs)
self.skip_next = True
self.add_key(self.in_select)
def handle_end_select(self):
self.write_text('</select>')
self.skip_next = True
if not self.prefix_error and self.in_select:
self.write_marker(self.in_select)
self.in_select = None
def handle_option(self, attrs):
assert self.in_select is not None, (
"<option> outside of <select> at %i:%i" % self.getpos())
if self.in_select is not False:
if self.force_defaults or self.in_select in self.defaults:
if self.selected_multiple(self.defaults.get(self.in_select),
self.get_attr(attrs, 'value', '')):
self.set_attr(attrs, 'selected', 'selected')
self.add_key(self.in_select)
else:
self.del_attr(attrs, 'selected')
self.write_tag('option', attrs)
self.skip_next = True
def selected_multiple(self, obj, value):
"""
Returns true/false if obj indicates that value should be
selected. If obj has a __contains__ method it is used, otherwise
identity is used.
"""
if obj is None:
return False
if isinstance(obj, six.string_types):
return obj == value
if hasattr(obj, '__contains__'):
if value in obj:
return True
if hasattr(obj, '__iter__'):
for inner in obj:
if self.str_compare(inner, value):
return True
return self.str_compare(obj, value)
def write_marker(self, marker):
self._content.append((marker,))
def insert_at_marker(self, marker, text):
for i, item in enumerate(self._content):
if item == (marker,):
self._content.insert(i, text)
break
else:
self._content.insert(0, text)
# This can potentially be extended globally
default_formatter_dict = dict(
default=default_formatter,
none=none_formatter,
escape=escape_formatter,
escapenl=escapenl_formatter,
ignore=ignore_formatter)
|
formencode/formencode
|
src/formencode/htmlfill.py
|
Python
|
mit
| 23,202 | 0.000431 |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# import codecs
# import json
# class stokeScrapyPipeline(object):
# def __init__(self):
# self.file=codecs.open("stokeScrapy.json",mode="wb",encoding='utf-8')
# self.file.write('{"hah"'+':[')
import pymongo
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
#MongoDBPipeline
class MongoDBPipeline(object):
    def __init__(self):
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]
    def process_item(self, item, spider):
        valid = True
        for data in item:
            if not data:
                valid = False
                raise DropItem("Missing {0}!".format(data))
        if valid:
            self.collection.insert(dict(item))
            log.msg("Stoke added to MongoDB database!",
                    level=log.DEBUG, spider=spider)
        return item
# def process_item(self, item, spider):
# line = json.dumps(dict(item))+","
# self.file.write(line.decode("unicode_escape"))
# return item
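# Illustrative settings sketch (not part of the original project): the
# pipeline reads its connection details from the Scrapy settings module.
# The values below are assumptions showing the keys it expects.
#
#   MONGODB_SERVER = 'localhost'
#   MONGODB_PORT = 27017
#   MONGODB_DB = 'stoke'
#   MONGODB_COLLECTION = 'items'
#   ITEM_PIPELINES = {'stokeScrapy.pipelines.MongoDBPipeline': 300}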
|
disappearedgod/stokeScrapy
|
stokeScrapy/pipelines.py
|
Python
|
gpl-2.0
| 1,353 | 0.031042 |
# Copyright 2017 BrainPad Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import json
import numpy
class AdjustForPictureToRobot(object):
def __init__(self):
in_data_json = os.path.join(os.path.dirname(__file__), 'adjust_data.json')
if not os.path.isfile(in_data_json):
self.make_adjust_data()
try:
in_data = open(in_data_json).read()
except Exception as e:
raise e
self.adjust_data = json.loads(in_data)
def make_adjust_data(self):
try:
in_meas_json = os.path.join(os.path.dirname(__file__), 'adjust_measurement.json')
in_data = open(in_meas_json).read()
except Exception as e:
raise e
measurement = json.loads(in_data)
from_point = numpy.array([[-0.3, 1.5, 1],
[-0.3, -1.5, 1],
[0.3, 0, 1]])
to_point = numpy.array([[measurement['-0.3,1.5']['x'], measurement['-0.3,1.5']['y'], 1],
[measurement['-0.3,-1.5']['x'], measurement['-0.3,-1.5']['y'], 1],
[measurement['0.3,0']['x'], measurement['0.3,0']['y'], 1]])
inv_to_point = numpy.linalg.inv(to_point.T)
trans = numpy.dot(from_point.T, inv_to_point)
out_data = {}
for key, value in sorted(measurement.items()):
x_in, y_in = key.split(',')
x_picture = float(x_in)
y_picture = float(y_in)
new_key = '%s,%s' % (round(x_picture, 1), round(y_picture, 1))
if value:
x_robot = value['x']
y_robot = value['y']
temp_point = numpy.dot(numpy.array([x_robot, y_robot, 1]), trans.T)
x_picture_conv = float(temp_point[0])
y_picture_conv = float(temp_point[1])
x_picture_diff = float(x_picture - x_picture_conv)
y_picture_diff = float(y_picture - y_picture_conv)
out_data.update({new_key: {'x_picture': x_picture,
'x_picture_conv': x_picture_conv,
'x_picture_diff': x_picture_diff,
'x_robot': x_robot,
'y_picture': y_picture,
'y_picture_conv': y_picture_conv,
'y_picture_diff': y_picture_diff,
'y_robot': y_robot}})
else:
out_data.update({new_key: None})
try:
out_data_json = os.path.join(os.path.dirname(__file__), 'adjust_data.json')
f = open(out_data_json, 'w')
f.write(json.dumps(out_data, sort_keys=True, indent=4))
f.close()
except Exception as e:
raise e
def adjust(self, x, y):
if -1 <= x <= 1 and -1.5 <= y <= 1.5:
pass
else:
message = "Error: x=%s y=%s coordinate is out of range in sheet." % (x, y)
raise Exception(message)
x_round = round(x, 1)
y_round = round(y, 1)
if x_round == -0.0:
x_round = 0.0
if y_round == -0.0:
y_round = 0.0
key = '%s,%s' % (x_round, y_round)
try:
self.adjust_data[key]
except:
message = "Error: x=%s y=%s coordinate is out of range in robot arm." % (x_round, y_round)
raise Exception(message)
x_diff = self.adjust_data[key]['x_picture_diff']
y_diff = self.adjust_data[key]['y_picture_diff']
x_adjust = x - x_diff
y_adjust = y - y_diff
return x_adjust, y_adjust
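# Illustrative usage sketch (not part of the original module): the picture
# coordinates are made up, and adjust_measurement.json / adjust_data.json
# are assumed to be available next to this file.
if __name__ == '__main__':
    adjuster = AdjustForPictureToRobot()
    x_adj, y_adj = adjuster.adjust(0.2, -0.5)
    print('adjusted picture coordinates:', x_adj, y_adj)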
|
BrainPad/FindYourCandy
|
robot-arm/calibration/adjust.py
|
Python
|
apache-2.0
| 4,494 | 0.00267 |
# -*- coding: utf-8 -*-
from django import forms
from django.core.validators import RegexValidator
from ...models import EighthBlock
block_letter_validator = RegexValidator(r"^[a-z A-Z0-9_-]{1,10}$",
"A block letter must be less than 10 characters long, and include only alphanumeric characters and spaces.")
class BlockDisplayField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return "{}: {}".format(obj.id, str(obj))
class BlockSelectionForm(forms.Form):
def __init__(self, label="Block", exclude_before_date=None, only_locked=False, *args, **kwargs):
super(BlockSelectionForm, self).__init__(*args, **kwargs)
filter_params = {}
if exclude_before_date is not None:
filter_params["date__gte"] = exclude_before_date
if only_locked:
filter_params["locked"] = True
queryset = EighthBlock.objects.filter(**filter_params)
self.fields["block"] = BlockDisplayField(queryset=queryset, label=label, empty_label="Select a block")
class QuickBlockForm(forms.ModelForm):
block_letter = forms.CharField(max_length=10, validators=[block_letter_validator])
class Meta:
model = EighthBlock
fields = ["date", "block_letter"]
class BlockForm(forms.ModelForm):
block_letter = forms.CharField(max_length=10, validators=[block_letter_validator])
class Meta:
model = EighthBlock
fields = [
"date",
"block_letter",
"locked",
# "override_blocks",
"signup_time",
"comments"
]
|
jacobajit/ion
|
intranet/apps/eighth/forms/admin/blocks.py
|
Python
|
gpl-2.0
| 1,644 | 0.003041 |
# bgscan tests
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import os
import hostapd
def test_bgscan_simple(dev, apdev):
"""bgscan_simple"""
hostapd.add_ap(apdev[0]['ifname'], { "ssid": "bgscan" })
hostapd.add_ap(apdev[1]['ifname'], { "ssid": "bgscan" })
dev[0].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple:1:-20:2")
dev[1].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple:1:-45:2")
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple:1:-45")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple:0:0")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="simple:1")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
ev = dev[0].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10)
if ev is None:
raise Exception("dev0 did not indicate signal change event")
if "above=0" not in ev:
raise Exception("Unexpected signal change event contents from dev0: " + ev)
ev = dev[1].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10)
if ev is None:
raise Exception("dev1 did not indicate signal change event")
if "above=1" not in ev:
raise Exception("Unexpected signal change event contents from dev1: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3)
if ev is None:
raise Exception("dev0 did not start a scan")
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3)
if ev is None:
raise Exception("dev1 did not start a scan")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("dev0 did not complete a scan")
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("dev1 did not complete a scan")
def test_bgscan_learn(dev, apdev):
"""bgscan_learn"""
hostapd.add_ap(apdev[0]['ifname'], { "ssid": "bgscan" })
hostapd.add_ap(apdev[1]['ifname'], { "ssid": "bgscan" })
try:
os.remove("/tmp/test_bgscan_learn.bgscan")
except:
pass
try:
dev[0].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn:1:-20:2")
id = dev[1].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn:1:-45:2:/tmp/test_bgscan_learn.bgscan")
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn:1:-45")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn:0:0")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("bgscan", key_mgmt="NONE", scan_freq="2412",
bgscan="learn:1")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
ev = dev[0].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10)
if ev is None:
raise Exception("dev0 did not indicate signal change event")
if "above=0" not in ev:
raise Exception("Unexpected signal change event contents from dev0: " + ev)
ev = dev[1].wait_event(["CTRL-EVENT-SIGNAL-CHANGE"], timeout=10)
if ev is None:
raise Exception("dev1 did not indicate signal change event")
if "above=1" not in ev:
raise Exception("Unexpected signal change event contents from dev1: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3)
if ev is None:
raise Exception("dev0 did not start a scan")
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=3)
if ev is None:
raise Exception("dev1 did not start a scan")
ev = dev[0].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("dev0 did not complete a scan")
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 5)
if ev is None:
raise Exception("dev1 did not complete a scan")
dev[0].request("DISCONNECT")
dev[1].request("DISCONNECT")
dev[0].request("REMOVE_NETWORK all")
with open("/tmp/test_bgscan_learn.bgscan", "r") as f:
lines = f.read().splitlines()
if lines[0] != "wpa_supplicant-bgscan-learn":
raise Exception("Unexpected bgscan header line")
if 'BSS 02:00:00:00:03:00 2412' not in lines:
raise Exception("Missing BSS1")
if 'BSS 02:00:00:00:04:00 2412' not in lines:
raise Exception("Missing BSS2")
if 'NEIGHBOR 02:00:00:00:03:00 02:00:00:00:04:00' not in lines:
raise Exception("Missing BSS1->BSS2 neighbor entry")
if 'NEIGHBOR 02:00:00:00:04:00 02:00:00:00:03:00' not in lines:
raise Exception("Missing BSS2->BSS1 neighbor entry")
dev[1].set_network(id, "scan_freq", "")
dev[1].connect_network(id)
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-STARTED"], timeout=10)
if ev is None:
raise Exception("dev1 did not start a scan")
ev = dev[1].wait_event(["CTRL-EVENT-SCAN-RESULTS"], 10)
if ev is None:
raise Exception("dev1 did not complete a scan")
dev[1].request("REMOVE_NETWORK all")
finally:
try:
os.remove("/tmp/test_bgscan_learn.bgscan")
except:
pass
|
wangybgit/Chameleon
|
hostapd-OpenWrt/tests/hwsim/test_bgscan.py
|
Python
|
apache-2.0
| 6,284 | 0.003024 |
"""
WSGI config for lot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
application = get_wsgi_application()
|
Ecotrust/COMPASS
|
mp/wsgi.py
|
Python
|
apache-2.0
| 378 | 0 |
"""
Django settings for systematic_review project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIRS = (
TEMPLATE_PATH,
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
)
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin
'django.contrib.auth.backends.ModelBackend',
)
SITE_ID = 1
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'wad2.systematicreview@gmail.com'
EMAIL_HOST_PASSWORD = 'wed46-sysrev'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_+ntbtxcjxj5u)av$wt4q!lsad58v-7x_%zb1lc9f*$_#=p^f%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['iliawnek.pythonanywhere.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.formtools',
'sysrev',
'registration',
'bootstrapform'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'systematic_review.urls'
WSGI_APPLICATION = 'systematic_review.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATIC_ROOT = os.path.join(BASE_DIR, 'static-root')
STATIC_URL = '/static/' # You may find this is already defined as such.
STATICFILES_DIRS = (
STATIC_PATH,
)
# Registration
REGISTRATION_OPEN = True
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/accounts/login/'
|
iliawnek/SystematicReview
|
systematic_review/settings.py
|
Python
|
mit
| 2,999 | 0 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSpeedglm(RPackage):
"""Fitting linear models and generalized linear models to
large data sets by updating algorithms."""
homepage = "https://cran.r-project.org/package=speedglm"
url = "https://cran.rstudio.com/src/contrib/speedglm_0.3-2.tar.gz"
list_url = "https://cran.rstudio.com/src/contrib/Archive/speedglm"
version('0.3-2', 'c4874d4c2a677d657a335186ebb63131')
depends_on('r-mass', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-speedglm/package.py
|
Python
|
lgpl-2.1
| 1,759 | 0.000569 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Douglas S. Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id$
register(TOOL,
id = 'SetAttribute',
name = _("Set Attribute"),
description = _("Set an attribute to a given value."),
version = '0.0.32',
gramps_target_version = "5.1",
status = STABLE, # not yet tested with python 3
fname = 'SetAttributeTool.py',
authors = ["Douglas S. Blank"],
authors_email = ["doug.blank@gmail.com"],
category = TOOL_DBPROC,
toolclass = 'SetAttributeWindow',
optionclass = 'SetAttributeOptions',
tool_modes = [TOOL_MODE_GUI],
)
|
gramps-project/addons-source
|
SetAttributeTool/SetAttributeTool.gpr.py
|
Python
|
gpl-2.0
| 1,411 | 0.02197 |
import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.original_handler = default_handler
if isinstance(default_handler, (int, long)):
if default_handler == signal.SIG_DFL:
# Pretend it's signal.default_int_handler instead.
default_handler = signal.default_int_handler
elif default_handler == signal.SIG_IGN:
# Not quite the same thing as SIG_IGN, but the closest we
# can make it: do nothing.
def default_handler(unused_signum, unused_frame):
pass
else:
raise TypeError("expected SIGINT signal handler to be "
"signal.SIG_IGN, signal.SIG_DFL, or a "
"callable object")
self.default_handler = default_handler
def __call__(self, signum, frame):
installed_handler = signal.getsignal(signal.SIGINT)
if installed_handler is not self:
# if we aren't the installed handler, then delegate immediately
# to the default handler
self.default_handler(signum, frame)
if self.called:
self.default_handler(signum, frame)
self.called = True
for result in _results.keys():
result.stop()
_results = weakref.WeakKeyDictionary()
def registerResult(result):
_results[result] = 1
def removeResult(result):
return bool(_results.pop(result, None))
_interrupt_handler = None
def installHandler():
global _interrupt_handler
if _interrupt_handler is None:
default_handler = signal.getsignal(signal.SIGINT)
_interrupt_handler = _InterruptHandler(default_handler)
signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
if method is not None:
@wraps(method)
def inner(*args, **kwargs):
initial = signal.getsignal(signal.SIGINT)
removeHandler()
try:
return method(*args, **kwargs)
finally:
signal.signal(signal.SIGINT, initial)
return inner
global _interrupt_handler
if _interrupt_handler is not None:
signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
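# Illustrative sketch (not part of the original module): installHandler()
# makes the first Ctrl-C stop every registered TestResult instead of
# raising KeyboardInterrupt; removeHandler() also works as a decorator that
# temporarily restores the previous SIGINT handler around a call.
if __name__ == '__main__':
    installHandler()

    @removeHandler
    def run_without_custom_sigint_handler():
        pass  # the original SIGINT handler is active inside this call

    run_without_custom_sigint_handler()
    removeHandler()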
|
HiSPARC/station-software
|
user/python/Lib/unittest/signals.py
|
Python
|
gpl-3.0
| 2,411 | 0.002074 |
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import copy
import inspect
import numpy as np
import ray.tune as tune
import torch
from .base import DEFAULT
"""
An initial set of good parameters for sparse networks, i.e. Release 1.
The sparse 100 configuration here gets between 80.5% and 82% top-1 accuracy after 60
epochs, and has about 77.5% average weight sparsity.
The sparse 1000 configuration, with the same weight sparsities, gets about 72.23% in
top-1 accuracy after 120 epochs.
"""
def my_auto_sparse_conv_params(in_channels, out_channels, kernel_size):
"""
Custom weight params.
:return: a dict to pass to `SparseWeights2d`
"""
weights_per_channel = kernel_size * kernel_size * in_channels
if weights_per_channel < 100:
weights_density = 0.7
elif weights_per_channel < 200:
weights_density = 0.5
elif weights_per_channel < 500:
weights_density = 0.4
elif weights_per_channel < 1000:
weights_density = 0.3
elif weights_per_channel < 2000:
weights_density = 0.2
elif weights_per_channel < 4000:
weights_density = 0.2
else:
weights_density = 0.15
return dict(
weight_sparsity=weights_density,
)
def my_auto_sparse_activation_params(in_channels, out_channels, kernel_size):
"""
A custom auto sparse params function.
:return: a dict to pass to `KWinners2d` as params.
"""
percent_on = 1.0
if kernel_size != 1:
if out_channels >= 128:
percent_on = 0.3
if percent_on >= 1.0:
return None
else:
return dict(
percent_on=percent_on,
boost_strength=1.0,
boost_strength_factor=0.9,
local=True,
k_inference_factor=1.0,
)
def my_auto_sparse_linear_params(input_size, output_size):
"""
Custom weight params.
:return: a dict to pass to `SparseWeights`
"""
return dict(
weight_sparsity=0.25,
)
# This configuration gets between 80.5% and 82% top-1 accuracy after 60 epochs
SPARSE100_R1 = copy.deepcopy(DEFAULT)
SPARSE100_R1.update(dict(
# No weight decay from batch norm modules
batch_norm_weight_decay=False,
init_batch_norm=True,
epochs=60,
checkpoint_freq=1,
keep_checkpoints_num=2,
checkpoint_score_attr="training_iteration",
checkpoint_at_end=True,
seed=tune.sample_from(lambda spec: np.random.randint(2, 10000)),
num_classes=100,
model_args=dict(config=dict(
num_classes=100,
defaults_sparse=True,
activation_params_func=my_auto_sparse_activation_params,
conv_params_func=my_auto_sparse_conv_params,
linear_params_func=my_auto_sparse_linear_params
)),
# Use a higher learning rate and no momentum for sparse superconvergence
lr_scheduler_class=torch.optim.lr_scheduler.OneCycleLR,
lr_scheduler_args=dict(
max_lr=6.0,
div_factor=6, # initial_lr = 1.0
final_div_factor=4000, # min_lr = 0.00025
pct_start=5.0 / 60.0,
epochs=60,
anneal_strategy="linear",
max_momentum=0.01,
cycle_momentum=False,
),
optimizer_args=dict(
lr=0.1,
weight_decay=0.0001,
momentum=0.0,
nesterov=False,
),
weight_params=inspect.getsource(my_auto_sparse_conv_params),
activation_params=inspect.getsource(my_auto_sparse_activation_params),
linear_params=inspect.getsource(my_auto_sparse_linear_params),
))
# Try a much longer number of epochs than the 100-category version and a lower
# weight decay (see https://arxiv.org/abs/1711.04291).
# With 120 epochs this gets 72.23% top-1 accuracy. Earlier version got
# about 73% with 200 epochs.
SPARSE1000_R1 = copy.deepcopy(SPARSE100_R1)
SPARSE1000_R1.update(dict(
num_classes=1000,
epochs=120,
model_args=dict(config=dict(
num_classes=1000,
defaults_sparse=True,
activation_params_func=my_auto_sparse_activation_params,
conv_params_func=my_auto_sparse_conv_params,
linear_params_func=my_auto_sparse_linear_params
)),
lr_scheduler_args=dict(
max_lr=6.0,
div_factor=6, # initial_lr = 1.0
final_div_factor=4000, # min_lr = 0.00025
pct_start=5.0 / 120.0,
epochs=120,
anneal_strategy="linear",
max_momentum=0.01,
cycle_momentum=False,
),
optimizer_args=dict(
lr=0.1,
weight_decay=0.00005,
momentum=0.0,
nesterov=False,
),
))
CONFIGS = dict(
sparse_100_r1=SPARSE100_R1,
sparse_1000_r1=SPARSE1000_R1,
)
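# Illustrative sketch (added for clarity; not part of the original experiment file).
# The density picked by my_auto_sparse_conv_params depends only on
# kernel_size * kernel_size * in_channels. For a hypothetical 3x3 conv layer with
# 64 input channels, weights_per_channel = 3 * 3 * 64 = 576, which falls in the
# "< 1000" bucket and therefore gets weight_sparsity=0.3:
#
#     params = my_auto_sparse_conv_params(in_channels=64, out_channels=128, kernel_size=3)
#     assert params == {"weight_sparsity": 0.3}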
|
numenta/nupic.research
|
projects/imagenet/experiments/sparse_r1.py
|
Python
|
agpl-3.0
| 5,411 | 0.00037 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import multiprocessing
import gunicorn.app.base
from gunicorn.six import iteritems
def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
if __name__ == '__main__':
from .wsgi import create_app
options = {
'bind': '%s:%s' % ('127.0.0.1', '8081'),
'workers': number_of_workers(),
}
StandaloneApplication(create_app, options).run()
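# Usage note (added for illustration; not in the original file). load_config() keeps only
# keys that match valid gunicorn settings and drops None values, so the options dict can
# carry any standard gunicorn setting. A hedged sketch with two additional well-known
# settings ('timeout' and 'loglevel'):
#
#     options = {
#         'bind': '127.0.0.1:8081',
#         'workers': number_of_workers(),
#         'timeout': 30,        # worker timeout in seconds
#         'loglevel': 'info',   # gunicorn log level
#     }
#     StandaloneApplication(create_app, options).run()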
|
radical-software/mongrey
|
mongrey/web/worker.py
|
Python
|
bsd-3-clause
| 1,033 | 0.003872 |
# flake8: noqa
from . import html, richtext, snippet
try:
import requests
except ImportError: # pragma: no cover
pass
else:
from . import external
try:
import imagefield
except ImportError: # pragma: no cover
pass
else:
from . import image
|
matthiask/feincms3
|
feincms3/plugins/__init__.py
|
Python
|
bsd-3-clause
| 269 | 0 |
#!/usr/bin/env python
"""
Match input spectrum to ID lines
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import pdb
try:  # Python 2
    ustr = unicode
except NameError:  # Python 3
ustr = str
def parser(options=None):
import argparse
# Parse
parser = argparse.ArgumentParser(
description='Match input spectrum to arclines line lists')
parser.add_argument("spectrum", type=str, help="Spectrum file (.ascii, .fits, .json)")
parser.add_argument("wvcen", type=float, help="Guess at central wavelength (within 1000A)")
parser.add_argument("disp", type=float, help="Accurate dispersion (Ang/pix)")
parser.add_argument("lines", type=str, help="Comma separated list of lamps")
parser.add_argument("--outroot", type=str, help="Root filename for plot, IDs")
parser.add_argument("--min_ampl", default=100., type=float, help="Minimum amplitude for line analysis [default: 100.]")
parser.add_argument("--debug", default=False, action='store_true', help="Debug")
parser.add_argument("--fit", default=False, action='store_true', help="Fit the lines?")
parser.add_argument("--brute", default=False, action='store_true', help="Use semi_brute?")
parser.add_argument("--show_spec", default=False, action='store_true', help="Show the input spectrum?")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(pargs=None):
""" Run
Parameters
----------
args
Returns
-------
"""
import numpy as np
from matplotlib import pyplot as plt
from linetools import utils as ltu
from arclines import io as arcl_io
from arclines.holy import utils as arch_utils
from arclines.holy.grail import general, semi_brute
from arclines.holy import patterns as arch_patt
from arclines.holy import fitting as arch_fit
if pargs.outroot is None:
pargs.outroot = 'tmp_matches'
# Defaults
# Load spectrum
spec = arcl_io.load_spectrum(pargs.spectrum)
if pargs.show_spec:
plt.clf()
ax = plt.gca()
ax.plot(spec)
plt.show()
# Arc lines
lines = pargs.lines.split(',')
# Call brute
if pargs.brute:
best_dict, final_fit = semi_brute(spec, lines, pargs.wvcen, pargs.disp, min_ampl=pargs.min_ampl,
debug=pargs.debug, outroot=pargs.outroot, do_fit=pargs.fit,
verbose=True)
#best_dict, final_fit = grail.semi_brute(spec, lines, wv_cen, disp, siglev=siglev,
# min_ampl=min_ampl, min_nmatch=min_match, outroot=outroot)
else:
best_dict, final_fit = general(spec, lines, do_fit=pargs.fit, verbose=True, debug=pargs.debug,
min_ampl=pargs.min_ampl, outroot=pargs.outroot)
if pargs.debug:
pdb.set_trace()
if pargs.fit:
ltu.savejson(pargs.outroot+'_fit.json', ltu.jsonify(final_fit), easy_to_read=True, overwrite=True)
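# Example invocation (added for illustration; the file name and lamp list are hypothetical):
#
#     python match.py my_arc_spectrum.fits 5000. 1.25 ArI,NeI --min_ampl 200 --fit
#
# This matches the input spectrum against the ArI and NeI line lists around 5000 Ang
# at 1.25 Ang/pix and, because of --fit, writes the solution to <outroot>_fit.json.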
|
PYPIT/arclines
|
arclines/scripts/match.py
|
Python
|
bsd-3-clause
| 3,106 | 0.007727 |
#!/usr/bin/python
#encoding:utf-8
'''
Author: wangxu
Email: wangxu@oneniceapp.com
Task update
'''
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import logging
import tornado.web
import json
import os
import time
CURRENTPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(CURRENTPATH, '../../'))
from job_define import Job
# Metric handler class
class JobUpdateHandler(tornado.web.RequestHandler):
    # Uniformly dispatch to the post method
def get(self):
self.post()
    # action is the operation type
def post(self):
        # After the update, redirect to the list page
title = '调度任务列表'
        # Must log in via azkaban first
session_id = self.get_argument('session_id','')
login_user = self.get_argument('login_user','')
if session_id=='' or login_user=='':
self.render('to_login.html')
return
        # Parameters
query_name = self.get_argument('query_name','')
query_project_name = self.get_argument('query_project_name','')
query_server_host = self.get_argument('query_server_host','')
query_user = self.get_argument('query_user','')
        # Build the job
attr_list = Job.get_attr_list()
dependencies_box = self.get_argument('dependencies_box','')
logging.info('>>>>>>>>>>>'+str(type(dependencies_box)))
logging.info('>>>>>>>>>>>'+str(dependencies_box))
job = Job()
        # Dynamically load fields; all default to strings
for attr in attr_list:
value = str(self.get_argument(attr,'')).strip()
if value!='':
setattr(job,attr,value)
logging.info(attr+':'+value)
        # Default settings
job.name = job.name.replace('.','-')
job.updater = login_user
job.update_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if job.creator == '':
job.creator = job.updater
job.create_time = job.update_time
        # Update
job.update_job()
        # List jobs
jobs = Job.get_alljobs(query_name,query_project_name,query_server_host,query_user,login_user)
query_dict = {
'query_name':query_name,
'query_project_name':query_project_name,
'query_server_host':query_server_host,
'query_user':query_user,
'session_id':session_id,
'login_user':login_user
}
logging.info('[%s] update job [%s]' % (login_user,job.name))
self.render('list.html',title=title,jobs=jobs,query_dict=query_dict)
|
cocofree/azkaban_assistant
|
schedule/webapp/handler/job_update.py
|
Python
|
apache-2.0
| 2,583 | 0.021624 |
#from distutils.core import setup
from setuptools import setup, find_packages
# http://guide.python-distribute.org/quickstart.html
# python setup.py sdist
# python setup.py register
# python setup.py sdist upload
# pip install epub_meta
# pip install epub_meta --upgrade --no-deps
# Manual upload to PypI
# http://pypi.python.org/pypi/epub_meta
# Go to 'edit' link
# Update version and save
# Go to 'files' link and upload the file
VERSION = '0.0.7'
tests_require = [
]
install_requires = [
]
# from pip.req import parse_requirements
# install_requires = parse_requirements('requirements.txt')
# tests_require = parse_requirements('requirements-dev.txt')
setup(name='epub_meta',
url='https://github.com/paulocheque/epub-meta',
author='Pluralsight',
keywords='python epub metadata',
description='',
license='MIT',
classifiers=[
'Operating System :: OS Independent',
'Topic :: Software Development',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: PyPy',
],
version=VERSION,
install_requires=install_requires,
test_suite='tests',
tests_require=tests_require,
extras_require={'test': tests_require},
packages=find_packages(),
)
|
paulocheque/epub-meta
|
setup.py
|
Python
|
agpl-3.0
| 1,554 | 0.001931 |
#!/usr/bin/env python
#
# Problem definition:
# A-R Hedar and M Fukushima, "Derivative-Free Filter Simulated Annealing
# Method for Constrained Continuous Global Optimization", Journal of
# Global Optimization, 35(4), 521-549 (2006).
#
# Original Matlab code written by A. Hedar (Nov. 23, 2005)
# http://www-optima.amp.i.kyoto-u.ac.jp/member/student/hedar/Hedar_files/go.htm
# and ported to Python by Mike McKerns (December 2014)
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
def objective(x):
x0,x1,x2,x3,x4,x5,x6 = x
return (x0-10)**2 + 5*(x1-12)**2 + x2**4 + 3*(x3-11)**2 + \
10*x4**6 + 7*x5**2 + x6**4 - 4*x5*x6 - 10*x5 - 8*x6
bounds = [(-10.,10.)]*7
# with penalty='penalty' applied, solution is:
xs = [2.330499, 1.951372, -0.4775414, 4.365726, -0.6244870, 1.038131, 1.594227]
ys = 680.6300573
from mystic.symbolic import generate_constraint, generate_solvers, solve
from mystic.symbolic import generate_penalty, generate_conditions
equations = """
2*x0**2 + 3*x1**4 + x2 + 4*x3**2 + 5*x4 - 127.0 <= 0.0
7*x0 + 3*x1 + 10*x2**2 + x3 - x4 - 282.0 <= 0.0
23*x0 + x1**2 + 6*x5**2 - 8*x6 - 196.0 <= 0.0
4*x0**2 + x1**2 - 3*x0*x1 + 2*x2**2 + 5*x5 - 11*x6 <= 0.0
"""
#cf = generate_constraint(generate_solvers(solve(equations))) #XXX: inequalities
pf = generate_penalty(generate_conditions(equations), k=1e12)
from mystic.constraints import as_constraint
cf = as_constraint(pf)
if __name__ == '__main__':
from mystic.solvers import diffev2
from mystic.math import almostEqual
result = diffev2(objective, x0=bounds, bounds=bounds, penalty=pf, npop=40, gtol=200, disp=False, full_output=True)
assert almostEqual(result[0], xs, rel=1e-2)
assert almostEqual(result[1], ys, rel=1e-2)
# EOF
|
jcfr/mystic
|
examples2/g09.py
|
Python
|
bsd-3-clause
| 1,961 | 0.009179 |
#!/usr/bin/env python
import os
from setuptools import setup
setup(name='pymarkdown',
version='0.1.4',
description='Evaluate code in markdown',
url='http://github.com/mrocklin/pymarkdown',
author='Matthew Rocklin',
author_email='mrocklin@gmail.com',
license='BSD',
keywords='markdown documentation',
packages=['pymarkdown'],
install_requires=['toolz'],
long_description=(open('README.rst').read() if os.path.exists('README.rst')
else ''),
zip_safe=False,
scripts=[os.path.join('bin', 'pymarkdown')])
|
leosartaj/pymarkdown
|
setup.py
|
Python
|
bsd-3-clause
| 599 | 0.001669 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright: (2013-2017) Michael Till Beck <Debianguru@gmx.de>
# License: GPL-2.0+
import http.server
import socketserver
import importlib
import sys
import getopt
bind = 'localhost'
port = 8000
configMod = 'config'
try:
opts, args = getopt.getopt(sys.argv[1:], 'hc:b:p:', ['help', 'config=', 'bind=', 'port='])
except getopt.GetoptError:
print('Usage: FeedServer.py --config=config --port=8000 --bind=localhost')
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
print('Usage: FeedServer.py --config=config --bind=localhost --port=8000')
exit()
elif opt in ('-c', '--config'):
configMod = arg
elif opt in ('-b', '--bind'):
bind = arg
elif opt in ('-p', '--port'):
port = int(arg)
config = importlib.import_module(configMod)
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer((bind, port), handler)
print('Bound to ' + bind + ', listening on port ' + str(port))
httpd.serve_forever()
|
mtill/MailWebsiteChanges
|
mwcfeedserver.py
|
Python
|
gpl-2.0
| 1,039 | 0.002887 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# TerminalRoastDB, released under GPLv3
# Roaster Set Time
import Pyro4
import sys
new_roaster_time = sys.argv[1]
roast_control = Pyro4.Proxy("PYRONAME:roaster.sr700")
if int(new_roaster_time) > 0 and int(new_roaster_time) < 1200:
roast_control.set_time(new_roaster_time)
|
infinigrove/TerminalRoastDB
|
cmds/Roaster_Set_Time.py
|
Python
|
gpl-3.0
| 324 | 0.003086 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wiki', '0001_initial'),
('userstories', '0009_remove_userstory_is_archived'),
('issues', '0005_auto_20150623_1923'),
('tasks', '0006_auto_20150623_1923'),
]
operations = [
migrations.RunSQL(
"""
CREATE INDEX "userstories_full_text_idx" ON userstories_userstory USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, '')));
""",
reverse_sql="""DROP INDEX IF EXISTS "userstories_full_text_idx";"""
),
migrations.RunSQL(
"""
CREATE INDEX "tasks_full_text_idx" ON tasks_task USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, '')));
""",
reverse_sql="""DROP INDEX IF EXISTS "tasks_full_text_idx";"""
),
migrations.RunSQL(
"""
CREATE INDEX "issues_full_text_idx" ON issues_issue USING gin(to_tsvector('simple', coalesce(subject, '') || ' ' || coalesce(ref) || ' ' || coalesce(description, '')));
""",
reverse_sql="""DROP INDEX IF EXISTS "issues_full_text_idx";"""
),
migrations.RunSQL(
"""
CREATE INDEX "wikipages_full_text_idx" ON wiki_wikipage USING gin(to_tsvector('simple', coalesce(slug, '') || ' ' || coalesce(content, '')));
""",
reverse_sql="""DROP INDEX IF EXISTS "wikipages_full_text_idx";"""
),
]
|
coopsource/taiga-back
|
taiga/searches/migrations/0001_initial.py
|
Python
|
agpl-3.0
| 1,689 | 0.002368 |
import os
import json
from urlparse import urlparse
from pymongo import uri_parser
def get_private_key():
with open('mistt-solution-d728e8f21f47.json') as f:
return json.loads(f.read()).items()
# Flask
CSRF_SESSION_KEY = os.getenv('FLASK_SESSION_KEY', 'notsecret')
SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'notsecret')
CSRF_ENABLED = True
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
ADMIN_EMAIL = os.getenv('ADMIN_EMAIL', 'notsecret')
ADMIN_USERNAME = os.getenv('ADMIN_USERNAME', 'notsecret')
ADMIN_PASSWORD = os.getenv('ADMIN_PASSWORD', 'notsecret')
SECURITY_PASSWORD_SALT = os.getenv('SECURITY_PASSWORD_SALT', 'notsecret')
# postgres
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_MIGRATE_REPO = os.path.join(BASE_DIR, 'db')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'postgres://localhost:5432/mistt')
# dumps
PAGE_DUMP_PATH = os.getenv('PAGE_DUMP_PATH', 'page_dump_110116')
PATH_TO_CASE_EXPORTS = os.getenv('PATH_TO_CASE_EXPORTS')
# Drupal
DRUPAL_URL = os.getenv('DRUPAL_URL')
DRUPAL_LOGIN = os.getenv('DRUPAL_LOGIN')
DRUPAL_PASSWORD = os.getenv('DRUPAL_PASSWORD')
DRUPAL_USERNAME = os.getenv('DRUPAL_USERNAME')
# flask-mail settings
MAIL_SERVER = os.getenv('MAIL_SERVER')
MAIL_PORT=465
MAIL_USE_TLS=False
MAIL_USE_SSL=True
MAIL_USERNAME = os.getenv('MAIL_USERNAME')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER')
# cases
MONGODB_URI = os.getenv('MONGODB_URI')
uri = uri_parser.parse_uri(MONGODB_URI)
MONGODB_DATABASE = uri['database']
MONGODB_HOST, MONGODB_PORT = uri['nodelist'][0]
MONGODB_USERNAME = uri['username']
MONGODB_PASSWORD = uri['password']
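# Note (added for illustration; the URI below is hypothetical). pymongo's
# uri_parser.parse_uri returns a dict, so for
# MONGODB_URI = "mongodb://user:secret@db.example.com:27017/mistt"
# the assignments above resolve roughly to:
#
#     MONGODB_DATABASE = 'mistt'
#     MONGODB_HOST, MONGODB_PORT = 'db.example.com', 27017
#     MONGODB_USERNAME = 'user'
#     MONGODB_PASSWORD = 'secret'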
#
# GOOGLE_SERVICE_ACCOUNT_EMAIL=os.getenv('GOOGLE_SERVICE_ACCOUNT_EMAIL')
# GOOGLE_SERVICE_ACCOUNT_PRIVATE_KEY=get_private_key()[0][1]
GOOGLE_ANALYTICS_CLIENT_SECRET=os.getenv('GOOGLE_ANALYTICS_CLIENT_SECRET')
GOOGLE_ANALYTICS_CLIENT_ID=os.getenv('GOOGLE_ANALYTICS_CLIENT_ID')
UPLOAD_FOLDER=os.path.join(os.getcwd(),'uploads')
|
michaelnetbiz/mistt-solution
|
config.py
|
Python
|
mit
| 1,994 | 0.005015 |
from ovito import *
from ovito.io import *
from ovito.modifiers import *
from ovito.vis import *
import matplotlib
# Activate 'agg' backend for off-screen plotting.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import PyQt5.QtGui
node = import_file("../../files/CFG/fcc_coherent_twin.0.cfg")
node.modifiers.append(CoordinationNumberModifier())
node.modifiers.append(HistogramModifier())
node.add_to_scene()
vp = dataset.viewports.active_vp
def render(painter, **args):
# Find the existing HistogramModifier in the pipeline
# and get its histogram data.
for mod in ovito.dataset.selected_node.modifiers:
if isinstance(mod, HistogramModifier):
x = mod.histogram[:,0]
y = mod.histogram[:,1]
break
if not 'x' in locals():
raise RuntimeError('Histogram modifier not found.')
# Get size of rendered viewport image in pixels.
viewport_width = painter.window().width()
viewport_height = painter.window().height()
# Compute plot size in inches (DPI determines label size)
dpi = 80
plot_width = 0.5 * viewport_width / dpi
plot_height = 0.5 * viewport_height / dpi
# Create figure
fig, ax = plt.subplots(figsize=(plot_width,plot_height), dpi=dpi)
fig.patch.set_alpha(0.5)
plt.title('Coordination')
# Plot histogram data
ax.bar(x, y)
plt.tight_layout()
# Render figure to an in-memory buffer.
buf = fig.canvas.print_to_buffer()
# Create a QImage from the memory buffer
res_x, res_y = buf[1]
img = PyQt5.QtGui.QImage(buf[0], res_x, res_y, PyQt5.QtGui.QImage.Format_RGBA8888)
# Paint QImage onto rendered viewport
painter.drawImage(0,0,img)
print("Overlay function was executed")
overlay = PythonViewportOverlay()
overlay.function = render
vp.overlays.append(overlay)
if ovito.headless_mode:
ovito.dataset.render_settings.renderer = TachyonRenderer(ambient_occlusion = False, antialiasing = False)
vp.render()
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/python_data_plot_overlay.py
|
Python
|
gpl-3.0
| 1,873 | 0.034704 |
# -*- coding: utf-8 -*-
{
'\n\nThank you!': '\n\nThank you!',
'\n\nWe will wait and let you know when your payment is confirmed.': '\n\nWe will wait and let you know when your payment is confirmed.',
'\n- %s from %s to %s': '\n- %s from %s to %s',
'\nAmount: R$%.2f': '\nAmount: R$%.2f',
"\nSomething happened and we couldn't verify your payment.\n": "\nSomething happened and we couldn't verify your payment.\n",
'\nThank you for your purchase!': '\nThank you for your purchase!',
'\nThank you!': '\nThank you!',
'\nThank you.': '\nThank you.',
'\nThe total amount was R$%.2f.': '\nThe total amount was R$%.2f.',
'\nWe will wait and let you know when your payment is confirmed.\n': '\nWe will wait and let you know when your payment is confirmed.\n',
'\nYou can check your payment history after login in to your profile.': '\nYou can check your payment history after login in to your profile.',
'!=': '!=',
'!langcode!': 'ro',
'!langname!': 'Română',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" (actualizează) este o expresie opțională precum "câmp1=\'valoare_nouă\'". Nu puteți actualiza sau șterge rezultatele unui JOIN',
'%(month)s %(day)sth': '%(month)s %(day)sth',
'%(nrows)s records found': '%(nrows)s înregistrări găsite',
'%02d/%02d': '%02d/%02d',
'%B %d, %Y': '%B %d, %Y',
'%d days ago': '%d days ago',
'%d weeks ago': '%d weeks ago',
'%d%% OFF': '%d%% OFF',
'%d/%d': '%d/%d',
'%m-%d-%Y': '%m-%d-%Y',
'%s %%{row} deleted': '%s linii șterse',
'%s %%{row} updated': '%s linii actualizate',
'%s %dth': '%s %dth',
'%s Certificate': '%s Certificate',
'%s of %s': '%s of %s',
'%s selected': '%s selectat(e)',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(something like "it-it")': '(ceva ce seamănă cu "it-it")',
'- %s from %s to %s': '- %s from %s to %s',
'- %s from %s to %s\n': '- %s from %s to %s\n',
'1 day ago': '1 day ago',
'1 week ago': '1 week ago',
'<': '<',
'<=': '<=',
'=': '=',
'>': '>',
'>=': '>=',
'?': '?',
'A new version of web2py is available': 'O nouă versiune de web2py este disponibilă',
'A new version of web2py is available: %s': 'O nouă versiune de web2py este disponibilă: %s',
'About': 'Despre',
'about': 'despre',
'About application': 'Despre aplicație',
'Access': 'Access',
'Access Control': 'Control acces',
'Access the /appadmin to make at least one teacher user:': 'Access the /appadmin to make at least one teacher user:',
'Actions': 'Actions',
'Add': 'Adaugă',
'Add more': 'Add more',
'additional code for your application': 'cod suplimentar pentru aplicația dvs.',
'admin disabled because no admin password': 'administrare dezactivată deoarece parola de administrator nu a fost furnizată',
'admin disabled because not supported on google app engine': 'administrare dezactivată deoarece funcționalitatea nu e suportat pe Google App Engine',
'admin disabled because unable to access password file': 'administrare dezactivată deoarece nu există acces la fișierul cu parole',
'Admin is disabled because insecure channel': 'Adminstrarea este dezactivată deoarece conexiunea nu este sigură',
'Admin is disabled because unsecure channel': 'Administrarea este dezactivată deoarece conexiunea nu este securizată',
'Administration': 'Administrare',
'Administrative Interface': 'Interfață administrare',
'Administrator Password:': 'Parolă administrator:',
'Ajax Recipes': 'Rețete Ajax',
'All certificates sent!': 'All certificates sent!',
'All Classes': 'All Classes',
'Alternative A': 'Alternative A',
'Alternative B': 'Alternative B',
'Alternative C': 'Alternative C',
'Alternative D': 'Alternative D',
'Amount': 'Amount',
'Amount: R$%.2f': 'Amount: R$%.2f',
'Amount: R$%.2f\n': 'Amount: R$%.2f\n',
'And': 'Și',
'and enroll!': 'and enroll!',
'and go to': 'and go to',
'and rename it (required):': 'și renumiți (obligatoriu):',
'and rename it:': ' și renumiți:',
'Announcements': 'Announcements',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'appadmin dezactivat deoarece conexiunea nu e sigură',
'application "%s" uninstalled': 'aplicația "%s" a fost dezinstalată',
'application compiled': 'aplicația a fost compilată',
'application is compiled and cannot be designed': 'aplicația este compilată și nu poate fi editată',
'Are you sure you want to delete file "%s"?': 'Sigur ștergeți fișierul "%s"?',
'Are you sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Are you sure you want to uninstall application "%s"': 'Sigur dezinstalați aplicația "%s"',
'Are you sure you want to uninstall application "%s"?': 'Sigur dezinstalați aplicația "%s"?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENȚIE: Nu vă puteți conecta decât utilizând o conexiune securizată (HTTPS) sau rulând aplicația pe computerul local.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENȚIE: Nu puteți efectua mai multe teste o dată deoarece lansarea în execuție a mai multor subpocese nu este sigură.',
'ATTENTION: you cannot edit the running application!': 'ATENȚIE: nu puteți edita o aplicație în curs de execuție!',
'Authentication': 'Autentificare',
'Available Databases and Tables': 'Baze de date și tabele disponibile',
'Available Until': 'Available Until',
'Back': 'Înapoi',
'Banner': 'Banner',
'Body': 'Body',
'Buy Now': 'Buy Now',
'Buy this book': 'Cumpără această carte',
'Cache': 'Cache',
'cache': 'cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Chei cache',
'cache, errors and sessions cleaned': 'cache, erori și sesiuni golite',
'Calendar': 'Calendar',
'Cannot be empty': 'Nu poate fi vid',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Compilare imposibilă: aplicația conține erori. Debogați aplicația și încercați din nou.',
'cannot create file': 'fișier imposibil de creat',
'cannot upload file "%(filename)s"': 'imposibil de încărcat fișierul "%(filename)s"',
'Certificates': 'Certificates',
'Change password': 'Schimbare parolă',
'Change Password': 'Schimbare parolă',
'change password': 'schimbare parolă',
'check all': 'coșați tot',
'Check to delete': 'Coșați pentru a șterge',
'Class %s': 'Class %s',
'Class Id': 'Class Id',
'Classes': 'Classes',
'clean': 'golire',
'Clear': 'Golește',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'Clic pentru a verifica dacă există upgrade-uri',
'Client IP': 'IP client',
'Closed': 'Closed',
'Community': 'Comunitate',
'compile': 'compilare',
'compiled application removed': 'aplicația compilată a fost ștearsă',
'Components and Plugins': 'Componente și plugin-uri',
'Confirmation Time': 'Confirmation Time',
'Confirmed': 'Confirmed',
'Contact': 'Contact',
'contains': 'conține',
'Continue Shopping': 'Continue Shopping',
'Controller': 'Controlor',
'Controllers': 'Controlori',
'controllers': 'controlori',
'Copyright': 'Drepturi de autor',
'Correct Alternative': 'Correct Alternative',
'Course': 'Course',
'Course Announcements': 'Course Announcements',
'Course Id': 'Course Id',
"Course's end": "Course's end",
"Course's start": "Course's start",
'Courses': 'Courses',
'create file with filename:': 'crează fișier cu numele:',
'Create new application': 'Creați aplicație nouă',
'create new application:': 'crează aplicație nouă:',
'crontab': 'crontab',
'Current request': 'Cerere curentă',
'Current response': 'Răspuns curent',
'Current session': 'Sesiune curentă',
'currently saved or': 'în prezent salvat sau',
'customize me!': 'Personalizează-mă!',
'DASHBOARD': 'DASHBOARD',
'Dashboard': 'Dashboard',
'data uploaded': 'date încărcate',
'Database': 'bază de date',
'Database %s select': 'selectare bază de date %s',
'database administration': 'administrare bază de date',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'Date': 'Date',
'Date and Time': 'Data și ora',
'db': 'db',
'DB Model': 'Model bază de date',
'defines tables': 'definire tabele',
'Delete': 'Șterge',
'delete': 'șterge',
'delete all checked': 'șterge tot ce e coșat',
'Delete:': 'Șterge:',
'Demo': 'Demo',
'Denied': 'Denied',
'Deploy on Google App Engine': 'Instalare pe Google App Engine',
'Deployment Recipes': 'Rețete de instalare',
'Description': 'Descriere',
'design': 'design',
'DESIGN': 'DESIGN',
'Design for': 'Design pentru',
'Details': 'Details',
'Discount': 'Discount',
'DISK': 'DISK',
'Disk Cache Keys': 'Chei cache de disc',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentație',
"Don't know what to do?": 'Nu știți ce să faceți?',
'done!': 'gata!',
'Download': 'Descărcare',
'E-mail': 'E-mail',
'E-mail invalid': 'E-mail invalid',
'edit': 'editare',
'EDIT': 'EDITARE',
'Edit': 'Editare',
'Edit application': 'Editare aplicație',
'edit controller': 'editare controlor',
'Edit current record': 'Editare înregistrare curentă',
'Edit Profile': 'Editare profil',
'edit profile': 'editare profil',
'Edit This App': 'Editați această aplicație',
'Editing file': 'Editare fișier',
'Editing file "%s"': 'Editare fișier "%s"',
'Email and SMS': 'E-mail și SMS',
'End': 'End',
'End date': 'End date',
'End Date': 'End Date',
'Enroll now!': 'Enroll now!',
'enter a number between %(min)g and %(max)g': 'introduceți un număr între %(min)g și %(max)g',
'enter an integer between %(min)g and %(max)g': 'introduceți un întreg între %(min)g și %(max)g',
'Enter the auth_membership table and associate your new user to the "Teacher" group': 'Enter the auth_membership table and associate your new user to the "Teacher" group',
'Enter the auth_user table and create a new record': 'Enter the auth_user table and create a new record',
'Enter with your teacher user and create your course, classes and lessons': 'Enter with your teacher user and create your course, classes and lessons',
'Error logs for "%(app)s"': 'Log erori pentru "%(app)s"',
'errors': 'erori',
'Errors': 'Erori',
'Erros no formulário!': 'Erros no formulário!',
'Export': 'Export',
'export as csv file': 'exportă ca fișier csv',
'exposes': 'expune',
'extends': 'extinde',
'failed to reload module': 'reîncarcare modul nereușită',
'False': 'Neadevărat',
'FAQ': 'Întrebări frecvente',
'file "%(filename)s" created': 'fișier "%(filename)s" creat',
'file "%(filename)s" deleted': 'fișier "%(filename)s" șters',
'file "%(filename)s" uploaded': 'fișier "%(filename)s" încărcat',
'file "%(filename)s" was not deleted': 'fișierul "%(filename)s" n-a fost șters',
'file "%s" of %s restored': 'fișier "%s" de %s restaurat',
'file changed on disk': 'fișier modificat pe disc',
'file does not exist': 'fișier inexistent',
'file saved on %(time)s': 'fișier salvat %(time)s',
'file saved on %s': 'fișier salvat pe %s',
'First name': 'Prenume',
'First, import a template and signature!': 'First, import a template and signature!',
'Forbidden': 'Interzis',
'Form has errors!': 'Form has errors!',
'Forms and Validators': 'Formulare și validatori',
'Forum': 'Forum',
'FREE': 'FREE',
'Free Applications': 'Aplicații gratuite',
'from %s to %s': 'from %s to %s',
'FULL!': 'FULL!',
'Functions with no doctests will result in [passed] tests.': 'Funcțiile fără doctests vor genera teste [trecute].',
'Generate Certificate': 'Generate Certificate',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Grup %(group_id)s creat',
'Group ID': 'ID grup',
'Group uniquely assigned to user %(id)s': 'Grup asociat în mod unic utilizatorului %(id)s',
'Groups': 'Grupuri',
'has satisfactorily completed the course': 'has satisfactorily completed the course',
'Hello World': 'Salutare lume',
'help': 'ajutor',
'Home': 'Acasă',
'hours': 'hours',
'How did you get here?': 'Cum ați ajuns aici?',
'htmledit': 'editare html',
'Icon': 'Icon',
'If you want to test, just': 'If you want to test, just',
"If you're sure you paid the order, please contact us. Otherwise, try to pay again later.": "If you're sure you paid the order, please contact us. Otherwise, try to pay again later.",
"If you're sure you paid the order, please contact us. Otherwise, try to pay again later.\n": "If you're sure you paid the order, please contact us. Otherwise, try to pay again later.\n",
'import': 'import',
'Import/Export': 'Import/Export',
'in a total of %d hours.': 'in a total of %d hours.',
'In Progress': 'In Progress',
'includes': 'include',
'Index': 'Index',
'insert new': 'adaugă nou',
'insert new %s': 'adaugă nou %s',
'Installed applications': 'Aplicații instalate',
'Interested? Submit your email below to be notified for the next open class.': 'Interested? Submit your email below to be notified for the next open class.',
'Interests': 'Interests',
'internal error': 'eroare internă',
'Internal State': 'Stare internă',
'Introduction': 'Introducere',
'Invalid action': 'Acțiune invalidă',
'Invalid email': 'E-mail invalid',
'invalid password': 'parolă invalidă',
'Invalid password': 'Parolă invalidă',
'Invalid Query': 'Interogare invalidă',
'invalid request': 'cerere invalidă',
'invalid ticket': 'tichet invalid',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'fișier de limbă "%(filename)s" creat/actualizat',
'Language files (static strings) updated': 'Fișierele de limbă (șirurile statice de caractere) actualizate',
'languages': 'limbi',
'Languages': 'Limbi',
'languages updated': 'limbi actualizate',
'Last name': 'Nume',
'Last saved on:': 'Ultima salvare:',
'Layout': 'Șablon',
'Layout Plugins': 'Șablon plugin-uri',
'Layouts': 'Șabloane',
'Lesson': 'Lesson',
'Lesson Id': 'Lesson Id',
'Lesson scheduled for:': 'Lesson scheduled for:',
'Lesson Type': 'Lesson Type',
'License for': 'Licență pentru',
'Limit date:': 'Limit date:',
'limited to': 'limited to',
'Live Chat': 'Chat live',
'loading...': 'încarc...',
'Log In': 'Log In',
'Logged in': 'Logat',
'Logged out': 'Delogat',
'Login': 'Autentificare',
'login': 'autentificare',
'Login to the Administrative Interface': 'Logare interfață de administrare',
'logout': 'ieșire',
'Logout': 'Ieșire',
'Lost Password': 'Parolă pierdută',
'Lost password?': 'Parolă pierdută?',
'Main Menu': 'Meniu principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Manage Cache',
'Manage courses': 'Manage courses',
'Max Students': 'Max Students',
'Max. Students': 'Max. Students',
'Memberships': 'Memberships',
'Menu Model': 'Model meniu',
'merge': 'unește',
'Models': 'Modele',
'models': 'modele',
'Module': 'Module',
'Modules': 'Module',
'modules': 'module',
'My Calendar': 'My Calendar',
'My Certificates': 'My Certificates',
'My Courses': 'My Courses',
'My courses': 'My courses',
'My Sites': 'Site-urile mele',
'My Work': 'My Work',
'Name': 'Nume',
'New': 'Nou',
'New announcement': 'New announcement',
'New announcement on %s class': 'New announcement on %s class',
'new application "%s" created': 'aplicația nouă "%s" a fost creată',
'New Class': 'New Class',
'New Course': 'New Course',
'New lesson': 'New lesson',
'New module': 'New module',
'New password': 'Parola nouă',
'New Record': 'Înregistrare nouă',
'new record inserted': 'înregistrare nouă adăugată',
'New Topic': 'New Topic',
'New topic': 'New topic',
'next %s rows': 'next %s rows',
'next 100 rows': 'următoarele 100 de linii',
'NO': 'NU',
'No announcements yet!': 'No announcements yet!',
'No databases in this application': 'Aplicație fără bază de date',
"Now, you won't be able to see the lessons anymore. But the forum, announcements and other resources are still available.": "Now, you won't be able to see the lessons anymore. But the forum, announcements and other resources are still available.",
'Object or table name': 'Obiect sau nume de tabel',
'Old password': 'Parola veche',
'Online examples': 'Exemple online',
'Open classes': 'Open classes',
'Open Enrollment': 'Open Enrollment',
'Or': 'Sau',
'or import from csv file': 'sau importă din fișier csv',
'or provide application url:': 'sau furnizează adresă url:',
'Order': 'Order',
'Order Date': 'Order Date',
'Order date': 'Order date',
'Order details': 'Order details',
'Order Id': 'Order Id',
'Order Nº': 'Order Nº',
'Origin': 'Origine',
'Original/Translation': 'Original/Traducere',
'Other Plugins': 'Alte plugin-uri',
'Other Recipes': 'Alte rețete',
'Overview': 'Prezentare de ansamblu',
'Owner': 'Owner',
'pack all': 'împachetează toate',
'pack compiled': 'pachet compilat',
'Password': 'Parola',
"Password fields don't match": 'Câmpurile de parolă nu se potrivesc',
'Payment completed! Congratulations for your purchase!': 'Payment completed! Congratulations for your purchase!',
'Payment confirmed!': 'Payment confirmed!',
'Payment History': 'Payment History',
'Peeking at file': 'Vizualizare fișier',
'Pending': 'Pending',
'Pending Id': 'Pending Id',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Place': 'Place',
'please input your password again': 'introduceți parola din nou',
'Please, select which type of lesson you want to create.': 'Please, select which type of lesson you want to create.',
'Plugins': 'Plugin-uri',
'Post': 'Post',
'Powered by': 'Pus în mișcare de',
'Preface': 'Prefață',
'Preview': 'Preview',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 de linii anterioare',
'Price': 'Price',
'Products': 'Products',
'Professor': 'Professor',
'Profile': 'Profil',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query': 'Interogare',
'Query:': 'Interogare:',
'Question': 'Question',
'Quick Examples': 'Exemple rapide',
'RAM': 'RAM',
'RAM Cache Keys': 'Chei cache RAM',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Rețete',
'Record': 'înregistrare',
'record does not exist': 'înregistrare inexistentă',
'Record ID': 'ID înregistrare',
'Record id': 'id înregistrare',
'register': 'înregistrare',
'Register': 'Înregistrare',
'register a normal user': 'register a normal user',
'Registered On': 'Registered On',
'Registration identifier': 'Identificator de autentificare',
'Registration key': 'Cheie înregistrare',
'Registration successful': 'Autentificare reușită',
'Release Date': 'Release Date',
'Remember me (for 30 days)': 'Ține-mă minte (timp de 30 de zile)',
'remove compiled': 'șterge compilate',
'Replies': 'Replies',
'Reply this user': 'Reply this user',
'Request reset password': 'Cerere resetare parolă',
'Reset Password key': 'Cheie restare parolă',
'Resolve Conflict file': 'Fișier rezolvare conflict',
'restore': 'restaurare',
'revert': 'revenire',
'Role': 'Rol',
'Roles': 'Roles',
'Rows in Table': 'Linii în tabel',
'Rows selected': 'Linii selectate',
'said': 'said',
'save': 'salvare',
'Save model as...': 'Save model as...',
'Save profile': 'Salvează profil',
'Saved file hash:': 'Hash fișier salvat:',
'Schedule Date': 'Schedule Date',
'Schedule event': 'Schedule event',
'Search': 'Căutare',
'Semantic': 'Semantică',
'Send to Students': 'Send to Students',
'Services': 'Servicii',
'session expired': 'sesiune expirată',
'Settings': 'Settings',
'shell': 'line de commandă',
'Shopping Cart': 'Shopping Cart',
'Short Description': 'Short Description',
'Sign Up': 'Sign Up',
'Signature': 'Signature',
'site': 'site',
'Size of cache:': 'Size of cache:',
'some files could not be removed': 'anumite fișiere n-au putut fi șterse',
"Something happened and we couldn't verify your payment.": "Something happened and we couldn't verify your payment.",
'Something went wrong!': 'Something went wrong!',
'Sorry! Something bad happened!': 'Sorry! Something bad happened!',
'Start': 'Start',
'Start Date': 'Start Date',
'Start date': 'Start date',
'Starting on': 'Starting on',
'starts with': 'începe cu',
'state': 'stare',
'static': 'static',
'Static files': 'Fișiere statice',
'Statistics': 'Statistics',
'Status': 'Status',
'Student': 'Student',
'students max': 'students max',
'Stylesheet': 'Foaie de stiluri',
'Submit': 'Înregistrează',
'submit': 'submit',
'Support': 'Suport',
'Sure you want to delete this object?': 'Sigur ștergeți acest obiect?',
'Table': 'tabel',
'Table name': 'Nume tabel',
'Take a look at our Courses': 'Take a look at our Courses',
'Template': 'Template',
'test': 'test',
'Testing application': 'Testare aplicație',
'Text': 'Text',
'Thank you for your purchase!': 'Thank you for your purchase!',
'Thank you!': 'Thank you!',
'Thank you.': 'Thank you.',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Interogarea (query)" este o condiție de tipul "db.tabel1.câmp1==\'valoare\'". Ceva de genul "db.tabel1.câmp1==db.tabel2.câmp2" generează un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'logica aplicației, fiecare rută URL este mapată într-o funcție expusă de controlor',
'The Core': 'Nucleul',
'the data representation, define database tables and sets': 'reprezentarea datelor, definește tabelele bazei de date și seturile (de date)',
'The following classes have a limit date of conclusion:': 'The following classes have a limit date of conclusion:',
'The output of the file is a dictionary that was rendered by the view %s': 'Fișierul produce un dicționar care a fost prelucrat de vederea %s',
'the presentations layer, views are also known as templates': 'nivelul de prezentare, vederile sunt de asemenea numite și șabloane',
'The total amount was R$%.2f.': 'The total amount was R$%.2f.',
'The total amount was R$%.2f.\n': 'The total amount was R$%.2f.\n',
'The Views': 'Vederile',
'There are no controllers': 'Nu există controlori',
'There are no models': 'Nu există modele',
'There are no modules': 'Nu există module',
'There are no static files': 'Nu există fișiere statice',
'There are no translators, only default language is supported': 'Nu există traduceri, doar limba implicită este suportată',
'There are no views': 'Nu există vederi',
'There is a new announcement on %s class.': 'There is a new announcement on %s class.',
'There was a problem with this video!': 'There was a problem with this video!',
'There was a problem with your payment!': 'There was a problem with your payment!',
'There was a problem with your payment!\n': 'There was a problem with your payment!\n',
'these files are served without processing, your images go here': 'aceste fișiere sunt servite fără procesare, imaginea se plasează acolo',
'These people are interested in your course %s': 'These people are interested in your course %s',
'This App': 'Această aplicație',
'This class has a limit date for conclusion.': 'This class has a limit date for conclusion.',
'This class reached the limit date': 'This class reached the limit date',
'This course is already on your shopping cart!': 'This course is already on your shopping cart!',
'This is a copy of the scaffolding application': 'Aceasta este o copie a aplicației schelet',
'This is the %(filename)s template': 'Aceasta este șablonul fișierului %(filename)s',
'This is to certify that': 'This is to certify that',
"This means that, after the limit date, you won't be able to see the lessons anymore. Forum, announcements and other resources will still be available.": "This means that, after the limit date, you won't be able to see the lessons anymore. Forum, announcements and other resources will still be available.",
'Ticket': 'Tichet',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Moment în timp (timestamp)',
'Title': 'Title',
'to previous version.': 'la versiunea anterioară.',
'to finish your payment.': 'to finish your payment.',
'Token': 'Token',
'too short': 'prea scurt',
'Total': 'Total',
'Total Hours': 'Total Hours',
'Traceback': 'Traceback',
'translation strings for the application': 'șiruri de caractere folosite la traducerea aplicației',
'True': 'Adevărat',
'try': 'încearcă',
'try something like': 'încearcă ceva de genul',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'Imposibil de verificat dacă există actualizări',
'unable to create application "%s"': 'imposibil de creat aplicația "%s"',
'unable to delete file "%(filename)s"': 'imposibil de șters fișierul "%(filename)s"',
'Unable to download': 'Imposibil de descărcat',
'Unable to download app': 'Imposibil de descărcat aplicația',
'unable to parse csv file': 'imposibil de analizat fișierul csv',
'unable to uninstall "%s"': 'imposibil de dezinstalat "%s"',
'uncheck all': 'decoșează tot',
'uninstall': 'dezinstalează',
'update': 'actualizează',
'update all languages': 'actualizează toate limbile',
'Update:': 'Actualizare:',
'upload application:': 'incarcă aplicația:',
'Upload existing application': 'Încarcă aplicația existentă',
'upload file:': 'încarcă fișier:',
'Upload Video': 'Upload Video',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Folosiți (...)&(...) pentru AND, (...)|(...) pentru OR, și ~(...) pentru NOT, pentru a crea interogări complexe.',
'User': 'User',
'User %(id)s Logged-in': 'Utilizator %(id)s autentificat',
'User %(id)s Logged-out': 'Utilizator %(id)s delogat',
'User %(id)s Password changed': 'Parola utilizatorului %(id)s a fost schimbată',
'User %(id)s Password reset': 'Resetare parola utilizator %(id)s',
'User %(id)s Profile updated': 'Profil utilizator %(id)s actualizat',
'User %(id)s Registered': 'Utilizator %(id)s înregistrat',
'User Area - Courses': 'User Area - Courses',
'User avatar': 'User avatar',
'User Class': 'User Class',
'User Id': 'User Id',
'User ID': 'ID utilizator',
'User Lesson': 'User Lesson',
'Users': 'Users',
'value already in database or empty': 'Valoare existentă în baza de date sau vidă',
'Verify Password': 'Verifică parola',
'versioning': 'versiuni',
'Video': 'Video',
'Video file': 'Video file',
'Video URL': 'Video URL',
'Videos': 'Video-uri',
'View': 'Vedere',
'view': 'vedere',
'View course': 'View course',
'views': 'vederi',
'Views': 'Vederi',
'Wait a few seconds.': 'Wait a few seconds.',
"We couldn't find any video here. Please, alert your instructor about this problem!": "We couldn't find any video here. Please, alert your instructor about this problem!",
'We just confirmed your payment for order number %s.': 'We just confirmed your payment for order number %s.',
'We just confirmed your payment for order number %s.\n': 'We just confirmed your payment for order number %s.\n',
'We just received your order number %s:': 'We just received your order number %s:',
'We just received your order number %s:\n': 'We just received your order number %s:\n',
'We received your order!': 'We received your order!',
'We will wait and let you know when your payment is confirmed.': 'We will wait and let you know when your payment is confirmed.',
'web2py is up to date': 'web2py este la zi',
'web2py Recent Tweets': 'Ultimele tweet-uri web2py',
'Welcome': 'Bine ați venit',
'Welcome %s': 'Bine ați venit %s',
'Welcome to web2py': 'Bun venit la web2py',
'Welcome to web2py!': 'Bun venit la web2py!',
'Which called the function %s located in the file %s': 'Care a apelat funcția %s prezentă în fișierul %s',
"Why don't you follow this steps to start making your courses?": "Why don't you follow this steps to start making your courses?",
'Working...': 'Working...',
'YES': 'DA',
'You are already enrolled in this class to %s, so we removed it from your shopping cart.': 'You are already enrolled in this class to %s, so we removed it from your shopping cart.',
'You are already on the list for this course!': 'You are already on the list for this course!',
'You are already on this class!': 'You are already on this class!',
'You are already registered!': 'You are already registered!',
'You are successfully running web2py': 'Rulați cu succes web2py',
'You can access it here: %s': 'You can access it here: %s',
'You can check your payment history after login in to your profile.': 'You can check your payment history after login in to your profile.',
'You can check your payment history after login in to your profile.\n': 'You can check your payment history after login in to your profile.\n',
'You can modify this application and adapt it to your needs': 'Puteți modifica și adapta aplicația nevoilor dvs.',
'You have nothing in your shopping cart yet!': 'You have nothing in your shopping cart yet!',
'You visited the url %s': 'Ați vizitat adresa %s',
"You're beeing redirected to a secure enviroment on Paypal": "You're beeing redirected to a secure enviroment on Paypal",
'Your browser does not support the video tag.': 'Your browser does not support the video tag.',
'Your Certificate of Conclusion of %s is attached to this email. For more info, contact your teacher.\n\nCongratulations!': 'Your Certificate of Conclusion of %s is attached to this email. For more info, contact your teacher.\n\nCongratulations!',
'Your email': 'Your email',
}
|
juliarizza/web2courses
|
languages/ro.py
|
Python
|
mit
| 29,001 | 0.024039 |
# urllib3/_collections.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import deque
from threading import RLock
__all__ = ['RecentlyUsedContainer']
class AccessEntry(object):
__slots__ = ('key', 'is_valid')
def __init__(self, key, is_valid=True):
self.key = key
self.is_valid = is_valid
class RecentlyUsedContainer(dict):
"""
Provides a dict-like that maintains up to ``maxsize`` keys while throwing
away the least-recently-used keys beyond ``maxsize``.
"""
# If len(self.access_log) exceeds self._maxsize * CLEANUP_FACTOR, then we
# will attempt to cleanup the invalidated entries in the access_log
# datastructure during the next 'get' operation.
CLEANUP_FACTOR = 10
def __init__(self, maxsize=10):
self._maxsize = maxsize
self._container = {}
        # We use a deque to store our keys ordered by the last access.
self.access_log = deque()
self.access_log_lock = RLock()
# We look up the access log entry by the key to invalidate it so we can
        # insert a new authoritative entry at the head without having to dig and
# find the old entry for removal immediately.
self.access_lookup = {}
# Trigger a heap cleanup when we get past this size
self.access_log_limit = maxsize * self.CLEANUP_FACTOR
def _invalidate_entry(self, key):
"If exists: Invalidate old entry and return it."
old_entry = self.access_lookup.get(key)
if old_entry:
old_entry.is_valid = False
return old_entry
def _push_entry(self, key):
"Push entry onto our access log, invalidate the old entry if exists."
self._invalidate_entry(key)
new_entry = AccessEntry(key)
self.access_lookup[key] = new_entry
self.access_log_lock.acquire()
self.access_log.appendleft(new_entry)
self.access_log_lock.release()
def _prune_entries(self, num):
"Pop entries from our access log until we popped ``num`` valid ones."
while num > 0:
self.access_log_lock.acquire()
p = self.access_log.pop()
self.access_log_lock.release()
if not p.is_valid:
continue # Invalidated entry, skip
dict.pop(self, p.key, None)
self.access_lookup.pop(p.key, None)
num -= 1
def _prune_invalidated_entries(self):
"Rebuild our access_log without the invalidated entries."
self.access_log_lock.acquire()
self.access_log = deque(e for e in self.access_log if e.is_valid)
self.access_log_lock.release()
def _get_ordered_access_keys(self):
"Return ordered access keys for inspection. Used for testing."
self.access_log_lock.acquire()
r = [e.key for e in self.access_log if e.is_valid]
self.access_log_lock.release()
return r
def __getitem__(self, key):
item = dict.get(self, key)
if not item:
raise KeyError(key)
# Insert new entry with new high priority, also implicitly invalidates
# the old entry.
self._push_entry(key)
if len(self.access_log) > self.access_log_limit:
# Heap is getting too big, try to clean up any tailing invalidated
# entries.
self._prune_invalidated_entries()
return item
def __setitem__(self, key, item):
# Add item to our container and access log
dict.__setitem__(self, key, item)
self._push_entry(key)
# Discard invalid and excess entries
self._prune_entries(len(self) - self._maxsize)
def __delitem__(self, key):
self._invalidate_entry(key)
self.access_lookup.pop(key, None)
dict.__delitem__(self, key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
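# Minimal usage sketch (added for illustration; not part of the original module).
# The container acts like a dict but evicts the least-recently-used keys once
# maxsize is exceeded:
#
#     d = RecentlyUsedContainer(maxsize=2)
#     d['a'] = 1
#     d['b'] = 2
#     _ = d['a']     # touching 'a' moves it to the head of the access log
#     d['c'] = 3     # exceeds maxsize, so the least-recently-used key 'b' is pruned
#     assert 'b' not in d and 'a' in d and 'c' in d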
|
samabhi/pstHealth
|
venv/lib/python2.7/site-packages/requests/packages/urllib3/_collections.py
|
Python
|
mit
| 4,119 | 0.000243 |
"""Unit test for treadmill.runtime.
"""
import errno
import socket
import unittest
import mock
import treadmill
import treadmill.rulefile
import treadmill.runtime
from treadmill import exc
class RuntimeTest(unittest.TestCase):
"""Tests for treadmill.runtime."""
@mock.patch('socket.socket.bind', mock.Mock())
def test__allocate_sockets(self):
"""Test allocating sockets.
"""
# access protected module _allocate_sockets
# pylint: disable=w0212
socket.socket.bind.side_effect = [
socket.error(errno.EADDRINUSE, 'In use'),
mock.DEFAULT,
mock.DEFAULT,
mock.DEFAULT
]
sockets = treadmill.runtime._allocate_sockets(
'prod', '0.0.0.0', socket.SOCK_STREAM, 3
)
self.assertEqual(3, len(sockets))
@mock.patch('socket.socket.bind', mock.Mock())
def test__allocate_sockets_fail(self):
"""Test allocating sockets when all are taken.
"""
# access protected module _allocate_sockets
# pylint: disable=w0212
socket.socket.bind.side_effect = socket.error(errno.EADDRINUSE,
'In use')
with self.assertRaises(exc.ContainerSetupError):
treadmill.runtime._allocate_sockets(
'prod', '0.0.0.0', socket.SOCK_STREAM, 3
)
@mock.patch('socket.socket', mock.Mock(autospec=True))
@mock.patch('treadmill.runtime._allocate_sockets', mock.Mock())
def test_allocate_network_ports(self):
"""Test network port allocation.
"""
# access protected module _allocate_network_ports
# pylint: disable=w0212
treadmill.runtime._allocate_sockets.side_effect = \
lambda _x, _y, _z, count: [socket.socket()] * count
mock_socket = socket.socket.return_value
mock_socket.getsockname.side_effect = [
('unused', 50001),
('unused', 60001),
('unused', 10000),
('unused', 10001),
('unused', 10002),
('unused', 12345),
('unused', 54321),
]
manifest = {
'type': 'native',
'environment': 'dev',
'endpoints': [
{
'name': 'http',
'port': 8000,
'proto': 'tcp',
}, {
'name': 'ssh',
'port': 0,
'proto': 'tcp',
}, {
'name': 'dns',
'port': 5353,
'proto': 'udp',
}, {
'name': 'port0',
'port': 0,
'proto': 'udp',
}
],
'ephemeral_ports': {'tcp': 3, 'udp': 0},
}
treadmill.runtime.allocate_network_ports(
'1.2.3.4',
manifest
)
        # In the updated manifest, make sure that real_port is specified from
        # the ephemeral range as returned by getsockname.
self.assertEqual(
8000,
manifest['endpoints'][0]['port']
)
self.assertEqual(
50001,
manifest['endpoints'][0]['real_port']
)
self.assertEqual(
60001,
manifest['endpoints'][1]['port']
)
self.assertEqual(
60001,
manifest['endpoints'][1]['real_port']
)
self.assertEqual(
5353,
manifest['endpoints'][2]['port']
)
self.assertEqual(
12345,
manifest['endpoints'][2]['real_port']
)
self.assertEqual(
54321,
manifest['endpoints'][3]['port']
)
self.assertEqual(
54321,
manifest['endpoints'][3]['real_port']
)
self.assertEqual(
[10000, 10001, 10002],
manifest['ephemeral_ports']['tcp']
)
if __name__ == '__main__':
unittest.main()
|
keithhendry/treadmill
|
tests/runtime_test.py
|
Python
|
apache-2.0
| 4,097 | 0 |
"""
Author: Maneesh Divana <mdaneeshd77@gmail.com>
Interpreter: Python 3.6.8
Quick Sort
Worst Case: O(n^2)
Average Case: O(nlog n)
Best Case: O(nlog n)
"""
from random import shuffle
def partition(arr: list, left: int, right: int) -> int:
"""Partitions the given array based on a pivot element,
then sorts the sub-arrays and returns the partition index"""
# Take the right most element as pivot
pivot = arr[right]
# i tracks the smallest element, currently invalid
i = left - 1
for j in range(left, right):
# Check if the current element is smaller than pivot element
if arr[j] <= pivot:
i += 1
# If so, swap the smallest element and the current element
arr[i], arr[j] = arr[j], arr[i]
# One final swap to put pivot element at its correct position
arr[i + 1], arr[right] = arr[right], arr[i + 1]
# Return the partition index
return i + 1
def qsort(arr: list, left: int, right: int) -> None:
"""Recursively partitions the given array and sorts based on
QuickSort algorithm."""
if left < right:
# Partition the array and get the partition index
p_idx = partition(arr, left, right)
# Recursively partition and sort the sub-arrays
qsort(arr, left, p_idx - 1)
qsort(arr, p_idx + 1, right)
if __name__ == "__main__":
ARR = list(range(0, 10))
shuffle(ARR)
LEFT = 0
RIGHT = len(ARR) - 1
print("\nQuickSort\n")
print("Input array:", ARR)
qsort(ARR, LEFT, RIGHT)
print("\nSorted array:", ARR, "\n")
|
maneeshd/Algorithms-and-Data-Structures
|
algorithms/QuickSort.py
|
Python
|
mit
| 1,581 | 0 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/base/shared_base_sleeve_both.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","default_sleeves")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/base/shared_base_sleeve_both.py
|
Python
|
mit
| 465 | 0.045161 |
# see ex50.py
|
cohadar/learn-python-the-hard-way
|
ex51.py
|
Python
|
mit
| 13 | 0.076923 |
# Python 2 and 3:
try:
# Python 3 only:
from urllib.parse import urlencode, urlsplit, parse_qs, unquote
except ImportError:
# Python 2 only:
from urlparse import parse_qs, urlsplit
from urllib import urlencode, unquote
|
fasihahmad/django-rest-framework-related-views
|
rest_framework_related/py2_3.py
|
Python
|
gpl-3.0
| 239 | 0 |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Fake HP client exceptions to use when mocking HP clients."""
class UnsupportedVersion(Exception):
"""Unsupported version of the client."""
pass
class ClientException(Exception):
"""The base exception class for these fake exceptions."""
_error_code = None
_error_desc = None
_error_ref = None
_debug1 = None
_debug2 = None
def __init__(self, error=None):
if error:
if 'code' in error:
self._error_code = error['code']
if 'desc' in error:
self._error_desc = error['desc']
if 'ref' in error:
self._error_ref = error['ref']
if 'debug1' in error:
self._debug1 = error['debug1']
if 'debug2' in error:
self._debug2 = error['debug2']
def get_code(self):
return self._error_code
def get_description(self):
return self._error_desc
def get_ref(self):
return self._error_ref
def __str__(self):
formatted_string = self.message
if self.http_status:
formatted_string += " (HTTP %s)" % self.http_status
if self._error_code:
formatted_string += " %s" % self._error_code
if self._error_desc:
formatted_string += " - %s" % self._error_desc
if self._error_ref:
formatted_string += " - %s" % self._error_ref
if self._debug1:
formatted_string += " (1: '%s')" % self._debug1
if self._debug2:
formatted_string += " (2: '%s')" % self._debug2
return formatted_string
class HTTPConflict(Exception):
http_status = 409
message = "Conflict"
def __init__(self, error=None):
if error and 'message' in error:
self._error_desc = error['message']
def get_description(self):
return self._error_desc
class HTTPNotFound(Exception):
http_status = 404
message = "Not found"
class HTTPForbidden(ClientException):
http_status = 403
message = "Forbidden"
class HTTPBadRequest(Exception):
http_status = 400
message = "Bad request"
class HTTPServerError(Exception):
http_status = 500
message = "Error"
def __init__(self, error=None):
if error and 'message' in error:
self._error_desc = error['message']
def get_description(self):
return self._error_desc
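# Illustrative use when mocking an HP client (the mock object and method names
# below are hypothetical, not part of this module):
#   client.createVolume.side_effect = HTTPConflict({'message': 'volume exists'})
#   # code under test can then inspect exc.get_description() / exc.http_status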
|
nikesh-mahalka/cinder
|
cinder/tests/unit/fake_hp_client_exceptions.py
|
Python
|
apache-2.0
| 3,077 | 0 |
#!/usr/bin/env python
import sys, re, getopt
class Menusystem:
types = {"run" : "OPT_RUN",
"inactive" : "OPT_INACTIVE",
"checkbox" : "OPT_CHECKBOX",
"radiomenu": "OPT_RADIOMENU",
"sep" : "OPT_SEP",
"invisible": "OPT_INVISIBLE",
"radioitem": "OPT_RADIOITEM",
"exitmenu" : "OPT_EXITMENU",
"login" : "login", # special type
"submenu" : "OPT_SUBMENU"}
entry_init = { "item" : "",
"info" : "",
"data" : "",
"ipappend" : 0, # flag to send in case of PXELINUX
"helpid" : 65535, # 0xFFFF
"shortcut":"-1",
"state" : 0, # initial state of checkboxes
"argsmenu": "", # name of menu containing arguments
"perms" : "", # permission required to execute this entry
"_updated" : None, # has this dictionary been updated
"type" : "run" }
menu_init = { "title" : "",
"row" : "0xFF", # let system decide position
"col" : "0xFF",
"_updated" : None,
"name" : "" }
system_init ={ "videomode" : "0xFF",
"title" : "Menu System",
"top" : "1",
"left" : "1" ,
"bot" : "21",
"right":"79",
"helpdir" : "/isolinux/help",
"pwdfile" : "",
"pwdrow" : "23",
"editrow" : "23",
"skipcondn" : "0",
"skipcmd" : ".exit",
"startfile": "",
"onerrorcmd":".repeat",
"exitcmd" : ".exit",
"exitcmdroot" : "",
"timeout" : "600",
"timeoutcmd":".beep",
"totaltimeout" : "0",
"totaltimeoutcmd" : ".wait"
}
shift_flags = { "alt" : "ALT_PRESSED",
"ctrl" : "CTRL_PRESSED",
"shift": "SHIFT_PRESSED",
"caps" : "CAPSLOCK_ON",
"num" : "NUMLOCK_ON",
"ins" : "INSERT_ON"
}
reqd_templates = ["item","login","menu","system"]
def __init__(self,template):
self.state = "system"
self.code_template_filename = template
self.menus = []
self.init_entry()
self.init_menu()
self.init_system()
self.vtypes = " OR ".join(list(self.types.keys()))
self.vattrs = " OR ".join([x for x in list(self.entry.keys()) if x[0] != "_"])
self.mattrs = " OR ".join([x for x in list(self.menu.keys()) if x[0] != "_"])
def init_entry(self):
self.entry = self.entry_init.copy()
def init_menu(self):
self.menu = self.menu_init.copy()
def init_system(self):
self.system = self.system_init.copy()
def add_menu(self,name):
self.add_item()
self.init_menu()
self.menu["name"] = name
self.menu["_updated"] = 1
self.menus.append( (self.menu,[]) )
def add_item(self):
if self.menu["_updated"]: # menu details have changed
self.menus[-1][0].update(self.menu)
self.init_menu()
if self.entry["_updated"]:
if not self.entry["info"]:
self.entry["info"] = self.entry["data"]
if not self.menus:
print("Error before line %d" % self.lineno)
print("REASON: menu must be declared before a menu item is declared")
sys.exit(1)
self.menus[-1][1].append(self.entry)
self.init_entry()
def set_item(self,name,value):
if name not in self.entry:
msg = ["Unknown attribute %s in line %d" % (name,self.lineno)]
msg.append("REASON: Attribute must be one of %s" % self.vattrs)
return "\n".join(msg)
if name=="type" and value not in self.types:
msg = [ "Unrecognized type %s in line %d" % (value,self.lineno)]
msg.append("REASON: Valid types are %s" % self.vtypes)
return "\n".join(msg)
if name=="shortcut":
if (value != "-1") and not re.match("^[A-Za-z0-9]$",value):
msg = [ "Invalid shortcut char '%s' in line %d" % (value,self.lineno) ]
msg.append("REASON: Valid values are [A-Za-z0-9]")
return "\n".join(msg)
elif value != "-1": value = "'%s'" % value
elif name in ["state","helpid","ipappend"]:
try:
value = int(value)
except:
return "Value of %s in line %d must be an integer" % (name,self.lineno)
self.entry[name] = value
self.entry["_updated"] = 1
return ""
def set_menu(self,name,value):
if name not in self.menu:
return "Error: Unknown keyword %s" % name
self.menu[name] = value
self.menu["_updated"] = 1
return ""
def set_system(self,name,value):
if name not in self.system:
return "Error: Unknown keyword %s" % name
if name == "skipcondn":
try: # is skipcondn a number?
a = int(value)
except: # it is a "-" delimited sequence
value = value.lower()
parts = [ self.shift_flags.get(x.strip(),None) for x in value.split("-") ]
self.system["skipcondn"] = " | ".join([_f for _f in parts if _f])
else:
self.system[name] = value
def set(self,name,value):
# remove quotes if given
if (value[0] == value[-1]) and (value[0] in ['"',"'"]): # remove quotes
value = value[1:-1]
if self.state == "system":
err = self.set_system(name,value)
if not err: return
if self.state == "menu":
err = self.set_menu(name,value)
            # change state to item if menu returns an error
if err:
err = None
self.state = "item"
if self.state == "item":
err = self.set_item(name,value)
if not err: return
# all errors so return item's error message
print(err)
sys.exit(1)
def print_entry(self,entry,fd):
entry["type"] = self.types[entry["type"]]
if entry["type"] == "login": #special type
fd.write(self.templates["login"] % entry)
else:
fd.write(self.templates["item"] % entry)
def print_menu(self,menu,fd):
if menu["name"] == "main": self.foundmain = 1
fd.write(self.templates["menu"] % menu)
if (menu["row"] != "0xFF") or (menu["col"] != "0xFF"):
fd.write(' set_menu_pos(%(row)s,%(col)s);\n' % menu)
def output(self,filename):
curr_template = None
contents = []
self.templates = {}
regbeg = re.compile(r"^--(?P<name>[a-z]+) BEGINS?--\n$")
regend = re.compile(r"^--[a-z]+ ENDS?--\n$")
ifd = open(self.code_template_filename,"r")
for line in ifd.readlines():
b = regbeg.match(line)
e = regend.match(line)
if e: # end of template
if curr_template:
self.templates[curr_template] = "".join(contents)
curr_template = None
continue
if b:
curr_template = b.group("name")
contents = []
continue
if not curr_template: continue # lines between templates are ignored
contents.append(line)
ifd.close()
missing = None
for x in self.reqd_templates:
if x not in self.templates: missing = x
if missing:
print("Template %s required but not defined in %s" % (missing,self.code_template_filename))
if filename == "-":
fd = sys.stdout
else: fd = open(filename,"w")
self.foundmain = None
fd.write(self.templates["header"])
fd.write(self.templates["system"] % self.system)
for (menu,items) in self.menus:
self.print_menu(menu,fd)
for entry in items: self.print_entry(entry,fd)
fd.write(self.templates["footer"])
fd.close()
if not self.foundmain:
print("main menu not found")
print(self.menus)
sys.exit(1)
def input(self,filename):
if filename == "-":
fd = sys.stdin
else: fd = open(filename,"r")
self.lineno = 0
self.state = "system"
for line in fd.readlines():
self.lineno = self.lineno + 1
if line and line[-1] in ["\r","\n"]: line = line[:-1]
if line and line[-1] in ["\r","\n"]: line = line[:-1]
line = line.strip()
if line and line[0] in ["#",";"]: continue
try:
# blank line -> starting a new entry
if not line:
if self.state == "item": self.add_item()
continue
# starting a new section?
if line[0] == "[" and line[-1] == "]":
self.state = "menu"
self.add_menu(line[1:-1])
continue
# add property of current entry
pos = line.find("=") # find the first = in string
if pos < 0:
print("Syntax error in line %d" % self.lineno)
print("REASON: non-section lines must be of the form ATTRIBUTE=VALUE")
sys.exit(1)
attr = line[:pos].strip().lower()
value = line[pos+1:].strip()
self.set(attr,value)
except:
print("Error while parsing line %d: %s" % (self.lineno,line))
raise
fd.close()
self.add_item()
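# Illustrative .menu input accepted by Menusystem.input() above (the attribute
# values are made-up examples, not defaults):
#
#   title = My Boot Menu
#
#   [main]
#   item = Boot local disk
#   type = run
#   data = localboot 0
#
# Lines before the first [section] set system-level attributes, a [section]
# starts a new menu, ATTRIBUTE=VALUE lines fill the current entry and a blank
# line closes it.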
def usage():
print(sys.argv[0]," [options]")
print("--input=<file> is the name of the .menu file declaring the menu structure")
print("--output=<file> is the name of generated C source")
print("--template=<file> is the name of template to be used")
print()
print("input and output default to - (stdin and stdout respectively)")
print("template defaults to adv_menu.tpl")
sys.exit(1)
def main():
tfile = "adv_menu.tpl"
ifile = "-"
ofile = "-"
opts,args = getopt.getopt(sys.argv[1:], "hi:o:t:",["input=","output=","template=","help"])
if args:
print("Unknown options %s" % args)
usage()
for o,a in opts:
if o in ["-i","--input"]:
ifile = a
elif o in ["-o", "--output"]:
ofile = a
elif o in ["-t","--template"]:
tfile = a
elif o in ["-h","--help"]:
usage()
inst = Menusystem(tfile)
inst.input(ifile)
inst.output(ofile)
if __name__ == "__main__":
main()
|
ErwanAliasr1/syslinux
|
com32/cmenu/menugen.py
|
Python
|
gpl-2.0
| 10,693 | 0.033947 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-17 00:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('turnos', '0004_auto_20160519_0134'),
]
operations = [
migrations.RenameField('Turno', 'asistio', 'no_asistio'),
migrations.RenameField('Turno', 'aviso', 'no_aviso')
]
|
mava-ar/sgk
|
src/turnos/migrations/0005_auto_20160816_2140.py
|
Python
|
apache-2.0
| 415 | 0 |
# -*- coding: utf-8 -*-
from time import localtime, mktime, time, strftime
from datetime import datetime
from enigma import eEPGCache
from Screens.Screen import Screen
import ChannelSelection
from ServiceReference import ServiceReference
from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry
from Components.ActionMap import NumberActionMap, ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Button import Button
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import defaultMoviePath
from Components.Sources.Boolean import Boolean
from Screens.MovieSelection import getPreferredTagEditor
from Screens.LocationBox import MovieLocationBox
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Screens.Setup import SetupSummary
from RecordTimer import AFTEREVENT
class TimerEntry(Screen, ConfigListScreen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.setup_title = _("Timer entry")
self.timer = timer
self.entryDate = None
self.entryService = None
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self["description"] = Label("")
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig()
self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions", "ColorActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
"volumeUp": self.incrementStart,
"volumeDown": self.decrementStart,
"size+": self.incrementEnd,
"size-": self.decrementEnd,
}, -2)
self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = session)
self.createSetup("config")
self.onLayoutFinish.append(self.layoutFinished)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def createConfig(self):
justplay = self.timer.justplay
always_zap = self.timer.always_zap
rename_repeat = self.timer.rename_repeat
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.AUTO: "auto"
}[self.timer.afterEvent]
if self.timer.record_ecm and self.timer.descramble:
recordingtype = "descrambled+ecm"
elif self.timer.record_ecm:
recordingtype = "scrambled+ecm"
elif self.timer.descramble:
recordingtype = "normal"
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
# calculate default values
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
if self.timer.repeated: # repeated
type = "repeated"
if self.timer.repeated == 31: # Mon-Fri
repeated = "weekdays"
elif self.timer.repeated == 127: # daily
repeated = "daily"
else:
flags = self.timer.repeated
repeated = "user"
count = 0
for x in (0, 1, 2, 3, 4, 5, 6):
if flags == 1: # weekly
# print "Set to weekday " + str(x)
weekday = x
if flags & 1 == 1: # set user defined flags
day[x] = 1
count += 1
else:
day[x] = 0
flags >>= 1
if count == 1:
repeated = "weekly"
else: # once
type = "once"
repeated = None
weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
day[weekday] = 1
self.timerentry_justplay = ConfigSelection(choices = [
("zap", _("zap")), ("record", _("record")), ("zap+record", _("zap and record"))],
default = {0: "record", 1: "zap", 2: "zap+record"}[justplay + 2*always_zap])
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("auto", _("auto"))], default = afterevent)
self.timerentry_recordingtype = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = recordingtype)
self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
self.timerentry_name = ConfigText(default = self.timer.name.replace('\xc2\x86', '').replace('\xc2\x87', '').encode("utf-8"), visible_width = 50, fixed_size = False)
self.timerentry_description = ConfigText(default = self.timer.description, visible_width = 50, fixed_size = False)
self.timerentry_tags = self.timer.tags[:]
		# if no tags are set, use the event name as the default tag.
if not self.timerentry_tags:
tagname = self.timer.name.strip()
if tagname:
tagname = tagname[0].upper() + tagname[1:].replace(" ", "_")
self.timerentry_tags.append(tagname)
self.timerentry_tagsset = ConfigSelection(choices = [not self.timerentry_tags and "None" or " ".join(self.timerentry_tags)])
self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("weekly", _("weekly")), ("daily", _("daily")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
self.timerentry_renamerepeat = ConfigYesNo(default = rename_repeat)
self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d %B %Y"), increment = 86400)
self.timerentry_starttime = ConfigClock(default = self.timer.begin)
self.timerentry_endtime = ConfigClock(default = self.timer.end)
self.timerentry_showendtime = ConfigSelection(default = False, choices = [(True, _("yes")), (False, _("no"))])
default = self.timer.dirname or defaultMoviePath()
tmp = config.movielist.videodirs.value
if default not in tmp:
tmp.append(default)
self.timerentry_dirname = ConfigSelection(default = default, choices = tmp)
self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default = day[x]))
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
servicename = str(self.timer.service_ref.getServiceName())
except:
pass
self.timerentry_service_ref = self.timer.service_ref
self.timerentry_service = ConfigSelection([servicename])
def createSetup(self, widget):
self.list = []
self.entryName = getConfigListEntry(_("Name"), self.timerentry_name, _("Set the name the recording will get."))
self.list.append(self.entryName)
self.entryDescription = getConfigListEntry(_("Description"), self.timerentry_description, _("Set the description of the recording."))
self.list.append(self.entryDescription)
		self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay, _("Choose between record and ZAP."))
self.list.append(self.timerJustplayEntry)
self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type, _("A repeating timer or just once?"))
self.list.append(self.timerTypeEntry)
if self.timerentry_type.value == "once":
self.frequencyEntry = None
else: # repeated
self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated, _("Choose between Daily, Weekly, Weekdays or user defined."))
self.list.append(self.frequencyEntry)
self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate, _("Set the date the timer must start."))
self.list.append(self.repeatedbegindateEntry)
if self.timerentry_repeated.value == "daily":
pass
if self.timerentry_repeated.value == "weekdays":
pass
if self.timerentry_repeated.value == "weekly":
self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
if self.timerentry_repeated.value == "user":
self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
if self.timerentry_justplay.value != "zap":
self.list.append(getConfigListEntry(_("Rename name and description for new events"), self.timerentry_renamerepeat))
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date, _("Set the date the timer must start."))
if self.timerentry_type.value == "once":
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime, _("Set the time the timer must start."))
self.list.append(self.entryStartTime)
self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime, _("Set the time the timer must stop."))
# if self.timerentry_justplay.value == "zap":
# self.list.append(self.entryShowEndTime)
self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime, _("Set the time the timer must stop."))
if self.timerentry_justplay.value != "zap" or self.timerentry_showendtime.value:
self.list.append(self.entryEndTime)
self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service, _("Set the channel for this timer."))
self.list.append(self.channelEntry)
self.dirname = getConfigListEntry(_("Location"), self.timerentry_dirname, _("Where should the recording be saved?"))
self.tagsSet = getConfigListEntry(_("Tags"), self.timerentry_tagsset, _("Choose a tag for easy finding a recording."))
if self.timerentry_justplay.value != "zap":
if config.usage.setup_level.index >= 2: # expert+
self.list.append(self.dirname)
if getPreferredTagEditor():
self.list.append(self.tagsSet)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent, _("What action is required on complettion of the timer? 'Auto' lets the box return to the state it had when the timer started. 'Do nothing', 'Go to standby' and 'Go to deep standby' do ecaxtly that.")))
self.list.append(getConfigListEntry(_("Recording type"), self.timerentry_recordingtype, _("Descramble & record ECM' gives the option to descramble afterwards if descrambling on recording failed. 'Don't descramble, record ECM' save a scramble recording that can be descrambled on playback. 'Normal' means descramble the recording and don't record ECM.")))
self[widget].list = self.list
self[widget].l.setList(self.list)
def selectionChanged(self):
if self["config"].getCurrent():
if len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2]:
self["description"].setText(self["config"].getCurrent()[2])
if isinstance(self["config"].getCurrent()[1], ConfigText):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window and self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def layoutFinished(self):
self.setTitle(_(self.setup_title))
def createSummary(self):
return SetupSummary
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent() and self["config"].getCurrent()[0] or ""
def getCurrentValue(self):
return self["config"].getCurrent() and str(self["config"].getCurrent()[1].getText()) or ""
def newConfig(self):
if self["config"].getCurrent() in (self.timerTypeEntry, self.timerJustplayEntry, self.frequencyEntry, self.entryShowEndTime):
self.createSetup("config")
def KeyText(self):
if self['config'].getCurrent()[0] in (_('Name'), _("Description")):
self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=self["config"].getCurrent()[2], text = self["config"].getCurrent()[1].value)
def keyLeft(self):
cur = self["config"].getCurrent()
if cur in (self.channelEntry, self.tagsSet):
self.keySelect()
elif cur in (self.entryName, self.entryDescription):
self.renameEntry()
else:
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
cur = self["config"].getCurrent()
if cur in (self.channelEntry, self.tagsSet):
self.keySelect()
elif cur in (self.entryName, self.entryDescription):
self.renameEntry()
else:
ConfigListScreen.keyRight(self)
self.newConfig()
def renameEntry(self):
cur = self["config"].getCurrent()
if cur == self.entryName:
title_text = _("Please enter new name:")
old_text = self.timerentry_name.value
else:
title_text = _("Please enter new description:")
old_text = self.timerentry_description.value
self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=title_text, text=old_text)
def renameEntryCallback(self, answer):
if answer:
if self["config"].getCurrent() == self.entryName:
self.timerentry_name.value = answer
self["config"].invalidate(self.entryName)
else:
self.timerentry_description.value = answer
self["config"].invalidate(self.entryDescription)
def handleKeyFileCallback(self, answer):
if self["config"].getCurrent() in (self.channelEntry, self.tagsSet):
self.keySelect()
else:
ConfigListScreen.handleKeyFileCallback(self, answer)
self.newConfig()
def keySelect(self):
cur = self["config"].getCurrent()
if cur == self.channelEntry:
self.session.openWithCallback(
self.finishedChannelSelection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from"),
currentBouquet=True
)
elif config.usage.setup_level.index >= 2 and cur == self.dirname:
self.session.openWithCallback(
self.pathSelected,
MovieLocationBox,
_("Select target folder"),
self.timerentry_dirname.value,
minFree = 100 # We require at least 100MB free space
)
elif getPreferredTagEditor() and cur == self.tagsSet:
self.session.openWithCallback(
self.tagEditFinished,
getPreferredTagEditor(),
self.timerentry_tags
)
else:
self.keyGo()
def finishedChannelSelection(self, *args):
if args:
self.timerentry_service_ref = ServiceReference(args[0])
self.timerentry_service.setCurrentText(self.timerentry_service_ref.getServiceName())
self["config"].invalidate(self.channelEntry)
def getTimestamp(self, date, mytime):
d = localtime(date)
dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(mktime(dt.timetuple()))
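	# Illustrative: getTimestamp(<any epoch on 2016-08-17>, (20, 15)) returns the
	# epoch for 2016-08-17 20:15 local time - the date part comes from the first
	# argument, the clock time from the (hour, minute) tuple.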
def getBeginEnd(self):
date = self.timerentry_date.value
endtime = self.timerentry_endtime.value
starttime = self.timerentry_starttime.value
begin = self.getTimestamp(date, starttime)
end = self.getTimestamp(date, endtime)
# if the endtime is less than the starttime, add 1 day.
if end < begin:
end += 86400
# if the timer type is a Zap and no end is set, set duration to 1 second so time is shown in EPG's.
if self.timerentry_justplay.value == "zap":
if not self.timerentry_showendtime.value:
end = begin + (config.recording.margin_before.value*60) + 1
return begin, end
def selectChannelSelector(self, *args):
self.session.openWithCallback(
self.finishedChannelSelectionCorrection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from")
)
def finishedChannelSelectionCorrection(self, *args):
if args:
self.finishedChannelSelection(*args)
self.keyGo()
def keyGo(self, result = None):
if not self.timerentry_service_ref.isRecordable():
self.session.openWithCallback(self.selectChannelSelector, MessageBox, _("You didn't select a channel to record from."), MessageBox.TYPE_ERROR)
return
self.timer.name = self.timerentry_name.value
self.timer.description = self.timerentry_description.value
self.timer.justplay = self.timerentry_justplay.value == "zap"
self.timer.always_zap = self.timerentry_justplay.value == "zap+record"
self.timer.rename_repeat = self.timerentry_renamerepeat.value
if self.timerentry_justplay.value == "zap":
if not self.timerentry_showendtime.value:
self.timerentry_endtime.value = self.timerentry_starttime.value
self.timer.resetRepeated()
self.timer.afterEvent = {
"nothing": AFTEREVENT.NONE,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"standby": AFTEREVENT.STANDBY,
"auto": AFTEREVENT.AUTO
}[self.timerentry_afterevent.value]
self.timer.descramble = {
"normal": True,
"descrambled+ecm": True,
"scrambled+ecm": False,
}[self.timerentry_recordingtype.value]
self.timer.record_ecm = {
"normal": False,
"descrambled+ecm": True,
"scrambled+ecm": True,
}[self.timerentry_recordingtype.value]
self.timer.service_ref = self.timerentry_service_ref
self.timer.tags = self.timerentry_tags
if self.timer.dirname or self.timerentry_dirname.value != defaultMoviePath():
self.timer.dirname = self.timerentry_dirname.value
config.movielist.last_timer_videodir.value = self.timer.dirname
config.movielist.last_timer_videodir.save()
if self.timerentry_type.value == "once":
self.timer.begin, self.timer.end = self.getBeginEnd()
if self.timerentry_type.value == "repeated":
if self.timerentry_repeated.value == "daily":
for x in (0, 1, 2, 3, 4, 5, 6):
self.timer.setRepeated(x)
if self.timerentry_repeated.value == "weekly":
self.timer.setRepeated(self.timerentry_weekday.index)
if self.timerentry_repeated.value == "weekdays":
for x in (0, 1, 2, 3, 4):
self.timer.setRepeated(x)
if self.timerentry_repeated.value == "user":
for x in (0, 1, 2, 3, 4, 5, 6):
if self.timerentry_day[x].value:
self.timer.setRepeated(x)
self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
if self.timer.repeated:
self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_endtime.value)
else:
				self.timer.begin = self.getTimestamp(time(), self.timerentry_starttime.value)
				self.timer.end = self.getTimestamp(time(), self.timerentry_endtime.value)
# when a timer end is set before the start, add 1 day
if self.timer.end < self.timer.begin:
self.timer.end += 86400
if self.timer.eit is not None:
event = eEPGCache.getInstance().lookupEventId(self.timer.service_ref.ref, self.timer.eit)
if event:
n = event.getNumOfLinkageServices()
if n > 1:
tlist = []
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
parent = self.timer.service_ref.ref
selection = 0
for x in range(n):
i = event.getLinkageService(parent, x)
if i.toString() == ref.toString():
selection = x
tlist.append((i.getName(), i))
self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a subservice to record..."), list = tlist, selection = selection)
return
elif n > 0:
parent = self.timer.service_ref.ref
self.timer.service_ref = ServiceReference(event.getLinkageService(parent, 0))
self.saveTimer()
self.close((True, self.timer))
def changeTimerType(self):
self.timerentry_justplay.selectNext()
self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay)
self["config"].invalidate(self.timerJustplayEntry)
def incrementStart(self):
self.timerentry_starttime.increment()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [0, 0]:
self.timerentry_date.value += 86400
self["config"].invalidate(self.entryDate)
def decrementStart(self):
self.timerentry_starttime.decrement()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [23, 59]:
self.timerentry_date.value -= 86400
self["config"].invalidate(self.entryDate)
def incrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.increment()
self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.decrement()
self["config"].invalidate(self.entryEndTime)
def subserviceSelected(self, service):
if not service is None:
self.timer.service_ref = ServiceReference(service[1])
self.saveTimer()
self.close((True, self.timer))
def saveTimer(self):
self.session.nav.RecordTimer.saveTimer()
def keyCancel(self):
self.close((False,))
def pathSelected(self, res):
if res is not None:
if config.movielist.videodirs.value != self.timerentry_dirname.choices:
self.timerentry_dirname.setChoices(config.movielist.videodirs.value, default=res)
self.timerentry_dirname.value = res
def tagEditFinished(self, ret):
if ret is not None:
self.timerentry_tags = ret
self.timerentry_tagsset.setChoices([not ret and "None" or " ".join(ret)])
self["config"].invalidate(self.tagsSet)
class TimerLog(Screen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
self.log_entries = self.timer.log_entries[:]
self.fillLogList()
self["loglist"] = MenuList(self.list)
self["logentry"] = Label()
self["key_red"] = Button(_("Delete entry"))
self["key_green"] = Button()
self["key_blue"] = Button(_("Clear log"))
self.onShown.append(self.updateText)
self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
{
"ok": self.keyClose,
"cancel": self.keyClose,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"red": self.deleteEntry,
"blue": self.clearLog
}, -1)
self.setTitle(_("Timer log"))
def deleteEntry(self):
cur = self["loglist"].getCurrent()
if cur is None:
return
self.log_entries.remove(cur[1])
self.fillLogList()
self["loglist"].l.setList(self.list)
self.updateText()
def fillLogList(self):
self.list = [(str(strftime("%Y-%m-%d %H-%M", localtime(x[0])) + " - " + x[2]), x) for x in self.log_entries]
def clearLog(self):
self.log_entries = []
self.fillLogList()
self["loglist"].l.setList(self.list)
self.updateText()
def keyClose(self):
if self.timer.log_entries != self.log_entries:
self.timer.log_entries = self.log_entries
self.close((True, self.timer))
else:
self.close((False,))
def up(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.moveUp)
self.updateText()
def down(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.moveDown)
self.updateText()
def left(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.pageUp)
self.updateText()
def right(self):
self["loglist"].instance.moveSelection(self["loglist"].instance.pageDown)
self.updateText()
def updateText(self):
if self.list:
self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
else:
self["logentry"].setText("")
class InstantRecordTimerEntry(TimerEntry):
def __init__(self, session, timer, zap):
Screen.__init__(self, session)
self.setup_title = ""
self.timer = timer
self.timer.justplay = zap
self.entryDate = None
self.entryService = None
self.keyGo()
def keyGo(self, result = None):
if self.timer.justplay:
self.timer.end = self.timer.begin + (config.recording.margin_before.value * 60) + 1
self.timer.resetRepeated()
self.saveTimer()
def retval(self):
return self.timer
def saveTimer(self):
self.session.nav.RecordTimer.saveTimer()
|
mrnamingo/vix4-34-enigma2-bcm
|
lib/python/Screens/TimerEntry.py
|
Python
|
gpl-2.0
| 25,002 | 0.027718 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for rpc.dispatcher
"""
from nova import context
from nova.rpc import dispatcher
from nova.rpc import common as rpc_common
from nova import test
class RpcDispatcherTestCase(test.TestCase):
class API1(object):
RPC_API_VERSION = '1.0'
def __init__(self):
self.test_method_ctxt = None
self.test_method_arg1 = None
def test_method(self, ctxt, arg1):
self.test_method_ctxt = ctxt
self.test_method_arg1 = arg1
class API2(object):
RPC_API_VERSION = '2.1'
def __init__(self):
self.test_method_ctxt = None
self.test_method_arg1 = None
def test_method(self, ctxt, arg1):
self.test_method_ctxt = ctxt
self.test_method_arg1 = arg1
class API3(object):
RPC_API_VERSION = '3.5'
def __init__(self):
self.test_method_ctxt = None
self.test_method_arg1 = None
def test_method(self, ctxt, arg1):
self.test_method_ctxt = ctxt
self.test_method_arg1 = arg1
def setUp(self):
self.ctxt = context.RequestContext('fake_user', 'fake_project')
super(RpcDispatcherTestCase, self).setUp()
def tearDown(self):
super(RpcDispatcherTestCase, self).tearDown()
def _test_dispatch(self, version, expectations):
v2 = self.API2()
v3 = self.API3()
disp = dispatcher.RpcDispatcher([v2, v3])
disp.dispatch(self.ctxt, version, 'test_method', arg1=1)
self.assertEqual(v2.test_method_ctxt, expectations[0])
self.assertEqual(v2.test_method_arg1, expectations[1])
self.assertEqual(v3.test_method_ctxt, expectations[2])
self.assertEqual(v3.test_method_arg1, expectations[3])
def test_dispatch(self):
self._test_dispatch('2.1', (self.ctxt, 1, None, None))
self._test_dispatch('3.5', (None, None, self.ctxt, 1))
def test_dispatch_lower_minor_version(self):
self._test_dispatch('2.0', (self.ctxt, 1, None, None))
self._test_dispatch('3.1', (None, None, self.ctxt, 1))
def test_dispatch_higher_minor_version(self):
self.assertRaises(rpc_common.UnsupportedRpcVersion,
self._test_dispatch, '2.6', (None, None, None, None))
self.assertRaises(rpc_common.UnsupportedRpcVersion,
self._test_dispatch, '3.6', (None, None, None, None))
def test_dispatch_lower_major_version(self):
self.assertRaises(rpc_common.UnsupportedRpcVersion,
self._test_dispatch, '1.0', (None, None, None, None))
def test_dispatch_higher_major_version(self):
self.assertRaises(rpc_common.UnsupportedRpcVersion,
self._test_dispatch, '4.0', (None, None, None, None))
def test_dispatch_no_version_uses_v1(self):
v1 = self.API1()
disp = dispatcher.RpcDispatcher([v1])
disp.dispatch(self.ctxt, None, 'test_method', arg1=1)
self.assertEqual(v1.test_method_ctxt, self.ctxt)
self.assertEqual(v1.test_method_arg1, 1)
|
usc-isi/extra-specs
|
nova/tests/rpc/test_dispatcher.py
|
Python
|
apache-2.0
| 3,730 | 0.001072 |
from unittest import TestCase
from unittest.mock import Mock
from grapher import errors
from grapher.parsers import QueryParser
from grapher.parsers import query
from nose_parameterized import parameterized
class QueryParserTest(TestCase):
def setUp(self):
r = Mock()
r.args = Mock()
r.args.get = Mock()
query.request = r
@parameterized.expand([
({},
{'query': {}, 'skip': 0, 'limit': None}),
({'skip': '2'},
{'query': {}, 'skip': 2, 'limit': None}),
({
'query': '{"test":"test 1"}',
'skip': '0',
'limit': '10'
},
{'query': {'test': 'test 1'}, 'skip': 0, 'limit': 10}),
])
def test_parse(self, request_query, expected):
query.request.args.get.side_effect = lambda e: request_query[e] if e in request_query else None
actual = QueryParser.parse()
self.assertEqual(actual, expected)
def test_invalid_query(self):
query.request.args.get.return_value = 'invalid$query:{{{}'
with self.assertRaises(errors.BadRequestError):
QueryParser.parse()
|
lucasdavid/grapher
|
tests/parsers/query_test.py
|
Python
|
mit
| 1,148 | 0.000871 |
# Make print() work the same way under Python 2 and 3
from __future__ import print_function
import ystockquote as ysq
def _main():
for s in ["NA.TO", "XBB.TO", "NOU.V", "AP-UN.TO", "BRK-A", "AAPL"]:
print("=============================================")
print("s: {}".format(s))
print("get_name: {}".format(ysq.get_name(s)))
print("get_price: {}".format(ysq.get_price(s)))
print("get_volume: {}".format(ysq.get_volume(s)))
print("get_stock_exchange: {}".format(ysq.get_stock_exchange(s)))
print("get_market_cap: {}".format(ysq.get_market_cap(s)))
print("get_dividend_yield: {}".format(ysq.get_dividend_yield(s)))
print("get_price_earnings_ratio: {}".format(ysq.get_price_earnings_ratio(s)))
print("get_52_week_low: {}".format(ysq.get_52_week_low(s)))
print("get_52_week_high: {}".format(ysq.get_52_week_high(s)))
print("get_currency: {}".format(ysq.get_currency(s)))
if __name__ == '__main__':
_main()
|
mathieugouin/tradesim
|
demo/demo_ystockquote.py
|
Python
|
gpl-3.0
| 998 | 0.001002 |
# coding=utf-8
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from urllib import urlencode
try:
from urlparse import parse_qs
except ImportError:
from cgi import parse_qs
class SimplePaginator(object):
"""A simple wrapper around the Django paginator."""
def __init__(self, request, prefix, data, columns=None, per_page=20, orphans=1):
"""Initialize a Paginator and set some properties. Return a tuple
containing items and ordering key.
Keyword arguments:
request -- The request object
prefix -- The prefix for the controls' css-class
data -- Elements to paginate
columns -- A tuple of tuples containing column name and key (default None)
per_page -- How many elements to display per page (default 20)
        orphans -- If the last page would have this many items or fewer, merge them into the previous page (default 1)
"""
self.request = request
self.prefix = prefix
self.data = data
self.columns = columns
self.per_page = per_page
self.orphans = orphans
def get_base_url(self):
        '''Get the query string from the request, strip this prefix's page and
        order parameters and return two base URLs - one to append the page
        parameter to, one for the order parameter.
'''
# Get querystring and path, initialize variables
qsd = parse_qs(self.request.META['QUERY_STRING'])
path = self.request.META['PATH_INFO']
qs_pa = qs_or = ''
qs = baseurl = {}
        # Remove arguments that might be overwritten
if qsd:
if self.prefix + '_pa' in qsd:
qs_pa = qsd.pop(self.prefix + '_pa')[0]
if self.prefix + '_or' in qsd:
qs_or = qsd.pop(self.prefix + '_or')[0]
# Get querystring for both suffix variants
qs_base = [(k, qsd[k][0]) for k in qsd]
if qs_or:
qs['pa'] = urlencode(qs_base + [(self.prefix + '_or', qs_or)])
if qs_pa:
qs['or'] = urlencode(qs_base + [(self.prefix + '_pa', qs_pa)])
# Build base url
if 'pa' in qs:
baseurl['pa'] = '%s?%s&' % (path, qs['pa'])
if 'or' in qs:
baseurl['or'] = '%s?%s&' % (path, qs['or'])
if qsd:
if not 'pa' in baseurl:
baseurl['pa'] = '%s?%s&' % (path, urlencode(qs_base))
if not 'or' in baseurl:
baseurl['or'] = '%s?%s&' % (path, urlencode(qs_base))
else:
if not 'pa' in baseurl:
baseurl['pa'] = path + '?'
if not 'or' in baseurl:
baseurl['or'] = path + '?'
return baseurl
def paginate(self):
# Make sure page number is an int. If not, deliver first page.
try:
page = int(self.request.GET.get('%s_pa' % self.prefix, 1))
except ValueError:
page = 1
# Get sorting key
try:
order = int(self.request.GET.get('%s_or' % self.prefix, 0))
except ValueError:
order = 0
# Sort data
# First, check if data supports order_by (e.g. a queryset)
# TODO default ordering feature
if hasattr(self.data, 'order_by') and self.columns and order:
key = self.columns[abs(order) - 1][1]
model_attr = None
if hasattr(self.data, 'model') and hasattr(self.data.model, key):
model_attr = getattr(self.data.model, key)
if model_attr and callable(model_attr):
self.data = sorted(self.data, key=model_attr,
reverse=(order <= 0))
else:
order_str = '%s' if order > 0 else '-%s'
order_key = order_str % key
self.data = self.data.order_by(order_key)
# If data doesn't support order_by, sort by index
elif order:
sortfunc = lambda x: x[abs(order) - 1] * cmp(order, 0)
self.data = sorted(self.data, key=sortfunc)
# Initialize paginator
self.paginator = Paginator(self.data, self.per_page, self.orphans)
# Get pagination items for current page. If page request is out of
# range, deliver last page of results.
try:
items = self.paginator.page(page)
except (EmptyPage, InvalidPage):
items = self.paginator.page(self.paginator.num_pages)
# Get base url
baseurl = self.get_base_url()
return items, order, baseurl
def paginate(*args, **kwargs):
"""Shortcut function to avoid having to instantiate the SimplePaginator
Class."""
p = SimplePaginator(*args, **kwargs)
return p.paginate()
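# Minimal usage sketch (illustrative names, assuming a Django view with a
# queryset ``entries``):
#
#   columns = (('Title', 'title'), ('Created', 'created'))
#   items, order, baseurl = paginate(request, 'entries', entries,
#                                    columns=columns, per_page=10)
#   # pass items, order and baseurl to the template context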
|
dbrgn/django-simplepaginator
|
simple_paginator/__init__.py
|
Python
|
lgpl-3.0
| 4,672 | 0.001712 |
from .discrete import DiscreteSimulation
|
Lucretiel/genetics
|
genetics/simulation/__init__.py
|
Python
|
lgpl-2.1
| 41 | 0 |
import asyncio
import gta.utils
# The following metadata will not be processed but is recommended
# Author name and E-Mail
__author__ = 'Full Name <email@example.com>'
# Status of the script: Use one of 'Prototype', 'Development', 'Production'
__status__ = 'Development'
# The following metadata will be parsed and should always be provided
# Version number: This should always be a string and formatted in the x.x.x notation
__version__ = '0.0.1'
# A list of dependencies in the requirement specifiers format
# See: https://pip.pypa.io/en/latest/reference/pip_install.html#requirement-specifiers
__dependencies__ = ('aiohttp>=0.15.3',)
@asyncio.coroutine
def main():
"""
Does absolutely nothing but show you how to provide metadata.
"""
logger = gta.utils.get_logger('gta.metadata')
logger.debug('Hello from the metadata example')
|
lgrahl/scripthookvpy3k
|
python/scripts/metadata.py
|
Python
|
mit
| 857 | 0.002334 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Rackspace Hosting Inc. All Rights Reserved.
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from taskflow.utils import misc
from taskflow.utils import reflection
def _save_as_to_mapping(save_as):
"""Convert save_as to mapping name => index
Result should follow storage convention for mappings.
"""
# TODO(harlowja): we should probably document this behavior & convention
    # outside of code so that it's more easily understandable, since what a task
# returns is pretty crucial for other later operations.
if save_as is None:
return {}
if isinstance(save_as, six.string_types):
# NOTE(harlowja): this means that your task will only return one item
# instead of a dictionary-like object or a indexable object (like a
# list or tuple).
return {save_as: None}
elif isinstance(save_as, (tuple, list)):
# NOTE(harlowja): this means that your task will return a indexable
# object, like a list or tuple and the results can be mapped by index
# to that tuple/list that is returned for others to use.
return dict((key, num) for num, key in enumerate(save_as))
elif isinstance(save_as, set):
# NOTE(harlowja): in the case where a set is given we will not be
# able to determine the numeric ordering in a reliable way (since it is
# a unordered set) so the only way for us to easily map the result of
# the task will be via the key itself.
return dict((key, key) for key in save_as)
raise TypeError('Task provides parameter '
'should be str, set or tuple/list, not %r' % save_as)
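# Illustrative results of the conversion above:
#   _save_as_to_mapping('spam')            => {'spam': None}
#   _save_as_to_mapping(('a', 'b'))        => {'a': 0, 'b': 1}
#   _save_as_to_mapping(set(['x']))        => {'x': 'x'}
#   _save_as_to_mapping(None)              => {}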
def _build_rebind_dict(args, rebind_args):
if rebind_args is None:
return {}
elif isinstance(rebind_args, (list, tuple)):
rebind = dict(zip(args, rebind_args))
if len(args) < len(rebind_args):
rebind.update((a, a) for a in rebind_args[len(args):])
return rebind
elif isinstance(rebind_args, dict):
return rebind_args
else:
raise TypeError('Invalid rebind value: %s' % rebind_args)
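# Illustrative results of the conversion above:
#   _build_rebind_dict(['a', 'b'], ['x', 'y'])  => {'a': 'x', 'b': 'y'}
#   _build_rebind_dict(['a'], ['x', 'y'])       => {'a': 'x', 'y': 'y'}
#   _build_rebind_dict(['a'], {'a': 'ctx'})     => {'a': 'ctx'}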
def _check_args_mapping(task_name, rebind, args, accepts_kwargs):
args = set(args)
rebind = set(rebind.keys())
extra_args = rebind - args
missing_args = args - rebind
if not accepts_kwargs and extra_args:
raise ValueError('Extra arguments given to task %s: %s'
% (task_name, sorted(extra_args)))
if missing_args:
raise ValueError('Missing arguments for task %s: %s'
% (task_name, sorted(missing_args)))
def _build_arg_mapping(task_name, reqs, rebind_args, function, do_infer):
task_args = reflection.get_required_callable_args(function)
accepts_kwargs = reflection.accepts_kwargs(function)
result = {}
if reqs:
result.update((a, a) for a in reqs)
if do_infer:
result.update((a, a) for a in task_args)
result.update(_build_rebind_dict(task_args, rebind_args))
_check_args_mapping(task_name, result, task_args, accepts_kwargs)
return result
class BaseTask(object):
"""An abstraction that defines a potential piece of work that can be
applied and can be reverted to undo the work as a single unit.
"""
__metaclass__ = abc.ABCMeta
TASK_EVENTS = ('update_progress', )
def __init__(self, name, provides=None):
self._name = name
# An *immutable* input 'resource' name mapping this task depends
# on existing before this task can be applied.
#
# Format is input_name:arg_name
self.rebind = {}
# An *immutable* output 'resource' name dict this task
# produces that other tasks may depend on this task providing.
#
# Format is output index:arg_name
self.save_as = _save_as_to_mapping(provides)
# This identifies the version of the task to be ran which
# can be useful in resuming older versions of tasks. Standard
# major, minor version semantics apply.
self.version = (1, 0)
# List of callback functions to invoke when progress updated.
self._on_update_progress_notify = []
self._events_listeners = {}
@property
def name(self):
return self._name
def __str__(self):
return "%s==%s" % (self.name, misc.get_task_version(self))
@abc.abstractmethod
def execute(self, *args, **kwargs):
"""Activate a given task which will perform some operation and return.
This method can be used to apply some given context and given set
of args and kwargs to accomplish some goal. Note that the result
that is returned needs to be serializable so that it can be passed
back into this task if reverting is triggered.
"""
def revert(self, *args, **kwargs):
"""Revert this task using the given context, result that the apply
provided as well as any information which may have caused
said reversion.
"""
@property
def provides(self):
return set(self.save_as)
@property
def requires(self):
return set(self.rebind.values())
def update_progress(self, progress, **kwargs):
"""Update task progress and notify all registered listeners.
:param progress: task progress float value between 0 and 1
:param kwargs: task specific progress information
"""
self._trigger('update_progress', progress, **kwargs)
def _trigger(self, event, *args, **kwargs):
"""Execute all handlers for the given event type."""
if event in self._events_listeners:
for handler in self._events_listeners[event]:
event_data = self._events_listeners[event][handler]
handler(self, event_data, *args, **kwargs)
def bind(self, event, handler, **kwargs):
"""Attach a handler to an event for the task.
:param event: event type
:param handler: function to execute each time event is triggered
:param kwargs: optional named parameters that will be passed to the
event handler
:raises ValueError: if invalid event type passed
"""
if event not in self.TASK_EVENTS:
raise ValueError("Unknown task event %s" % event)
if event not in self._events_listeners:
self._events_listeners[event] = {}
self._events_listeners[event][handler] = kwargs
def unbind(self, event, handler=None):
"""Remove a previously-attached event handler from the task. If handler
function not passed, then unbind all event handlers.
:param event: event type
:param handler: previously attached to event function
"""
if event in self._events_listeners:
if not handler:
self._events_listeners[event] = {}
else:
if handler in self._events_listeners[event]:
self._events_listeners[event].pop(handler)
class Task(BaseTask):
"""Base class for user-defined tasks
Adds following features to Task:
- auto-generates name from type of self
- adds all execute argument names to task requirements
- items provided by the task may be specified via
'default_provides' class attribute or property
"""
default_provides = None
def __init__(self, name=None, provides=None, requires=None,
auto_extract=True, rebind=None):
"""Initialize task instance"""
if name is None:
name = reflection.get_callable_name(self)
if provides is None:
provides = self.default_provides
super(Task, self).__init__(name,
provides=provides)
self.rebind = _build_arg_mapping(self.name, requires, rebind,
self.execute, auto_extract)
class FunctorTask(BaseTask):
"""Adaptor to make task from a callable
Take any callable and make a task from it.
"""
def __init__(self, execute, name=None, provides=None,
requires=None, auto_extract=True, rebind=None, revert=None,
version=None):
"""Initialize FunctorTask instance with given callable and kwargs"""
if name is None:
name = reflection.get_callable_name(execute)
super(FunctorTask, self).__init__(name, provides=provides)
self._execute = execute
self._revert = revert
if version is not None:
self.version = version
self.rebind = _build_arg_mapping(self.name, requires, rebind,
execute, auto_extract)
def execute(self, *args, **kwargs):
return self._execute(*args, **kwargs)
def revert(self, *args, **kwargs):
if self._revert:
return self._revert(*args, **kwargs)
else:
return None
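# Usage sketch (the callable below is hypothetical, not part of this module):
#   def send_email(address, message):
#       ...
#   task = FunctorTask(send_email, provides='receipt')
#   # task.requires == set(['address', 'message']); task.provides == set(['receipt'])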
|
jessicalucci/TaskManagement
|
taskflow/task.py
|
Python
|
apache-2.0
| 9,682 | 0 |
# -*- coding: utf-8 -*-
"""
Disease Case Tracking and Contact Tracing
"""
if not settings.has_module(c):
raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
"Module's Home Page"
module_name = settings.modules[c].get("name_nice")
response.title = module_name
return {"module_name": module_name,
}
# -----------------------------------------------------------------------------
def disease():
""" Disease Information Controller """
return s3_rest_controller(rheader = s3db.disease_rheader)
# -----------------------------------------------------------------------------
def case():
""" Case Tracking Controller """
def prep(r):
if settings.get_disease_case_id():
ptable = s3db.pr_person
ptable.pe_label.label = T("ID")
if r.record:
# Do not allow changing the person ID
person_id = r.table.person_id
person_id.writable = False
person_id.comment = None
else:
dtable = s3db.disease_disease
diseases = db(dtable.deleted == False).select(dtable.id,
limitby=(0, 2)
)
if len(diseases) == 1:
# Default to only disease
field = r.table.disease_id
field.default = diseases.first().id
field.writable = False
component_name = r.component_name
if component_name in ("contact", "exposure"):
field = r.component.table.tracing_id
field.readable = field.writable = False
if component_name == "contact":
# Adapt CRUD strings to perspective
s3.crud_strings["disease_exposure"] = Storage(
label_create = T("Add Close Contact"),
title_display = T("Close Contact Details"),
title_list = T("Close Contacts"),
title_update = T("Edit Contact"),
label_list_button = T("List Close Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Close Contacts currently registered"))
return True
s3.prep = prep
def postp(r, output):
if isinstance(output, dict) and "buttons" in output:
buttons = output["buttons"]
if "list_btn" in buttons and "summary_btn" in buttons:
buttons["list_btn"] = buttons["summary_btn"]
return output
s3.postp = postp
return s3_rest_controller(rheader = s3db.disease_rheader)
# -----------------------------------------------------------------------------
def person():
""" Delegated person-controller for case tab """
def prep(r):
resource = r.resource
table = resource.table
table.pe_label.label = T("ID")
get_vars = r.get_vars
if "viewing" in get_vars:
try:
vtablename, record_id = get_vars["viewing"].split(".")
except ValueError:
return False
if vtablename == "disease_case":
# Get the person_id from the case
ctable = s3db[vtablename]
query = (ctable.id == record_id)
row = db(query).select(ctable.person_id,
limitby = (0, 1),
).first()
if not row:
r.error(404, current.ERROR.BAD_RECORD)
# Update the request
request = s3base.S3Request("pr", "person",
args = [str(row.person_id)],
vars = {},
)
r.resource = resource = request.resource
r.record = request.record
r.id = request.id
# Name fields in name-format order
NAMES = ("first_name", "middle_name", "last_name")
keys = s3base.StringTemplateParser.keys(settings.get_pr_name_format())
name_fields = [fn for fn in keys if fn in NAMES]
# Fields in form
from s3 import S3SQLInlineComponent
crud_fields = name_fields + \
["gender",
"date_of_birth",
S3SQLInlineComponent(
"contact",
fields = [("", "value")],
filterby = {"field": "contact_method",
"options": "SMS",
},
label = settings.get_ui_label_mobile_phone(),
multiple = False,
name = "phone",
),
S3SQLInlineComponent(
"contact",
fields = [("", "value")],
filterby = {"field": "contact_method",
"options": "EMAIL",
},
label = T("Email"),
multiple = False,
name = "email",
),
]
resource.configure(crud_form = s3base.S3SQLCustomForm(*crud_fields),
deletable = False,
)
return True
else:
return False
s3.prep = prep
def postp(r, output):
# Remove list- and summary-buttons
if r.record and isinstance(output, dict):
buttons = output.get("buttons")
if buttons:
buttons.pop("list_btn", None)
buttons.pop("summary_btn", None)
return output
s3.postp = postp
return s3_rest_controller("pr", "person",
rheader = s3db.disease_rheader,
)
# -----------------------------------------------------------------------------
def tracing():
""" Contact Tracing Controller """
def prep(r):
if r.id and r.component_name == "exposure":
ctable = r.component.table
case_id = ctable.case_id
case_id.default = r.id
case_id.readable = case_id.writable = False
crud_strings = s3.crud_strings[r.component.tablename]
crud_strings["label_create"] = T("Add Contact Person")
crud_strings["label_delete_button"] = T("Delete Contact Person")
return True
s3.prep = prep
return s3_rest_controller(rheader = s3db.disease_rheader)
# -----------------------------------------------------------------------------
def testing_report():
""" Testing Site Daily Summary Report: RESTful CRUD Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def testing_device():
""" Testing Device Registry: RESTful CRUD Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def case_diagnostics():
""" Diagnostic Tests: RESTful CRUD Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def statistic():
""" RESTful CRUD Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def stats_data():
""" RESTful CRUD Controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def stats_aggregate():
""" RESTful CRUD Controller """
return s3_rest_controller()
# END =========================================================================
|
flavour/eden
|
controllers/disease.py
|
Python
|
mit
| 8,481 | 0.008843 |
#!/usr/bin/env python
import sys
from fireplace import cards
from fireplace.exceptions import GameOver
from fireplace.utils import play_full_game
sys.path.append("..")
def test_full_game():
try:
play_full_game()
except GameOver:
print("Game completed normally.")
def main():
cards.db.initialize()
if len(sys.argv) > 1:
numgames = sys.argv[1]
if not numgames.isdigit():
sys.stderr.write("Usage: %s [NUMGAMES]\n" % (sys.argv[0]))
exit(1)
for i in range(int(numgames)):
test_full_game()
else:
test_full_game()
if __name__ == "__main__":
main()
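# Illustrative usage note (an assumption based on the argv handling above, not
# a documented interface): "python full_game.py 5" would play five full games,
# while running the script with no argument plays a single game.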
|
jleclanche/fireplace
|
tests/full_game.py
|
Python
|
agpl-3.0
| 577 | 0.025997 |
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import bpy
import pytest
import webbrowser
import blenderbim
import ifcopenshell
import ifcopenshell.util.representation
from blenderbim.bim.ifc import IfcStore
from mathutils import Vector
# Monkey-patch webbrowser opening since we want to test headlessly
webbrowser.open = lambda x: True
variables = {"cwd": os.getcwd(), "ifc": "IfcStore.get_file()"}
class NewFile:
@pytest.fixture(autouse=True)
def setup(self):
IfcStore.purge()
bpy.ops.wm.read_homefile(app_template="")
if bpy.data.objects:
bpy.data.batch_remove(bpy.data.objects)
bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
blenderbim.bim.handler.setDefaultProperties(None)
class NewIfc:
@pytest.fixture(autouse=True)
def setup(self):
IfcStore.purge()
bpy.ops.wm.read_homefile(app_template="")
bpy.data.batch_remove(bpy.data.objects)
bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
blenderbim.bim.handler.setDefaultProperties(None)
bpy.ops.bim.create_project()
def scenario(function):
def subfunction(self):
run(function(self))
return subfunction
def scenario_debug(function):
def subfunction(self):
run_debug(function(self))
return subfunction
def an_empty_ifc_project():
bpy.ops.bim.create_project()
def i_add_a_cube():
bpy.ops.mesh.primitive_cube_add()
def i_add_a_cube_of_size_size_at_location(size, location):
bpy.ops.mesh.primitive_cube_add(size=float(size), location=[float(co) for co in location.split(",")])
def the_object_name_is_selected(name):
i_deselect_all_objects()
additionally_the_object_name_is_selected(name)
def additionally_the_object_name_is_selected(name):
obj = bpy.context.scene.objects.get(name)
if not obj:
        assert False, f'The object "{name}" could not be selected'
bpy.context.view_layer.objects.active = obj
obj.select_set(True)
def i_deselect_all_objects():
bpy.context.view_layer.objects.active = None
bpy.ops.object.select_all(action="DESELECT")
def i_am_on_frame_number(number):
bpy.context.scene.frame_set(int(number))
def i_set_prop_to_value(prop, value):
try:
eval(f"bpy.context.{prop}")
except:
assert False, "Property does not exist"
try:
exec(f'bpy.context.{prop} = "{value}"')
except:
exec(f"bpy.context.{prop} = {value}")
def prop_is_value(prop, value):
is_value = False
try:
exec(f'assert bpy.context.{prop} == "{value}"')
is_value = True
except:
try:
exec(f"assert bpy.context.{prop} == {value}")
is_value = True
except:
try:
exec(f"assert list(bpy.context.{prop}) == {value}")
is_value = True
except:
pass
if not is_value:
actual_value = eval(f"bpy.context.{prop}")
assert False, f"Value is {actual_value}"
def i_enable_prop(prop):
exec(f"bpy.context.{prop} = True")
def i_press_operator(operator):
if "(" in operator:
exec(f"bpy.ops.{operator}")
else:
exec(f"bpy.ops.{operator}()")
def i_rename_the_object_name1_to_name2(name1, name2):
the_object_name_exists(name1).name = name2
def the_object_name_exists(name):
obj = bpy.data.objects.get(name)
if not obj:
assert False, f'The object "{name}" does not exist'
return obj
def an_ifc_file_exists():
ifc = IfcStore.get_file()
if not ifc:
assert False, "No IFC file is available"
return ifc
def an_ifc_file_does_not_exist():
ifc = IfcStore.get_file()
if ifc:
assert False, "An IFC is available"
def the_object_name_does_not_exist(name):
assert bpy.data.objects.get(name) is None, "Object exists"
def the_object_name_is_an_ifc_class(name, ifc_class):
ifc = an_ifc_file_exists()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
assert element.is_a(ifc_class), f'Object "{name}" is an {element.is_a()}'
def the_object_name_is_not_an_ifc_element(name):
id = the_object_name_exists(name).BIMObjectProperties.ifc_definition_id
assert id == 0, f"The ID is {id}"
def the_object_name_is_in_the_collection_collection(name, collection):
assert collection in [c.name for c in the_object_name_exists(name).users_collection]
def the_object_name_is_not_in_the_collection_collection(name, collection):
assert collection not in [c.name for c in the_object_name_exists(name).users_collection]
def the_object_name_has_a_body_of_value(name, value):
assert the_object_name_exists(name).data.body == value
def the_collection_name1_is_in_the_collection_name2(name1, name2):
assert bpy.data.collections.get(name2).children.get(name1)
def the_collection_name1_is_not_in_the_collection_name2(name1, name2):
assert not bpy.data.collections.get(name2).children.get(name1)
def the_object_name_is_placed_in_the_collection_collection(name, collection):
obj = the_object_name_exists(name)
[c.objects.unlink(obj) for c in obj.users_collection]
bpy.data.collections.get(collection).objects.link(obj)
def the_object_name_has_a_type_representation_of_context(name, type, context):
ifc = an_ifc_file_exists()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
context, subcontext, target_view = context.split("/")
assert ifcopenshell.util.representation.get_representation(
element, context, subcontext or None, target_view or None
)
def the_object_name_is_contained_in_container_name(name, container_name):
ifc = an_ifc_file_exists()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
container = ifcopenshell.util.element.get_container(element)
if not container:
assert False, f'Object "{name}" is not in any container'
assert container.Name == container_name, f'Object "{name}" is in {container}'
def i_duplicate_the_selected_objects():
bpy.ops.object.duplicate_move()
blenderbim.bim.handler.active_object_callback()
def i_delete_the_selected_objects():
bpy.ops.object.delete()
blenderbim.bim.handler.active_object_callback()
def the_object_name1_and_name2_are_different_elements(name1, name2):
ifc = an_ifc_file_exists()
element1 = ifc.by_id(the_object_name_exists(name1).BIMObjectProperties.ifc_definition_id)
element2 = ifc.by_id(the_object_name_exists(name2).BIMObjectProperties.ifc_definition_id)
assert element1 != element2, f"Objects {name1} and {name2} have same elements {element1} and {element2}"
def the_file_name_should_contain_value(name, value):
with open(name, "r") as f:
assert value in f.read()
def the_object_name1_has_a_boolean_difference_by_name2(name1, name2):
obj = the_object_name_exists(name1)
for modifier in obj.modifiers:
if modifier.type == "BOOLEAN" and modifier.object and modifier.object.name == name2:
return True
assert False, "No boolean found"
def the_object_name1_has_no_boolean_difference_by_name2(name1, name2):
obj = the_object_name_exists(name1)
for modifier in obj.modifiers:
if modifier.type == "BOOLEAN" and modifier.object and modifier.object.name == name2:
assert False, "A boolean was found"
def the_object_name_is_voided_by_void(name, void):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
for rel in element.HasOpenings:
if rel.RelatedOpeningElement.Name == void:
return True
assert False, "No void found"
def the_object_name_is_not_voided_by_void(name, void):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
for rel in element.HasOpenings:
if rel.RelatedOpeningElement.Name == void:
assert False, "A void was found"
def the_object_name_is_not_voided(name):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
if any(element.HasOpenings):
assert False, "An opening was found"
def the_object_name_is_not_a_void(name):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
if any(element.VoidsElements):
assert False, "A void was found"
def the_void_name_is_filled_by_filling(name, filling):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
if any(rel.RelatedBuildingElement.Name == filling for rel in element.HasFillings):
return True
assert False, "No filling found"
def the_void_name_is_not_filled_by_filling(name, filling):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
if any(rel.RelatedBuildingElement.Name == filling for rel in element.HasFillings):
assert False, "A filling was found"
def the_object_name_is_not_a_filling(name):
ifc = IfcStore.get_file()
element = ifc.by_id(the_object_name_exists(name).BIMObjectProperties.ifc_definition_id)
if any(element.FillsVoids):
assert False, "A filling was found"
def the_object_name_should_display_as_mode(name, mode):
assert the_object_name_exists(name).display_type == mode
def the_object_name_has_number_vertices(name, number):
total = len(the_object_name_exists(name).data.vertices)
assert total == int(number), f"We found {total} vertices"
def the_object_name_is_at_location(name, location):
obj_location = the_object_name_exists(name).location
assert (
obj_location - Vector([float(co) for co in location.split(",")])
).length < 0.1, f"Object is at {obj_location}"
def the_variable_key_is_value(key, value):
variables[key] = eval(value)
definitions = {
'the variable "(.*)" is "(.*)"': the_variable_key_is_value,
"an empty IFC project": an_empty_ifc_project,
"I add a cube": i_add_a_cube,
'I add a cube of size "([0-9]+)" at "(.*)"': i_add_a_cube_of_size_size_at_location,
'the object "(.*)" is selected': the_object_name_is_selected,
'additionally the object "(.*)" is selected': additionally_the_object_name_is_selected,
"I deselect all objects": i_deselect_all_objects,
'I am on frame "([0-9]+)"': i_am_on_frame_number,
'I set "(.*)" to "(.*)"': i_set_prop_to_value,
'"(.*)" is "(.*)"': prop_is_value,
'I enable "(.*)"': i_enable_prop,
'I press "(.*)"': i_press_operator,
'I rename the object "(.*)" to "(.*)"': i_rename_the_object_name1_to_name2,
'the object "(.*)" exists': the_object_name_exists,
'the object "(.*)" does not exist': the_object_name_does_not_exist,
'the object "(.*)" is an "(.*)"': the_object_name_is_an_ifc_class,
'the object "(.*)" is not an IFC element': the_object_name_is_not_an_ifc_element,
'the object "(.*)" is in the collection "(.*)"': the_object_name_is_in_the_collection_collection,
'the object "(.*)" is not in the collection "(.*)"': the_object_name_is_not_in_the_collection_collection,
'the object "(.*)" has a body of "(.*)"': the_object_name_has_a_body_of_value,
'the collection "(.*)" is in the collection "(.*)"': the_collection_name1_is_in_the_collection_name2,
'the collection "(.*)" is not in the collection "(.*)"': the_collection_name1_is_not_in_the_collection_name2,
"an IFC file exists": an_ifc_file_exists,
"an IFC file does not exist": an_ifc_file_does_not_exist,
'the object "(.*)" has a "(.*)" representation of "(.*)"': the_object_name_has_a_type_representation_of_context,
'the object "(.*)" is placed in the collection "(.*)"': the_object_name_is_placed_in_the_collection_collection,
'the object "(.*)" is contained in "(.*)"': the_object_name_is_contained_in_container_name,
"I duplicate the selected objects": i_duplicate_the_selected_objects,
"I delete the selected objects": i_delete_the_selected_objects,
'the object "(.*)" and "(.*)" are different elements': the_object_name1_and_name2_are_different_elements,
'the file "(.*)" should contain "(.*)"': the_file_name_should_contain_value,
'the object "(.*)" has a boolean difference by "(.*)"': the_object_name1_has_a_boolean_difference_by_name2,
'the object "(.*)" has no boolean difference by "(.*)"': the_object_name1_has_no_boolean_difference_by_name2,
'the object "(.*)" is voided by "(.*)"': the_object_name_is_voided_by_void,
'the object "(.*)" is not voided by "(.*)"': the_object_name_is_not_voided_by_void,
'the object "(.*)" is not a void': the_object_name_is_not_a_void,
'the object "(.*)" is not voided': the_object_name_is_not_voided,
'the object "(.*)" should display as "(.*)"': the_object_name_should_display_as_mode,
'the object "(.*)" has "([0-9]+)" vertices': the_object_name_has_number_vertices,
'the object "(.*)" is at "(.*)"': the_object_name_is_at_location,
"nothing interesting happens": lambda: None,
'the void "(.*)" is filled by "(.*)"': the_void_name_is_filled_by_filling,
'the void "(.*)" is not filled by "(.*)"': the_void_name_is_not_filled_by_filling,
'the object "(.*)" is not a filling': the_object_name_is_not_a_filling,
}
# Super lightweight Gherkin implementation
def run(scenario):
keywords = ["Given", "When", "Then", "And", "But"]
for line in scenario.split("\n"):
for key, value in variables.items():
line = line.replace("{" + key + "}", str(value))
for keyword in keywords:
line = line.replace(keyword, "")
line = line.strip()
if not line:
continue
match = None
for definition, callback in definitions.items():
match = re.search("^" + definition + "$", line)
if match:
try:
callback(*match.groups())
except AssertionError as e:
assert False, f"Failed: {line}, with error: {e}"
break
if not match:
assert False, f"Definition not implemented: {line}"
return True
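# A minimal illustrative scenario for run() (a sketch, not part of the test
# suite; step names are assumed to match the regexes registered in
# `definitions` above -- each line has its Gherkin keyword stripped before
# matching):
#
#     run("""
#         Given an empty IFC project
#         When I add a cube
#         Then the object "Cube" exists
#     """)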
def run_debug(scenario, blend_filepath=None):
try:
result = run(scenario)
except Exception as e:
if blend_filepath:
bpy.ops.wm.save_as_mainfile(filepath=blend_filepath)
assert False, e
if blend_filepath:
bpy.ops.wm.save_as_mainfile(filepath=blend_filepath)
return result
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/test/bim/bootstrap.py
|
Python
|
lgpl-3.0
| 15,500 | 0.003032 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .. import base
from girder.api import access, describe
from girder.api.rest import Resource
OrderedRoutes = [
('GET', (), ''),
('GET', (':id',), '/{id}'),
('UNKNOWN', (':id',), '/{id}'),
('GET', (':id', 'action'), '/{id}/action'),
('GET', ('action',), '/action'),
('PUT', ('action',), '/action'),
('POST', ('action',), '/action'),
('PATCH', ('action',), '/action'),
('DELETE', ('action',), '/action'),
('NEWMETHOD', ('action',), '/action'),
('UNKNOWN', ('action',), '/action'),
('GET', ('action', ':id'), '/action/{id}'),
('GET', ('noaction',), '/noaction')
]
class DummyResource(Resource):
def __init__(self):
self.resourceName = 'foo'
for method, pathElements, testPath in OrderedRoutes:
self.route(method, pathElements, self.handler)
@access.public
def handler(self, **kwargs):
return kwargs
handler.description = describe.Description('Does nothing')
def setUpModule():
server = base.startServer()
server.root.api.v1.accesstest = DummyResource()
def tearDownModule():
base.stopServer()
class ApiDescribeTestCase(base.TestCase):
"""
Makes sure our swagger auto API docs are working.
"""
def testInvalidResource(self):
methods = ['DELETE', 'GET', 'PATCH', 'POST', 'PUT']
for m in methods:
resp = self.request(path='/not_valid', method=m, isJson=False)
self.assertStatus(resp, 404)
methods.remove('GET')
for m in methods:
resp = self.request(path='', method=m, isJson=False)
self.assertStatus(resp, 405)
def testApiDescribe(self):
# Get coverage for serving the static swagger page
resp = self.request(path='', method='GET', isJson=False)
self.assertStatusOk(resp)
# Test top level describe endpoint
resp = self.request(path='/describe', method='GET')
self.assertStatusOk(resp)
self.assertEqual(resp.json['swaggerVersion'], describe.SWAGGER_VERSION)
self.assertEqual(resp.json['apiVersion'], describe.API_VERSION)
self.assertTrue({'path': '/group'} in resp.json['apis'])
# Request a specific resource's description, sanity check
resp = self.request(path='/describe/user', method='GET')
self.assertStatusOk(resp)
for routeDoc in resp.json['apis']:
self.assertHasKeys(('path', 'operations'), routeDoc)
self.assertTrue(len(routeDoc['operations']) > 0)
# Request an unknown resource's description to get an error
resp = self.request(path='/describe/unknown', method='GET')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid resource: unknown')
def testRouteOrder(self):
# Check that the resources and operations are listed in the order we
# expect
resp = self.request(path='/describe/foo', method='GET')
self.assertStatusOk(resp)
listedRoutes = [(method['httpMethod'], route['path'])
for route in resp.json['apis']
for method in route['operations']]
expectedRoutes = [(method, '/foo'+testPath)
for method, pathElements, testPath in OrderedRoutes]
self.assertEqual(listedRoutes, expectedRoutes)
|
chrismattmann/girder
|
tests/cases/api_describe_test.py
|
Python
|
apache-2.0
| 4,148 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009,
# 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
BibIndex indexing engine implementation.
See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import re
import sys
import time
import fnmatch
import inspect
from datetime import datetime
from six import iteritems
from invenio.config import CFG_SOLR_URL
from invenio.legacy.bibindex.engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS, \
CFG_BIBINDEX_INDEX_TABLE_TYPE, \
CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR, \
CFG_BIBINDEX_UPDATE_MESSAGE, \
CFG_BIBINDEX_UPDATE_MODE, \
CFG_BIBINDEX_TOKENIZER_TYPE, \
CFG_BIBINDEX_WASH_INDEX_TERMS, \
CFG_BIBINDEX_SPECIAL_TAGS
from invenio.legacy.bibauthority.config import \
CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC
from invenio.legacy.bibauthority.engine import get_index_strings_by_control_no,\
get_control_nos_from_recID
from invenio.legacy.search_engine import perform_request_search, \
get_index_stemming_language, \
get_synonym_terms, \
search_pattern, \
search_unit_in_bibrec
from invenio.legacy.dbquery import run_sql, DatabaseError, serialize_via_marshal, \
deserialize_via_marshal, wash_table_column_name
from invenio.legacy.bibindex.engine_washer import wash_index_term
from invenio.legacy.bibsched.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required
from intbitset import intbitset
from invenio.ext.logging import register_exception
from invenio.legacy.bibrank.adminlib import get_def_name
from invenio.legacy.miscutil.solrutils_bibindex_indexer import solr_commit
from invenio.modules.indexer.tokenizers.BibIndexJournalTokenizer import \
CFG_JOURNAL_TAG, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM, \
CFG_JOURNAL_PUBINFO_STANDARD_FORM_REGEXP_CHECK
from invenio.legacy.bibindex.termcollectors import TermCollector
from invenio.legacy.bibindex.engine_utils import load_tokenizers, \
get_all_index_names_and_column_values, \
get_index_tags, \
get_field_tags, \
get_marc_tag_indexes, \
get_nonmarc_tag_indexes, \
get_all_indexes, \
get_index_virtual_indexes, \
get_virtual_index_building_blocks, \
get_index_id_from_index_name, \
run_sql_drop_silently, \
get_min_last_updated, \
remove_inexistent_indexes, \
get_all_synonym_knowledge_bases, \
get_index_remove_stopwords, \
get_index_remove_html_markup, \
get_index_remove_latex_markup, \
filter_for_virtual_indexes, \
get_records_range_for_index, \
make_prefix, \
list_union, \
recognize_marc_tag
from invenio.modules.records.api import get_record
from invenio.utils.memoise import Memoise
from invenio.legacy.bibindex.termcollectors import \
TermCollector, \
NonmarcTermCollector
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
# precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
re_prefix = re.compile('__[a-zA-Z1-9]*__')
nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
_last_word_table = None
_TOKENIZERS = load_tokenizers()
def list_unique(_list):
"""Returns a _list with duplicates removed."""
_dict = {}
for e in _list:
_dict[e] = 1
return _dict.keys()
# safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS,
thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
    MAX_THREADS of them, kill all threads that are in a sleeping
    state for more than THREAD_TIMEOUT seconds. (This is useful
    for working around the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id, ))
write_message("WARNING: too many DB threads, " + \
"killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
"""Return list of ASSOCIATED_SUBFIELD_CODE, if exists, for record
RECID and TAG of value VALUE. Used by fulltext indexer only.
Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
    otherwise an empty string is returned.
FIXME: what if many tag values have the same value but different
associated_subfield_code? Better use bibrecord library for this.
"""
out = ""
if len(tag) != 6:
return out
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
%%s%%""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag[:-1]))
field_number = -1
for row in res:
if row[1] == tag and row[2] == value:
field_number = row[0]
if field_number > 0:
for row in res:
if row[0] == field_number and row[1] == tag[:-1] + associated_subfield_code:
out = row[2]
break
return out
def get_author_canonical_ids_for_recid(recID):
"""
Return list of author canonical IDs (e.g. `J.Ellis.1') for the
given record. Done by consulting BibAuthorID module.
"""
return []
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Atomically swap reindexed temporary table with the original one.
Delete the now-old one."""
write_message("Putting new tmp index tables " + \
"for id %s into production" % index_id)
run_sql(
"RENAME TABLE " +
"idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
"%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
"idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
"%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
"%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
"idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
"%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) +
"%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) +
"idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) +
"%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id)
)
write_message("Dropping old index tables for id %s" % index_id)
run_sql_drop_silently("""DROP TABLE old_idxWORD%02dR,
old_idxWORD%02dF,
old_idxPAIR%02dR,
old_idxPAIR%02dF,
old_idxPHRASE%02dR,
old_idxPHRASE%02dF""" % ((index_id, )* 6)
) # kwalitee: disable=sql
def init_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
"""Create reindexing temporary tables."""
write_message("Creating new tmp index tables for id %s" % index_id)
query = """DROP TABLE IF EXISTS %sidxWORD%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(50) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxWORD%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxWORD%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term varchar(100) default NULL,
hitlist longblob,
PRIMARY KEY (id),
UNIQUE KEY term (term)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPAIR%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPAIR%02dR (
id_bibrec mediumint(9) unsigned NOT NULL,
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dF""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dF (
id mediumint(9) unsigned NOT NULL auto_increment,
term text default NULL,
hitlist longblob,
PRIMARY KEY (id),
KEY term (term(50))
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
query = """DROP TABLE IF EXISTS %sidxPHRASE%02dR""" % \
(wash_table_column_name(reindex_prefix), index_id)
run_sql_drop_silently(query) # kwalitee: disable=sql
run_sql("""CREATE TABLE %sidxPHRASE%02dR (
id_bibrec mediumint(9) unsigned NOT NULL default '0',
termlist longblob,
type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
PRIMARY KEY (id_bibrec,type)
) ENGINE=MyISAM""" % (reindex_prefix, index_id))
def remove_subfields(s):
"Removes subfields from string, e.g. 'foo $$c bar' becomes 'foo bar'."
return re_subfields.sub(' ', s)
def get_field_indexes(field):
"""Returns indexes names and ids corresponding to the given field"""
if recognize_marc_tag(field):
#field is actually a tag
return get_marc_tag_indexes(field, virtual=False)
else:
return get_nonmarc_tag_indexes(field, virtual=False)
get_field_indexes_memoised = Memoise(get_field_indexes)
def get_index_tokenizer(index_id):
"""Returns value of a tokenizer field from idxINDEX database table
@param index_id: id of the index
"""
query = "SELECT tokenizer FROM idxINDEX WHERE id=%s" % index_id
out = None
try:
res = run_sql(query)
if res:
out = _TOKENIZERS[res[0][0]]
except DatabaseError:
write_message("Exception caught for SQL statement: %s; " + \
"column tokenizer might not exist" % query, sys.stderr)
except KeyError:
write_message("Exception caught: there is no such tokenizer")
out = None
return out
def detect_tokenizer_type(tokenizer):
"""
Checks what is the main type of the tokenizer.
For more information on tokenizer types take
a look at BibIndexTokenizer class.
@param tokenizer: instance of a tokenizer
"""
from invenio.modules.indexer.tokenizers.BibIndexStringTokenizer import BibIndexStringTokenizer
from invenio.modules.indexer.tokenizers.BibIndexRecJsonTokenizer import BibIndexRecJsonTokenizer
from invenio.modules.indexer.tokenizers.BibIndexMultiFieldTokenizer import BibIndexMultiFieldTokenizer
tokenizer_inheritance_tree = inspect.getmro(tokenizer.__class__)
if BibIndexStringTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['string']
if BibIndexMultiFieldTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['multifield']
if BibIndexRecJsonTokenizer in tokenizer_inheritance_tree:
return CFG_BIBINDEX_TOKENIZER_TYPE['recjson']
return CFG_BIBINDEX_TOKENIZER_TYPE['unknown']
def get_last_updated_all_indexes():
"""Returns last modification date for all defined indexes"""
query= """SELECT name, last_updated FROM idxINDEX"""
res = run_sql(query)
return res
def split_ranges(parse_string):
"""Parse a string a return the list or ranges."""
recIDs = []
ranges = parse_string.split(",")
for arange in ranges:
tmp_recIDs = arange.split("-")
if len(tmp_recIDs) == 1:
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[0])])
else:
if int(tmp_recIDs[0]) > int(tmp_recIDs[1]): # sanity check
tmp = tmp_recIDs[0]
tmp_recIDs[0] = tmp_recIDs[1]
tmp_recIDs[1] = tmp
recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[1])])
return recIDs
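# For illustration: split_ranges("1-5,10,20-15") returns
# [[1, 5], [10, 10], [15, 20]] -- single recIDs become one-element ranges and
# reversed bounds are swapped by the sanity check above.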
def get_word_tables(tables):
""" Given a list of table names it return a list of tuples
(index_id, index_name, index_tags).
"""
wordTables = []
if tables:
for index in tables:
index_id = get_index_id_from_index_name(index)
if index_id:
wordTables.append((index_id, index, get_index_tags(index)))
else:
write_message("Error: There is no %s words table." % \
index, sys.stderr)
return wordTables
def get_date_range(var):
"Returns the two dates contained as a low,high tuple"
limits = var.split(",")
if len(limits) == 1:
low = get_datetime(limits[0])
return low, None
if len(limits) == 2:
low = get_datetime(limits[0])
high = get_datetime(limits[1])
return low, high
return None, None
def create_range_list(res):
"""Creates a range list from a recID select query result contained
in res. The result is expected to have ascending numerical order."""
if not res:
return []
row = res[0]
if not row:
return []
else:
range_list = [[row, row]]
for row in res[1:]:
row_id = row
if row_id == range_list[-1][1] + 1:
range_list[-1][1] = row_id
else:
range_list.append([row_id, row_id])
return range_list
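# For illustration (assuming res is a flat, ascending sequence of recIDs):
# create_range_list([1, 2, 3, 7, 8, 10]) returns [[1, 3], [7, 8], [10, 10]],
# i.e. consecutive recIDs are collapsed into ranges.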
def beautify_range_list(range_list):
"""Returns a non overlapping, maximal range list"""
ret_list = []
for new in range_list:
found = 0
for old in ret_list:
if new[0] <= old[0] <= new[1] + 1 or new[0] - 1 <= old[1] <= new[1]:
old[0] = min(old[0], new[0])
old[1] = max(old[1], new[1])
found = 1
break
if not found:
ret_list.append(new)
return ret_list
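# For illustration: beautify_range_list([[1, 5], [4, 10], [12, 12]]) returns
# [[1, 10], [12, 12]] -- overlapping or adjacent ranges are merged in place.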
def truncate_index_table(index_name):
"""Properly truncate the given index."""
index_id = get_index_id_from_index_name(index_name)
if index_id:
write_message('Truncating %s index table in order to reindex.' % \
index_name, verbose=2)
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE id=%s""", (index_id, ))
run_sql("TRUNCATE idxWORD%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxWORD%02dR" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dF" % index_id) # kwalitee: disable=sql
run_sql("TRUNCATE idxPHRASE%02dR" % index_id) # kwalitee: disable=sql
def update_index_last_updated(indexes, starting_time=None):
"""Update last_updated column of the index table in the database.
Puts starting time there so that if the task
was interrupted for record download,
the records will be reindexed next time.
@param indexes: list of indexes names
"""
if starting_time is None:
return None
for index_name in indexes:
write_message("updating last_updated to %s...for %s index" % \
(starting_time, index_name), verbose=9)
run_sql("UPDATE idxINDEX SET last_updated=%s WHERE name=%s",
(starting_time, index_name))
def get_percentage_completed(num_done, num_total):
""" Return a string containing the approx. percentage completed """
percentage_remaining = 100.0 * float(num_done) / float(num_total)
if percentage_remaining:
percentage_display = "(%.1f%%)" % (percentage_remaining, )
else:
percentage_display = ""
return percentage_display
def _fill_dict_of_indexes_with_empty_sets():
"""find_affected_records internal function.
Creates dict: {'index_name1':set([]), ...}
"""
index_dict = {}
tmp_all_indexes = get_all_indexes(virtual=False)
for index in tmp_all_indexes:
index_dict[index] = set([])
return index_dict
def find_affected_records_for_index(indexes=None, recIDs=None, force_all_indexes=False):
"""
    Function checks which records need to be changed/reindexed
    for the given index/indexes.
    Makes use of the hstRECORD table where
    different revisions of a record are kept.
    If the parameter force_all_indexes is set,
    the function will assign all recIDs to all indexes.
    @param indexes: names of indexes for reindexation separated by comma
@param recIDs: recIDs for reindexation in form:
[[range1_down, range1_up],[range2_down, range2_up]..]
@param force_all_indexes: should we index all indexes?
"""
if indexes is None:
indexes = []
if recIDs is None:
recIDs = []
tmp_dates = dict(get_last_updated_all_indexes())
modification_dates = dict([(date, tmp_dates[date] or datetime(1000, 1, 1, 1, 1, 1))
for date in tmp_dates])
tmp_all_indexes = get_all_indexes(virtual=False)
indexes = remove_inexistent_indexes(indexes, leave_virtual=False)
if not indexes:
return {}
def _should_reindex_for_revision(index_name, revision_date):
try:
if modification_dates[index_name] < revision_date and \
index_name in indexes:
return True
return False
except KeyError:
return False
if force_all_indexes:
records_for_indexes = {}
all_recIDs = []
for recIDs_range in recIDs:
all_recIDs.extend(range(recIDs_range[0], recIDs_range[1]+1))
for index in indexes:
records_for_indexes[index] = all_recIDs
return records_for_indexes
min_last_updated = get_min_last_updated(indexes)[0][0] or \
datetime(1000, 1, 1, 1, 1, 1)
recIDs_info = []
for recIDs_range in recIDs:
# firstly, determine which records were updated since min_last_updated:
query = """SELECT id_bibrec,job_date,affected_fields FROM hstRECORD
WHERE id_bibrec BETWEEN %s AND %s AND
job_date > '%s'""" % \
(recIDs_range[0], recIDs_range[1], min_last_updated)
res = run_sql(query)
if res:
recIDs_info.extend(res)
# secondly, there may be newly inserted records which were
# uploaded with old timestamp (via 005), so let us detect
# those too, using their "real" modification_date:
res = run_sql("""SELECT bibrec.id,modification_date,''
FROM bibrec, hstRECORD
WHERE modification_date>%s
AND bibrec.id=id_bibrec
AND (SELECT COUNT(*) FROM hstRECORD WHERE id_bibrec=bibrec.id)=1""", (min_last_updated,))
if res:
recIDs_info.extend(res)
indexes_to_change = _fill_dict_of_indexes_with_empty_sets()
for recID_info in recIDs_info:
recID, revision, affected_fields = recID_info
affected_fields = affected_fields.split(",")
indexes_for_recID = set()
for field in affected_fields:
if field:
field_indexes = get_field_indexes_memoised(field) or []
indexes_names = set([idx[1] for idx in field_indexes])
indexes_for_recID |= indexes_names
else:
# record was inserted, all fields were changed,
# no specific affected fields
indexes_for_recID |= set(tmp_all_indexes)
indexes_for_recID_filtered = [ind for ind in indexes_for_recID if _should_reindex_for_revision(ind, revision)]
for index in indexes_for_recID_filtered:
indexes_to_change[index].add(recID)
indexes_to_change = dict((k, list(sorted(v))) for k, v in iteritems(indexes_to_change) if v)
return indexes_to_change
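# For illustration: the returned mapping has the form
# {'author': [10], 'title': [10, 11]}, i.e. index name -> sorted list of
# recIDs whose stored revisions are newer than that index's last update
# (the index names here are purely illustrative).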
def chunk_generator(rng):
"""
Splits one range into several smaller ones
with respect to global chunksize variable.
@param rng: range of records
@type rng: list in the form: [1, 2000]
"""
global chunksize
current_low = rng[0]
current_high = rng[0]
if rng[0] == None or rng[1] == None:
raise StopIteration
if rng[1] - rng[0] + 1 <= chunksize:
yield rng
else:
while current_high - 1 < rng[1]:
current_high += chunksize
yield current_low, min(current_high - 1, rng[1])
current_low += chunksize
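# For illustration (with the default chunksize of 1000):
# chunk_generator([1, 2500]) yields (1, 1000), (1001, 2000) and (2001, 2500).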
class AbstractIndexTable(object):
"""
This class represents an index table in database.
An index consists of three different kinds of tables:
table which stores only words in db,
table which stores pairs of words and
table which stores whole phrases.
The class represents only one table. Another instance of
the class must be created in order to store different
type of terms.
This class is an abstract class. It contains methods
to connect to db and methods which facilitate
    inserting/modifying/removing terms from it. The class
    also contains methods which help with managing the memory.
All specific methods for indexing can be found in corresponding
classes for virtual and regular indexes.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
self.index_name = index_name
self.index_id = get_index_id_from_index_name(index_name)
self.table_type = table_type
self.wash_index_terms = wash_index_terms
self.table_name = wash_table_column_name(table_prefix + \
"idx" + \
table_type + \
("%02d" % self.index_id) + "F")
self.table_prefix = table_prefix
self.value = {} # cache
self.recIDs_in_mem = []
def put_into_db(self, mode="normal"):
"""Updates the current words table in the corresponding DB
idxFOO table. Mode 'normal' means normal execution,
mode 'emergency' means words index reverting to old state.
"""
write_message("%s %s wordtable flush started" % \
(self.table_name, mode))
write_message('...updating %d words into %s started' % \
(len(self.value), self.table_name))
task_update_progress("(%s:%s) flushed %d/%d words" % \
(self.table_name, self.index_name, 0, len(self.value)))
self.recIDs_in_mem = beautify_range_list(self.recIDs_in_mem)
tab_name = self.table_name[:-1] + "R"
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='TEMPORARY' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='CURRENT'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
nb_words_total = len(self.value)
nb_words_report = int(nb_words_total / 10.0)
nb_words_done = 0
for word in self.value.keys():
self.put_word_into_db(word)
nb_words_done += 1
if nb_words_report != 0 and ((nb_words_done % nb_words_report) == 0):
write_message('......processed %d/%d words' % \
(nb_words_done, nb_words_total))
percentage_display = get_percentage_completed(nb_words_done, nb_words_total)
task_update_progress("(%s:%s) flushed %d/%d words %s" % \
(tab_name, self.index_name,
nb_words_done, nb_words_total,
percentage_display))
write_message('...updating %d words into %s ended' % \
(nb_words_total, tab_name))
write_message('...updating reverse table %s started' % tab_name)
if mode == "normal":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of updating wordTable into %s' % \
tab_name, verbose=9)
elif mode == "emergency":
for group in self.recIDs_in_mem:
query = """UPDATE %s SET type='CURRENT' WHERE id_bibrec
BETWEEN %%s AND %%s AND type='TEMPORARY'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
query = """DELETE FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s AND type='FUTURE'""" % tab_name
write_message(query % (group[0], group[1]), verbose=9)
run_sql(query, (group[0], group[1]))
write_message('End of emergency flushing wordTable into %s' % \
tab_name, verbose=9)
write_message('...updating reverse table %s ended' % tab_name)
self.clean()
self.recIDs_in_mem = []
write_message("%s %s wordtable flush ended" % \
(self.table_name, mode))
task_update_progress("(%s:%s) flush ended" % \
(self.table_name, self.index_name))
def put_word_into_db(self, word):
"""Flush a single word to the database and delete it from memory"""
set = self.load_old_recIDs(word)
if set is not None: # merge the word recIDs found in memory:
hitlist_was_changed = self.merge_with_old_recIDs(word, set)
if not hitlist_was_changed:
# nothing to update:
write_message("......... unchanged hitlist for ``%s''" % \
word, verbose=9)
else:
# yes there were some new words:
write_message("......... updating hitlist for ``%s''" % \
word, verbose=9)
run_sql("UPDATE %s SET hitlist=%%s WHERE term=%%s" % wash_table_column_name(self.table_name), (set.fastdump(), word)) # kwalitee: disable=sql
else: # the word is new, will create new set:
write_message("......... inserting hitlist for ``%s''" % \
word, verbose=9)
set = intbitset(self.value[word].keys())
try:
run_sql("INSERT INTO %s (term, hitlist) VALUES (%%s, %%s)" % wash_table_column_name(self.table_name), (word, set.fastdump())) # kwalitee: disable=sql
except Exception, e:
                ## We send this exception to the admin only when it is not
                ## already repairing the problem.
register_exception(prefix="Error when putting the term '%s' into db (hitlist=%s): %s\n" % (repr(word), set, e), alert_admin=(task_get_option('cmd') != 'repair'))
if not set: # never store empty words
run_sql("DELETE FROM %s WHERE term=%%s" % wash_table_column_name(self.table_name), (word,)) # kwalitee: disable=sql
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
        @param sign: sign of the word, 1 means keep this word in database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if word in value:
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except Exception as e:
write_message("Error: Cannot put word %s with sign %d for recID %s." % \
(word, sign, recID))
def load_old_recIDs(self, word):
"""Load existing hitlist for the word from the database index files."""
query = "SELECT hitlist FROM %s WHERE term=%%s" % self.table_name
res = run_sql(query, (word, ))
if res:
return intbitset(res[0][0])
else:
return None
def merge_with_old_recIDs(self, word, set):
"""Merge the system numbers stored in memory
(hash of recIDs with value +1 or -1 according
to whether to add/delete them) with those stored
in the database index and received in set universe
of recIDs for the given word.
Return False in case no change was done to SET, return True in case SET
was changed.
"""
oldset = intbitset(set)
set.update_with_signs(self.value[word])
return set != oldset
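    # Illustrative sketch of the merge semantics (values assumed): if the
    # hitlist loaded from the DB is intbitset([1, 2, 3]) and
    # self.value[word] == {2: -1, 5: 1}, then update_with_signs() drops
    # recID 2, adds recID 5, and this method returns True since the set changed.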
def clean(self):
"Cleans the cache."
self.value = {}
class VirtualIndexTable(AbstractIndexTable):
"""
There are two types of indexes: virtual and regular/normal.
Check WordTable class for more on normal indexes.
This class represents a single index table for virtual index
(see also: AbstractIndexTable).
Virtual index doesn't store its own terms,
it accumulates terms from other indexes.
    A good example of a virtual index is the global index which stores
terms from title, abstract, keyword, author and so on.
This class contains methods for indexing virtual indexes.
See also: run_update()
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""
Creates VirtualIndexTable instance.
@param index_name: name of the index we want to reindex
@param table_type: words, pairs or phrases
@param table_prefix: add "tmp_" if you want to
reindex to temporary table
"""
AbstractIndexTable.__init__(self, index_name,
table_type,
table_prefix,
wash_index_terms)
self.mode = "normal"
self.dependent_indexes = dict(get_virtual_index_building_blocks(self.index_id))
def set_reindex_mode(self):
"""
Sets reindex mode. VirtualIndexTable will
remove all its content from database and
use insert_index function to repopulate it.
"""
self.mode = "reindex"
def run_update(self, flush=10000):
"""
Function starts all updating processes for virtual index.
It will take all information about pending changes from database
from queue tables (idxWORD/PAIR/PHRASExxQ), process them
and trigger appropriate indexing functions.
@param flush: how many records we will put in one go
into database (at most);
see also: opt_flush in WordTable class
"""
global chunksize
if self.mode == "reindex":
self.clean_database()
for index_id, index_name in self.dependent_indexes.iteritems():
rng = get_records_range_for_index(index_id)
flush_count = 0
if not rng:
continue
write_message('Virtual index: %s is being reindexed for %s index' % \
(self.index_name, index_name))
chunks = chunk_generator(rng)
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.insert_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
else:
for index_id, index_name in self.dependent_indexes.iteritems():
query = """SELECT id_bibrec_low, id_bibrec_high, mode FROM %s
WHERE index_name=%%s
ORDER BY runtime ASC""" % \
(self.table_name[:-1] + "Q")
entries = self.remove_duplicates(run_sql(query, (index_name, )))
if entries:
write_message('Virtual index: %s is being updated for %s index' % \
(self.index_name, index_name))
for entry in entries:
operation = None
recID_low, recID_high, mode = entry
if mode == CFG_BIBINDEX_UPDATE_MODE["Update"]:
operation = self.update_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Remove"]:
operation = self.remove_index
elif mode == CFG_BIBINDEX_UPDATE_MODE["Insert"]:
operation = self.insert_index
flush_count = 0
chunks = chunk_generator([recID_low, recID_high])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
operation(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(list(chunk))
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
self.clean_queue_table(index_name)
def retrieve_new_values_from_index(self, index_id, records_range):
"""
Retrieves new values from dependent index
for specific range of records.
@param index_id: id of the dependent index
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
tab_name = "idx" + self.table_type + ("%02d" % index_id) + "R"
query = """SELECT id_bibrec, termlist FROM %s WHERE id_bibrec
BETWEEN %%s AND %%s""" % tab_name
new_regular_values = run_sql(query, (records_range[0], records_range[1]))
if new_regular_values:
zipped = zip(*new_regular_values)
new_regular_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
new_regular_values = dict()
return new_regular_values
def retrieve_old_values(self, records_range):
"""
Retrieves old values from database for this virtual index
for specific records range.
@param records_range: the smallest and the biggest id
in the range: [id_low, id_high]
"""
virtual_tab_name = self.table_name[:-1] + "R"
query = """SELECT id_bibrec, termlist FROM %s
WHERE type='CURRENT' AND
id_bibrec BETWEEN %%s AND %%s""" % virtual_tab_name
old_virtual_values = run_sql(query, (records_range[0], records_range[1]))
if old_virtual_values:
zipped = zip(*old_virtual_values)
old_virtual_values = dict(zip(zipped[0], map(deserialize_via_marshal, zipped[1])))
else:
old_virtual_values = dict()
return old_virtual_values
def update_index(self, index_id, recID_low, recID_high):
"""
Updates the state of virtual index for records in range:
recID_low, recID_high for index specified by index_id.
Function stores terms in idxWORD/PAIR/PHRASExxR tables with
prefixes for specific index, for example term 'ellis'
from author index will be stored in reversed table as:
'__author__ellis'. It allows fast operations on only part of terms
        @param index_id: id of the dependent index we take the terms from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
update_cache_for_record = self.update_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = update_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("""INSERT INTO %s (id_bibrec,termlist,type)
VALUES (%%s,%%s,'FUTURE')""" % \
wash_table_column_name(virtual_tab_name),
(recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def insert_index(self, index_id, recID_low, recID_high):
"""
Inserts terms from dependent index to virtual table
without looking what's inside the virtual table and
what terms are being added. It's faster than 'updating',
but it can only be used when virtual table is free of
terms from this dependent index.
        @param index_id: id of the dependent index we take the terms from
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
insert_to_cache_for_record = self.insert_to_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take new values
new_regular_values = self.retrieve_new_values_from_index(index_id, [recID_low, recID_high])
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
new_values = new_regular_values.get(recID) or []
old_values = old_virtual_values.get(recID) or []
to_serialize = insert_to_cache_for_record(index_name, recID, old_values, new_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def remove_index(self, index_id, recID_low, recID_high):
"""
Removes words found in dependent index from reversed
table of virtual index. Updates the state of the memory
(for future removal from forward table).
Takes into account that given words can be found in more
        than one dependent index and it won't mark these words
for the removal process.
@param index_id: id of the dependent index we want to remove
@param recID_low: first recID from the range of considered recIDs
@param recID_high: last recID from the range of considered recIDs
"""
index_name = self.dependent_indexes[index_id]
remove_from_cache_for_record = self.remove_from_cache_for_record
virtual_tab_name = self.table_name[:-1] + "R"
# take old values
old_virtual_values = self.retrieve_old_values([recID_low, recID_high])
# update reversed table
for recID in xrange(recID_low, recID_high + 1):
old_values = old_virtual_values.get(recID) or []
to_serialize = remove_from_cache_for_record(index_name, recID, old_values)
if len(to_serialize) == 0:
continue
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal(to_serialize))) # kwalitee: disable=sql
try:
run_sql("INSERT INTO %s (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(virtual_tab_name), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
pass
def update_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates memory (cache) with information on what to
remove/add/modify in forward table for specified record.
        It also returns new terms which should be indexed for the given record.
@param index_name: index name of dependent index
@param recID: considered record
@param old_values: all old values from all dependent indexes
for this virtual index for recID
@param new_values: new values from some dependent index
which should be added
"""
prefix = make_prefix(index_name)
put = self.put
new_values_prefix = [prefix + term for term in new_values]
part_values = []
tmp_old_values_prefix = []
# split old values from v.index into those with 'prefix' and those without
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
part_values.append(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
tmp_old_values_prefix.append(term)
# remember not to remove words that occur more than once
part_values = set(part_values)
for value in tmp_old_values_prefix:
term_without_prefix = re.sub(re_prefix, '', value)
if term_without_prefix in part_values:
put(recID, term_without_prefix, 1)
for term_without_prefix in new_values:
put(recID, term_without_prefix, 1)
tmp_new_values_prefix = list(tmp_old_values_prefix)
tmp_new_values_prefix.extend(new_values_prefix)
return tmp_new_values_prefix
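    # Illustrative sketch (the prefix value is assumed, following the
    # update_index docstring where the author-index prefix is "__author__"):
    # with old_values == ["__author__ellis", "__title__higgs"] and
    # new_values == ["smith"], the method marks "ellis" for removal, marks
    # "smith" for addition, and returns ["__title__higgs", "__author__smith"].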
def insert_to_cache_for_record(self, index_name, recID, old_values, new_values):
"""
Updates cache with terms which should be inserted to database.
Used in insert_index function. See also: update_cache_for_record
which is analogous for update_index function.
"""
prefix = make_prefix(index_name)
append = old_values.append
put = self.put
for term in new_values:
append(prefix + term)
put(recID, term, 1)
return old_values
def remove_from_cache_for_record(self, index_name, recID, old_values):
"""
Updates information in cache with terms which should be removed
from virtual table. Used in remove_index function.
"""
prefix = make_prefix(index_name)
tmp_rest = []
tmp_removed = []
tmp_new_values = []
append_to_new = tmp_new_values.append
append_to_rest = tmp_rest.append
append_to_removed = tmp_removed.append
put = self.put
for term in old_values:
if term.startswith(prefix):
term_without_prefix = re.sub(re_prefix, '', term)
append_to_removed(term_without_prefix)
put(recID, term_without_prefix, -1)
else:
append_to_rest(re.sub(re_prefix, '', term))
append_to_new(term)
to_remember = set(tmp_rest) & set(tmp_removed)
for term_without_prefix in to_remember:
put(recID, term_without_prefix, 1)
return tmp_new_values
def clean_database(self):
"""Removes all entries from corresponding tables in database"""
query = """DELETE FROM %s""" % self.table_name
run_sql(query)
query = """DELETE FROM %s""" % self.table_name[:-1] + "R"
run_sql(query)
def clean_queue_table(self, index_name):
"""
Cleans queue table (i.e. idxWORD/PAIR/PHRASExxQ)
for a specific index. It means that the function will remove
all entries for this index from the queue table in the database.
"""
query = "DELETE FROM %s WHERE index_name='%s'" % \
(self.table_name[:-1].lstrip(self.table_prefix) + "Q",
index_name)
run_sql(query)
def remove_duplicates(self, entries):
"""
Removes duplicates from a list of entries (taken from Queue table)
in order to process a single command only once.
Queue table may look like this:
id (..) id_bibrec_low id_bibrec_high index_name mode
...
12 1 100 title update
13 1 100 title update
We don't want to perform the same operation twice. First we want to
squash the same commands into one.
@param entries: list of entries taken from the database
"""
unique = set()
return [entry for entry in entries if entry not in unique and not unique.add(entry)]
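# For example, assuming the queue entries are tuples without the autoincrement id
# column (so that identical commands compare equal), the one-liner above keeps the
# first occurrence of each entry and preserves the original order:
#   remove_duplicates([(1, 100, 'title', 'update'),
#                      (1, 100, 'title', 'update'),
#                      (1, 100, 'author', 'remove')])
#   # -> [(1, 100, 'title', 'update'), (1, 100, 'author', 'remove')]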
def remove_dependent_index(self, index_name):
"""
Removes dependent index from this virtual index.
It means removing all words from all records with prefix:
__index_name__ from reversed table, and removing some of
them from forward table if they don't appear in another
dependent index.
@param index_name: name of the dependent index to remove
"""
flush = 10000
dependent = self.dependent_indexes.values()
if len(dependent) == 0:
write_message("Specified index is not virtual...")
return
if index_name not in dependent:
write_message("Dependent index already removed...")
return
index_id = get_index_id_from_index_name(index_name)
records_range = get_records_range_for_index(index_id)
write_message("Removing an index: %s" % index_name)
if records_range:
flush_count = 0
chunks = chunk_generator([records_range[0], records_range[1]])
try:
while True:
task_sleep_now_if_required()
chunk = chunks.next()
self.remove_index(index_id, chunk[0], chunk[1])
flush_count = flush_count + chunk[1] - chunk[0] + 1
self.recIDs_in_mem.append(chunk)
if flush_count >= flush:
flush_count = 0
self.put_into_db()
except StopIteration:
if flush_count > 0:
self.put_into_db()
class WordTable(AbstractIndexTable):
"""
This class represents a single index table of a regular index
(regular means it doesn't accumulate data from other indexes,
but takes data directly from the metadata of the records which
are being indexed; for the other type of index see: VirtualIndexTable).
To start the indexing process one needs to invoke the add_recIDs() method.
For further reading see the description of this method.
"""
def __init__(self, index_name, table_type, table_prefix="", wash_index_terms=50):
"""Creates words table instance.
@param index_name: the index name
@param index_id: the index integer identifier
@param fields_to_index: a list of fields to index
@param table_type: type of the wordtable: Words, Pairs, Phrases
@param table_prefix: prefix for table name, indexing will be performed
on table: <<table_prefix>>idx<<wordtable_type>>XXF
@param wash_index_terms: do we wash index terms, and if yes (when >0),
how many characters do we keep in the index terms; see
max_char_length parameter of wash_index_term()
"""
AbstractIndexTable.__init__(self, index_name, table_type, table_prefix, wash_index_terms)
self.tags = get_index_tags(index_name, virtual=False)
self.nonmarc_tags = get_index_tags(index_name,
virtual=False,
tagtype="nonmarc")
self.timestamp = datetime.now()
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Update"]
try:
self.stemming_language = get_index_stemming_language(self.index_id)
except KeyError:
self.stemming_language = ''
self.remove_stopwords = get_index_remove_stopwords(self.index_id)
self.remove_html_markup = get_index_remove_html_markup(self.index_id)
self.remove_latex_markup = get_index_remove_latex_markup(self.index_id)
self.tokenizer = get_index_tokenizer(self.index_id)(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
self.tokenizer_type = detect_tokenizer_type(self.tokenizer)
self.default_tokenizer_function = self.tokenizer.get_tokenizing_function(table_type)
self.special_tags = self._handle_special_tags()
if self.stemming_language and self.table_name.startswith('idxWORD'):
write_message('%s has stemming enabled, language %s' % (self.table_name, self.stemming_language))
def _handle_special_tags(self):
"""
Fills in a dict with special tags which
always use the same tokenizer; this
tokenizer is independent of the index.
"""
special_tags = {}
fields = self.tags + self.nonmarc_tags
for tag in fields:
if tag in CFG_BIBINDEX_SPECIAL_TAGS:
for t in CFG_BIBINDEX_INDEX_TABLE_TYPE:
if self.table_type == CFG_BIBINDEX_INDEX_TABLE_TYPE[t]:
tokenizer_name = CFG_BIBINDEX_SPECIAL_TAGS[tag][t]
tokenizer = _TOKENIZERS[tokenizer_name]
instance = tokenizer(self.stemming_language,
self.remove_stopwords,
self.remove_html_markup,
self.remove_latex_markup)
special_tags[tag] = instance.get_tokenizing_function(self.table_type)
break
return special_tags
def turn_off_virtual_indexes(self):
"""
Prevents from reindexing related virtual indexes.
"""
self.virtual_indexes = []
def turn_on_virtual_indexes(self):
"""
Turns on indexing related virtual indexes.
"""
self.virtual_indexes = get_index_virtual_indexes(self.index_id)
def get_field(self, recID, tag):
"""Returns list of values of the MARC-21 'tag' fields for the
record 'recID'."""
out = []
bibXXx = "bib" + tag[0] + tag[1] + "x"
bibrec_bibXXx = "bibrec_" + bibXXx
query = """SELECT value FROM %s AS b, %s AS bb
WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id
AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
res = run_sql(query, (recID, tag))
for row in res:
out.append(row[0])
return out
def notify_virtual_indexes(self, recID_ranges):
"""
Informs all related virtual indexes about index change.
Function leaves information about the change for each index
in proper table in database (idxSOMETHINGxxQ).
@param recID_ranges: low and high recIDs of ranges
@type recID_ranges: list [[low_id1, high_id1], [low_id2, high_id2]...]
"""
query = """INSERT INTO %s (runtime, id_bibrec_low, id_bibrec_high, index_name, mode)
VALUES (%%s, %%s, %%s, %%s, %%s)"""
for index_id, index_name in self.virtual_indexes:
tab_name = "idx%s%02dQ" % (self.table_type, index_id)
full_query = query % tab_name
for recID_range in recID_ranges:
run_sql(full_query, (self.timestamp,
recID_range[0],
recID_range[1],
self.index_name,
self.virtual_index_update_mode))
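# Each related virtual index thus receives one queue row per recID range, roughly
# of the form (runtime, id_bibrec_low, id_bibrec_high, index_name, mode), e.g.
# (self.timestamp, 1, 100, 'title', 'update') written into that index's queue
# table (idxWORD01Q, idxPAIR01Q, ... depending on table_type and index id).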
def display(self):
"Displays the word table."
keys = self.value.keys()
keys.sort()
for k in keys:
write_message("%s: %s" % (k, self.value[k]))
def count(self):
"Returns the number of words in the table."
return len(self.value)
def info(self):
"Prints some information on the words table."
write_message("The words table contains %d words." % self.count())
def lookup_words(self, word=""):
"Lookup word from the words table."
if not word:
done = 0
while not done:
try:
word = raw_input("Enter word: ")
done = 1
except (EOFError, KeyboardInterrupt):
return
if word in self.value:
write_message("The word '%s' is found %d times." \
% (word, len(self.value[word])))
else:
write_message("The word '%s' does not exist in the word file."\
% word)
def add_recIDs(self, recIDs, opt_flush):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
global chunksize, _last_word_table
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
try:
self.chk_recID_range(i_low, i_high)
except StandardError:
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
raise
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if CFG_CHECK_MYSQL_THREADS:
kill_sleepy_mysql_threads()
percentage_display = get_percentage_completed(records_done, records_to_go)
task_update_progress("(%s:%s) adding recs %d-%d %s" % (self.table_name, self.index_name, i_low, i_high, percentage_display))
self.del_recID_range(i_low, i_high)
just_processed = self.add_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + just_processed
write_message(CFG_BIBINDEX_ADDING_RECORDS_STARTED_STR % \
(self.table_name, i_low, i_high))
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db()
self.clean()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
write_message("%s backing up" % (self.table_name))
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db()
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
self.log_progress(time_started, records_done, records_to_go)
self.notify_virtual_indexes(recIDs)
def add_recID_range(self, recID1, recID2):
"""Add records from RECID1 to RECID2."""
wlist = {}
self.recIDs_in_mem.append([recID1, recID2])
# special case of author indexes where we also add author
# canonical IDs:
if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
for recID in range(recID1, recID2 + 1):
if recID not in wlist:
wlist[recID] = []
wlist[recID] = list_union(get_author_canonical_ids_for_recid(recID),
wlist[recID])
marc, nonmarc = self.find_nonmarc_records(recID1, recID2)
if marc:
collector = TermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(marc, wlist)
if nonmarc:
collector = NonmarcTermCollector(self.tokenizer,
self.tokenizer_type,
self.table_type,
self.nonmarc_tags,
[recID1, recID2])
collector.set_special_tags(self.special_tags)
wlist = collector.collect(nonmarc, wlist)
# lookup index-time synonyms:
synonym_kbrs = get_all_synonym_knowledge_bases()
if self.index_name in synonym_kbrs:
if len(wlist) == 0: return 0
recIDs = wlist.keys()
for recID in recIDs:
for word in wlist[recID]:
word_synonyms = get_synonym_terms(word,
synonym_kbrs[self.index_name][0],
synonym_kbrs[self.index_name][1],
use_memoise=True)
if word_synonyms:
wlist[recID] = list_union(word_synonyms, wlist[recID])
# were there some words for these recIDs found?
recIDs = wlist.keys()
for recID in recIDs:
# was this record marked as deleted?
if "DELETED" in self.get_field(recID, "980__c"):
wlist[recID] = []
write_message("... record %d was declared deleted, removing its word list" % recID, verbose=9)
write_message("... record %d, termlist: %s" % (recID, wlist[recID]), verbose=9)
if len(wlist) == 0: return 0
# put words into reverse index table with FUTURE status:
for recID in recIDs:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal(wlist[recID]))) # kwalitee: disable=sql
# ... and, for new records, enter the CURRENT status as empty:
try:
run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % wash_table_column_name(self.table_name[:-1]), (recID, serialize_via_marshal([]))) # kwalitee: disable=sql
except DatabaseError:
# okay, it's an already existing record, no problem
pass
# put words into memory word list:
put = self.put
for recID in recIDs:
for w in wlist[recID]:
put(recID, w, 1)
return len(recIDs)
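# wlist above is a plain dict mapping each recID in the range to the list of terms
# collected for it, e.g. roughly {10: ['higgs', 'boson'], 11: []}; records marked
# as DELETED get an empty termlist before the lists are serialized into the
# reversed table and mirrored into the in-memory cache via put().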
def find_nonmarc_records(self, recID1, recID2):
"""Divides recID range into two different tables,
first one contains only recIDs of the records that
are Marc type and the second one contains records
of nonMarc type"""
marc = range(recID1, recID2 + 1)
nonmarc = []
query = """SELECT id FROM %s WHERE master_format <> 'marc'
AND id BETWEEN %%s AND %%s""" % "bibrec"
res = run_sql(query, (recID1, recID2))
if res:
nonmarc = list(zip(*res)[0])
if len(nonmarc) == (recID2 - recID1 + 1):
nonmarc = xrange(recID1, recID2 + 1)
marc = []
else:
for recID in nonmarc:
marc.remove(recID)
else:
marc = xrange(recID1, recID2 + 1)
return [marc, nonmarc]
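# Example of the returned pair: assuming only record 3 in the range 1-5 has a
# non-marc master format, marc becomes [1, 2, 4, 5] and nonmarc becomes [3];
# when every record in the range is non-marc, marc is left empty and nonmarc
# covers the whole range.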
def log_progress(self, start, done, todo):
"""Calculate progress and store it.
start: start time,
done: records processed,
todo: total number of records"""
time_elapsed = time.time() - start
# consistency check
if time_elapsed == 0 or done > todo:
return
time_recs_per_min = done / (time_elapsed / 60.0)
write_message("%d records took %.1f seconds to complete.(%1.f recs/min)"\
% (done, time_elapsed, time_recs_per_min))
if time_recs_per_min:
write_message("Estimated runtime: %.1f minutes" % \
((todo - done) / time_recs_per_min))
def put(self, recID, word, sign):
"""Keeps track of changes done during indexing
and stores these changes in memory for further use.
Indexing process needs this information later while
filling in the database.
@param recID: recID of the record we want to update in memory
@param word: word we want to update
@param sign: sign of the word, 1 means keep this word in the database,
-1 remove word from database
"""
value = self.value
try:
if self.wash_index_terms:
word = wash_index_term(word, self.wash_index_terms)
if word in self.value:
# the word 'word' exist already: update sign
value[word][recID] = sign
else:
value[word] = {recID: sign}
except:
write_message("Error: Cannot put word %s with sign %d for recID %s." % (word, sign, recID))
def del_recIDs(self, recIDs):
"""Fetches records which id in the recIDs range list and adds
them to the wordTable. The recIDs range list is of the form:
[[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
"""
count = 0
for arange in recIDs:
task_sleep_now_if_required()
self.del_recID_range(arange[0], arange[1])
count = count + arange[1] - arange[0]
self.virtual_index_update_mode = CFG_BIBINDEX_UPDATE_MODE["Remove"]
self.put_into_db()
self.notify_virtual_indexes(recIDs)
if self.index_name == 'fulltext' and CFG_SOLR_URL:
solr_commit()
def del_recID_range(self, low, high):
"""Deletes records with 'recID' system number between low
and high from memory words index table."""
write_message("%s fetching existing words for records #%d-#%d started" % \
(self.table_name, low, high), verbose=3)
self.recIDs_in_mem.append([low, high])
query = """SELECT id_bibrec,termlist FROM %sR as bb WHERE bb.id_bibrec
BETWEEN %%s AND %%s""" % (self.table_name[:-1])
recID_rows = run_sql(query, (low, high))
for recID_row in recID_rows:
recID = recID_row[0]
wlist = deserialize_via_marshal(recID_row[1])
for word in wlist:
self.put(recID, word, -1)
write_message("%s fetching existing words for records #%d-#%d ended" % \
(self.table_name, low, high), verbose=3)
def check_bad_words(self):
"""
Finds bad words in reverse tables. Returns True in case of bad words.
"""
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE') LIMIT 1""" \
% (self.table_name[:-1],)
res = run_sql(query)
return bool(res)
def report_on_table_consistency(self):
"""Check reverse words index tables (e.g. idxWORD01R) for
interesting states such as 'TEMPORARY' state.
Prints small report (no of words, no of bad words).
"""
# find number of words:
query = """SELECT COUNT(1) FROM %s""" % (self.table_name)
res = run_sql(query, None, 1)
if res:
nb_words = res[0][0]
else:
nb_words = 0
# report stats:
write_message("%s contains %d words" % (self.table_name, nb_words))
# find possible bad states in reverse tables:
if self.check_bad_words():
write_message("EMERGENCY: %s needs to be repaired" %
(self.table_name, ))
else:
write_message("%s is in consistent state" % (self.table_name))
def repair(self, opt_flush):
"""Repair the whole table"""
# find possible bad states in reverse tables:
if not self.check_bad_words():
return
query = """SELECT id_bibrec FROM %sR WHERE type IN ('TEMPORARY','FUTURE')""" \
% (self.table_name[:-1])
res = intbitset(run_sql(query))
recIDs = create_range_list(list(res))
flush_count = 0
records_done = 0
records_to_go = 0
for arange in recIDs:
records_to_go = records_to_go + arange[1] - arange[0] + 1
time_started = time.time() # will measure profile time
for arange in recIDs:
i_low = arange[0]
chunksize_count = 0
while i_low <= arange[1]:
task_sleep_now_if_required()
# calculate chunk group of recIDs and treat it:
i_high = min(i_low + opt_flush - flush_count - 1, arange[1])
i_high = min(i_low + chunksize - chunksize_count - 1, i_high)
self.fix_recID_range(i_low, i_high)
flush_count = flush_count + i_high - i_low + 1
chunksize_count = chunksize_count + i_high - i_low + 1
records_done = records_done + i_high - i_low + 1
if chunksize_count >= chunksize:
chunksize_count = 0
# flush if necessary:
if flush_count >= opt_flush:
self.put_into_db("emergency")
self.clean()
flush_count = 0
self.log_progress(time_started, records_done, records_to_go)
# iterate:
i_low = i_high + 1
if flush_count > 0:
self.put_into_db("emergency")
self.log_progress(time_started, records_done, records_to_go)
write_message("%s inconsistencies repaired." % self.table_name)
def chk_recID_range(self, low, high):
"""Check if the reverse index table is in proper state"""
## check db
query = """SELECT 1 FROM %sR WHERE type IN ('TEMPORARY','FUTURE')
AND id_bibrec BETWEEN %%s AND %%s LIMIT 1""" % self.table_name[:-1]
res = run_sql(query, (low, high), 1)
if not res:
write_message("%s for %d-%d is in consistent state" % (self.table_name, low, high))
return # okay, words table is consistent
## inconsistency detected!
write_message("EMERGENCY: %s inconsistencies detected..." % self.table_name)
error_message = "Errors found. You should check consistency of the " \
"%s - %sR tables.\nRunning 'bibindex --repair' is " \
"recommended." % (self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def fix_recID_range(self, low, high):
"""Try to fix reverse index database consistency
(e.g. table idxWORD01R) in the low,high doc-id range.
Possible states for a recID follow:
CUR TMP FUT: very bad things have happened: warn!
CUR TMP : very bad things have happened: warn!
CUR FUT: delete FUT (crash before flushing)
CUR : database is ok
TMP FUT: add TMP to memory and del FUT from memory
flush (revert to old state)
TMP : very bad things have happened: warn!
FUT: very bad things have happened: warn!
"""
state = {}
query = "SELECT id_bibrec,type FROM %sR WHERE id_bibrec BETWEEN %%s AND %%s"\
% self.table_name[:-1]
res = run_sql(query, (low, high))
for row in res:
if row[0] not in state:
state[row[0]] = []
state[row[0]].append(row[1])
ok = 1 # will hold info on whether we will be able to repair
for recID in state.keys():
if not 'TEMPORARY' in state[recID]:
if 'FUTURE' in state[recID]:
if 'CURRENT' not in state[recID]:
write_message("EMERGENCY: Index record %d is in inconsistent state. Can't repair it." % recID)
ok = 0
else:
write_message("EMERGENCY: Inconsistency in index record %d detected" % recID)
query = """DELETE FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
run_sql(query, (recID,))
write_message("EMERGENCY: Inconsistency in record %d repaired." % recID)
else:
if 'FUTURE' in state[recID] and not 'CURRENT' in state[recID]:
self.recIDs_in_mem.append([recID, recID])
# Get the words file
query = """SELECT type,termlist FROM %sR
WHERE id_bibrec=%%s""" % self.table_name[:-1]
write_message(query, verbose=9)
res = run_sql(query, (recID,))
for row in res:
wlist = deserialize_via_marshal(row[1])
write_message("Words are %s " % wlist, verbose=9)
if row[0] == 'TEMPORARY':
sign = 1
else:
sign = -1
for word in wlist:
self.put(recID, word, sign)
else:
write_message("EMERGENCY: %s for %d is in inconsistent "
"state. Couldn't repair it." % (self.table_name,
recID), stream=sys.stderr)
ok = 0
if not ok:
error_message = "Unrepairable errors found. You should check " \
"consistency of the %s - %sR tables. Deleting affected " \
"TEMPORARY and FUTURE entries from these tables is " \
"recommended; see the BibIndex Admin Guide." % \
(self.table_name, self.table_name[:-1])
write_message("EMERGENCY: " + error_message, stream=sys.stderr)
raise StandardError(error_message)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibindex',
authorization_msg="BibIndex Task Submission",
description="""Examples:
\t%s -a -i 234-250,293,300-500 -u admin@localhost
\t%s -a -w author,fulltext -M 8192 -v3
\t%s -d -m +4d -A on --flush=10000\n""" % ((sys.argv[0],) * 3), help_specific_usage=""" Indexing options:
-a, --add\t\tadd or update words for selected records
-d, --del\t\tdelete words for selected records
-i, --id=low[-high]\t\tselect according to doc recID
-m, --modified=from[,to]\tselect according to modification date
-c, --collection=c1[,c2]\tselect according to collection
-R, --reindex\treindex the selected indexes from scratch
Repairing options:
-k, --check\t\tcheck consistency for all records in the table(s)
-r, --repair\t\ttry to repair all records in the table(s)
Specific options:
-w, --windex=w1[,w2]\tword/phrase indexes to consider (all)
-M, --maxmem=XXX\tmaximum memory usage in kB (no limit)
-f, --flush=NNN\t\tfull consistent table flush after NNN records (10000)
--force\t\tforce indexing of all records for provided indexes
-Z, --remove-dependent-index=w name of an index for removing from virtual index
-l --all-virtual\t\t set of all virtual indexes; the same as: -w virtual_ind1, virtual_ind2, ...
""",
version=__revision__,
specific_params=("adi:m:c:w:krRM:f:oZ:l", [
"add",
"del",
"id=",
"modified=",
"collection=",
"windex=",
"check",
"repair",
"reindex",
"maxmem=",
"flush=",
"force",
"remove-dependent-index=",
"all-virtual"
]),
task_stop_helper_fnc=task_stop_table_close_fnc,
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_check_options():
"""Check for options compatibility."""
if task_get_option("reindex"):
if task_get_option("cmd") != "add" or task_get_option('id') or task_get_option('collection'):
print("ERROR: You can use --reindex only when adding modified record.", file=sys.stderr)
return False
return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ("-a", "--add"):
task_set_option("cmd", "add")
if ("-x", "") in opts or ("--del", "") in opts:
raise StandardError("Can not have --add and --del at the same time!")
elif key in ("-k", "--check"):
task_set_option("cmd", "check")
elif key in ("-r", "--repair"):
task_set_option("cmd", "repair")
elif key in ("-d", "--del"):
task_set_option("cmd", "del")
elif key in ("-i", "--id"):
task_set_option('id', task_get_option('id') + split_ranges(value))
elif key in ("-m", "--modified"):
task_set_option("modified", get_date_range(value))
elif key in ("-c", "--collection"):
task_set_option("collection", value)
elif key in ("-R", "--reindex"):
task_set_option("reindex", True)
elif key in ("-w", "--windex"):
task_set_option("windex", value)
elif key in ("-M", "--maxmem"):
task_set_option("maxmem", int(value))
if task_get_option("maxmem") < base_process_size + 1000:
raise StandardError("Memory usage should be higher than %d kB" % \
(base_process_size + 1000))
elif key in ("-f", "--flush"):
task_set_option("flush", int(value))
elif key in ("-o", "--force"):
task_set_option("force", True)
elif key in ("-Z", "--remove-dependent-index",):
task_set_option("remove-dependent-index", value)
elif key in ("-l", "--all-virtual",):
task_set_option("all-virtual", True)
else:
return False
return True
def task_stop_table_close_fnc():
""" Close tables to STOP. """
global _last_word_table
if _last_word_table:
_last_word_table.put_into_db()
def get_recIDs_by_date_bibliographic(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
@param index_name: name of the index to consider
"""
index_id = get_index_id_from_index_name(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
if dates[1] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date >= %s""",
(dates[0],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s
AND status<>'DELETED'""",
(dates[0],)))
elif dates[0] is None:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b WHERE b.modification_date <= %s""",
(dates[1],)))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date
AND modification_date <= %s
AND status<>'DELETED'""",
(dates[1],)))
else:
res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
WHERE b.modification_date >= %s AND
b.modification_date <= %s""",
(dates[0], dates[1])))
if index_name == 'fulltext':
res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id
WHERE text_extraction_date <= modification_date AND
modification_date >= %s AND
modification_date <= %s AND
status<>'DELETED'""",
(dates[0], dates[1],)))
return set(res)
def get_recIDs_by_date_authority(dates, index_name, force_all=False):
""" Finds records that were modified between DATES[0] and DATES[1]
for given index.
If DATES is not set, then finds records that were modified since
the last update of the index.
Searches for bibliographic records connected to authority records
that have been changed.
"""
index_id = get_index_id_from_index_name(index_name)
index_tags = get_index_tags(index_name)
if not dates:
query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
res = run_sql(query, (index_id,))
if not res:
return set([])
if not res[0][0] or force_all:
dates = ("0000-00-00", None)
else:
dates = (res[0][0], None)
res = intbitset()
for tag in index_tags:
pattern = tag.replace('%', '*')
matches = fnmatch.filter(CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.keys(), pattern)
if not len(matches):
continue
for tag_match in matches:
# get the type of authority record associated with this field
auth_type = CFG_BIBAUTHORITY_CONTROLLED_FIELDS_BIBLIOGRAPHIC.get(tag_match)
# find updated authority records of this type
# dates[1] is ignored, needs dates[0] to find res
now = datetime.now()
auth_recIDs = search_pattern(p='980__a:' + auth_type) \
& search_unit_in_bibrec(str(dates[0]), str(now), search_type='m')
# now find dependent bibliographic records
for auth_recID in auth_recIDs:
# get the fix authority identifier of this authority record
control_nos = get_control_nos_from_recID(auth_recID)
# there may be multiple control number entries! (the '035' field is repeatable!)
for control_no in control_nos:
# get the bibrec IDs that refer to AUTHORITY_ID in TAG
tag_0 = tag_match[:5] + '0' # possibly do the same for '4' subfields ?
fieldvalue = '"' + control_no + '"'
res |= search_pattern(p=tag_0 + ':' + fieldvalue)
return set(res)
def get_not_updated_recIDs(modified_dates, indexes, force_all=False):
"""Finds not updated recIDs in database for indexes.
@param modified_dates: between this dates we should look for modified records
@type modified_dates: [date_old, date_new]
@param indexes: list of indexes
@type indexes: string separated by coma
@param force_all: if True all records will be taken
"""
found_recIDs = set()
write_message(CFG_BIBINDEX_UPDATE_MESSAGE)
for index in indexes:
found_recIDs |= get_recIDs_by_date_bibliographic(modified_dates, index, force_all)
found_recIDs |= get_recIDs_by_date_authority(modified_dates, index, force_all)
return list(sorted(found_recIDs))
def get_recIDs_from_cli(indexes=[]):
"""
Gets recID ranges from the CLI for indexing when the
user specified the 'id' or 'collection' option, or
searches for modified recIDs for the provided indexes
when recIDs are not specified.
@param indexes: it's a list of specified indexes, which
can be obtained from CLI with use of:
get_indexes_from_cli() function.
@type indexes: list of strings
"""
# need to first update idxINDEX table to find proper recIDs for reindexing
if task_get_option("reindex"):
for index_name in indexes:
run_sql("""UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00'
WHERE name=%s""", (index_name,))
if task_get_option("id"):
return task_get_option("id")
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID, recID])
return recIDs_range
elif task_get_option("cmd") == "add":
recs = get_not_updated_recIDs(task_get_option("modified"),
indexes,
task_get_option("force"))
recIDs_range = beautify_range_list(create_range_list(recs))
return recIDs_range
return []
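# The ranges returned here always have the [[low, high], ...] shape expected by
# add_recIDs()/del_recIDs(); e.g. the 'collection' branch turns a search result
# such as [10, 11, 57] into [[10, 10], [11, 11], [57, 57]].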
def get_indexes_from_cli():
"""
Gets indexes from CLI and checks if they are
valid. If indexes weren't specified, the function
will return all known indexes.
"""
indexes = task_get_option("windex")
all_virtual = task_get_option("all-virtual")
if all_virtual:
indexes = filter_for_virtual_indexes(get_all_indexes())
elif not indexes:
indexes = get_all_indexes()
else:
indexes = indexes.split(",")
indexes = remove_inexistent_indexes(indexes, leave_virtual=True)
return indexes
def remove_dependent_index(virtual_indexes, dependent_index):
"""
Removes dependent index from virtual indexes.
@param virtual_indexes: names of virtual_indexes
@type virtual_indexes: list of strings
@param dependent_index: name of dependent index
@type dependent_index: string
"""
if not virtual_indexes:
write_message("You should specify a name of a virtual index...")
return
id_dependent = get_index_id_from_index_name(dependent_index)
for index_name in virtual_indexes:
index_id = get_index_id_from_index_name(index_name)
for type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.itervalues():
vit = VirtualIndexTable(index_name, type_)
vit.remove_dependent_index(dependent_index)
task_sleep_now_if_required()
query = """DELETE FROM idxINDEX_idxINDEX WHERE id_virtual=%s AND id_normal=%s"""
run_sql(query, (index_id, id_dependent))
def should_update_virtual_indexes():
"""
Decides if any virtual indexes should be updated.
Decision is made based on arguments obtained
from CLI.
"""
return task_get_option("all-virtual") or task_get_option("windex")
def update_virtual_indexes(virtual_indexes, reindex=False):
"""
Function will update all specified virtual_indexes.
@param virtual_indexes: list of index names
@param reindex: shall we reindex given v.indexes from scratch?
"""
kwargs = {}
if reindex:
kwargs.update({'table_prefix': 'tmp_'})
for index_name in virtual_indexes:
if reindex:
index_id = get_index_id_from_index_name(index_name)
init_temporary_reindex_tables(index_id)
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.set_reindex_mode()
vit.run_update()
swap_temporary_reindex_tables(index_id)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
else:
for key, type_ in CFG_BIBINDEX_INDEX_TABLE_TYPE.iteritems():
kwargs.update({'wash_index_terms': CFG_BIBINDEX_WASH_INDEX_TERMS[key]})
vit = VirtualIndexTable(index_name, type_, **kwargs)
vit.run_update()
task_sleep_now_if_required(can_stop_too=True)
def task_run_core():
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
"""
global _last_word_table
indexes = get_indexes_from_cli()
if len(indexes) == 0:
write_message("Specified indexes can't be found.")
return True
virtual_indexes = filter_for_virtual_indexes(indexes)
regular_indexes = list(set(indexes) - set(virtual_indexes))
# check tables consistency
if task_get_option("cmd") == "check":
for index_name in indexes:
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
# virtual index: remove dependent index
if task_get_option("remove-dependent-index"):
remove_dependent_index(indexes,
task_get_option("remove-dependent-index"))
return True
# virtual index: update
if should_update_virtual_indexes():
update_virtual_indexes(virtual_indexes, task_get_option("reindex"))
if len(regular_indexes) == 0:
return True
# regular index: initialization for Words,Pairs,Phrases
recIDs_range = get_recIDs_from_cli(regular_indexes)
recIDs_for_index = find_affected_records_for_index(regular_indexes,
recIDs_range,
(task_get_option("force") or \
task_get_option("reindex") or \
task_get_option("cmd") == "del"))
if len(recIDs_for_index.keys()) == 0:
write_message("Selected indexes/recIDs are up to date.")
# Let's work on single words!
for index_name in recIDs_for_index.keys():
index_id = get_index_id_from_index_name(index_name)
reindex_prefix = ""
if task_get_option("reindex"):
reindex_prefix = "tmp_"
init_temporary_reindex_tables(index_id, reindex_prefix)
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Words"],
table_prefix=reindex_prefix,
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception(alert_admin=True)
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on pairs now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Pairs"],
table_prefix=reindex_prefix,
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on phrases now
wordTable = WordTable(index_name=index_name,
table_type=CFG_BIBINDEX_INDEX_TABLE_TYPE["Phrases"],
table_prefix=reindex_prefix,
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id") or task_get_option("collection"):
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
elif task_get_option("cmd") == "add":
final_recIDs = beautify_range_list(create_range_list(recIDs_for_index[index_name]))
wordTable.add_recIDs(final_recIDs, task_get_option("flush"))
if not task_get_option("id") and not task_get_option("collection"):
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.table_name
write_message(error_message, stream=sys.stderr)
raise StandardError(error_message)
except StandardError as e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
if _last_word_table:
_last_word_table.put_into_db()
raise
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if task_get_option("reindex"):
swap_temporary_reindex_tables(index_id, reindex_prefix)
update_index_last_updated([index_name], task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
# update modification date also for indexes that were up to date
if not task_get_option("id") and not task_get_option("collection") and \
task_get_option("cmd") == "add":
up_to_date = set(indexes) - set(recIDs_for_index.keys())
update_index_last_updated(list(up_to_date), task_get_task_param('task_starting_time'))
_last_word_table = None
return True
### okay, here we go:
if __name__ == '__main__':
main()
|
SDSG-Invenio/invenio
|
invenio/legacy/bibindex/engine.py
|
Python
|
gpl-2.0
| 101,575 | 0.002648 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django import test
from django.core.exceptions import ValidationError
from yepes.validators import (
CharSetValidator,
ColorValidator,
FormulaValidator,
IdentifierValidator,
PhoneNumberValidator,
PostalCodeValidator,
RestrictedEmailValidator,
)
class ValidatorsTest(test.SimpleTestCase):
def test_charset(self):
validator = CharSetValidator('abcdef')
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('abcdef')
assertValid('dadadada')
assertNotValid('aBcDeF')
assertNotValid('DADADADA')
assertNotValid('uy')
assertNotValid('a-f')
validator = CharSetValidator('abcdefABCDEF')
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('abcdef')
assertValid('dadadada')
assertValid('aBcDeF')
assertValid('DADADADA')
assertNotValid('uy')
assertNotValid('a-f')
def test_charset_with_range(self):
validator = CharSetValidator('a-f')
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('abcdef')
assertValid('dadadada')
assertNotValid('aBcDeF')
assertNotValid('DADADADA')
assertNotValid('uy')
assertNotValid('a-f')
validator = CharSetValidator('a-fA-F')
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('abcdef')
assertValid('dadadada')
assertValid('aBcDeF')
assertValid('DADADADA')
assertNotValid('uy')
assertNotValid('a-f')
def test_color(self):
validator = ColorValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('#5DC1B9')
assertValid('#5dc1b9')
assertValid('#fff')
assertValid('#fffFFF')
assertNotValid('5DC1B9')
assertNotValid('5dc1b9')
assertNotValid('fff')
assertNotValid('fffFFF')
assertNotValid('#12')
assertNotValid('#1234')
assertNotValid('#12345678')
assertNotValid('#hijKLM')
def test_formula(self):
validator = FormulaValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('1 * 3 ** 5')
assertValid('a * b ** c')
assertValid('x * y ** z')
assertNotValid('*')
assertNotValid('not')
assertNotValid('* 1')
assertNotValid('1 *')
assertNotValid('1 |/ 1')
assertNotValid('1 * (10 - 3')
assertNotValid('a * b)')
def test_formula_with_variables(self):
validator = FormulaValidator(['a', 'b', 'c'])
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('1 * 3 ** 5')
assertValid('a * b ** c')
assertNotValid('x * y ** z')
assertNotValid('*')
assertNotValid('not')
assertNotValid('* 1')
assertNotValid('1 *')
assertNotValid('1 |/ 1')
assertNotValid('1 * (10 - 3')
assertNotValid('a * b)')
def test_identifier(self):
validator = IdentifierValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('variable')
assertValid('variable_123')
assertValid('_')
assertValid('_variable')
assertValid('variable_')
assertValid('__variable__')
assertValid('UpperCamelCase')
assertValid('lowerCamelCase')
assertValid('UPPER_CASE_WITH_UNDERSCORES')
assertValid('lower_case_with_underscores')
assertValid('Mixed_Case_With_Underscores')
assertNotValid('123_variable')
assertNotValid('z%.# +ç@')
assertNotValid('UPPER-CASE-WITH-DASHES')
assertNotValid('lower-case-with-dashes')
assertNotValid('Mixed-Case-With-Dashes')
def test_phone_number(self):
validator = PhoneNumberValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('+9-999-999-9999')
assertValid('999-999-999-9999')
assertValid('999 999 999 9999')
assertValid('999-99999')
assertValid('(999) / 999-99999')
assertValid('+99-99-999-99999')
assertValid('99-99-99-999-99999')
assertValid('999')
assertValid('9999-9999999')
assertValid('99999-99999')
assertValid('+99-99999-99999')
assertValid('9-999999999')
assertValid('(9999) 9999 9999')
assertValid('99999999')
assertValid('999999999999')
assertValid('+99 999 9999 9999')
assertValid('+99 (9999) 9999 9999')
assertValid('999 9999 9999')
assertValid('9999 9999')
assertValid('+9999-999-999')
assertValid('+999-999-9999')
assertValid('+999-9999-9999')
assertValid('+9999-999-9999')
assertValid('9999-999-999')
assertValid('+99 (9) 999 9999')
assertValid('+99 (99) 999 9999')
assertValid('+99 (999) 999 9999')
assertValid('9 (999) 999 9999')
assertValid('+99-9999-9999')
assertValid('+99 9999 9999')
assertValid('99 99 99 99')
assertValid('99 99 99 99 99')
assertValid('9 99 99 99 99')
assertValid('+99 9 99 99 99 99')
assertValid('99999 999999')
assertValid('99999 999999-99')
assertValid('+99 9999 999999')
assertValid('(99999) 999999')
assertValid('+99 (9999) 999999')
assertValid('99999-999999')
assertValid('99999/999999-99')
assertValid('999 9999')
assertValid('999-9999')
assertValid('99-99999999')
assertValid('999-9999999')
assertValid('9999-9999')
assertValid('+99 99 99999999')
assertValid('+99 9 99999999')
assertValid('999 99 999')
assertValid('999-999-999')
assertValid('99-999-99-99')
assertValid('(99) 999-99-99')
assertValid('9 9999 99-99-99')
assertValid('9 (999) 999-99-99')
assertValid('999 99 99 99')
assertValid('999 999 999')
assertValid('99 999 99 99')
assertValid('999 999 99 99')
assertValid('+99 99 999 99 99')
assertValid('9999 999 999')
assertValid('(999) 9999 9999')
assertValid('(9999) 999 9999')
assertValid('(99999) 99999')
assertValid('(9999 99) 99999')
assertValid('(9999 99) 9999')
assertValid('9999 999 9999')
assertValid('9999 999999')
assertValid('9999 999 99 99')
assertValid('(999) 999-9999')
assertValid('9-999-999-9999')
assertValid('999-999-9999')
assertValid('9 999 999-9999')
assertValid('(99) 9999 9999')
assertValid('(99) 99 99 99 99')
assertValid('99 9999 9999')
assertValid('+99 9 9999 9999')
assertValid('+99 999 999 999')
assertValid('99 99 99')
assertValid('999 999')
assertValid('(999) 9999-9999')
assertValid('+999 9999-9999')
assertValid('99999999999')
assertValid('(9999) 999-9999')
assertValid('(99999) 99-9999')
assertValid('(999) 999-999-9999')
assertValid('99 9-999-9999')
assertValid('(99) 9999-9999')
assertValid('(99999) 9999-9999')
assertValid('(999) 99-9999')
assertValid('9-999 9999')
assertNotValid('9')
assertNotValid('9')
assertNotValid('99')
assertNotValid('9999999999999999')
assertNotValid('+999999999999999')
assertNotValid('9-9-9-9-9')
assertNotValid(' 99 99 ')
assertNotValid('a')
assertNotValid('++999999')
assertNotValid('99+99999')
assertNotValid('999()999')
assertNotValid('99(999)99(999)')
assertNotValid('999/999/999')
assertNotValid('999.999.9999')
assertNotValid('abc')
assertNotValid('ABC')
assertNotValid('--/--')
assertNotValid('&')
def test_postal_code(self):
validator = PostalCodeValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
assertValid('999')
assertValid('999 99')
assertValid('999-9999')
assertValid('999-999-9')
assertValid('9999')
assertValid('9999 A')
assertValid('9999 AA')
assertValid('9999 999')
assertValid('9999 9999')
assertValid('9999-999')
assertValid('99999')
assertValid('99999-999')
assertValid('99999-9999')
assertValid('99999-99999')
assertValid('99999-999999')
assertValid('9999999')
assertValid('A9 9AA')
assertValid('A9A 9AA')
assertValid('A99 9AA')
assertValid('AA9 9AA')
assertValid('AA9A 9AA')
assertValid('AA99 9AA')
assertValid('AA999')
assertValid('AA9999')
assertValid('AAAA 9AA')
assertValid('AA-9999')
assertValid('A999')
assertValid('A9999AAA')
assertValid('AAAA9AA')
assertValid('AAA9999')
assertValid('AAA 9999')
assertNotValid('9')
assertNotValid('99')
assertNotValid('A')
assertNotValid('AA')
assertNotValid('999999999999999')
assertNotValid('AAAAAAAAAAAAAAA')
assertNotValid('9-9-9-9-9')
assertNotValid(' 99 99 ')
assertNotValid('+999')
assertNotValid('(99)999')
assertNotValid('99/999')
assertNotValid('99.999')
assertNotValid('-- --')
assertNotValid('&')
def test_restricted_email(self):
validator = RestrictedEmailValidator()
def assertValid(value):
self.assertTrue(validator.validate(value))
validator(value)
def assertNotValid(value):
self.assertFalse(validator.validate(value))
with self.assertRaises(ValidationError):
validator(value)
# Valid and common.
assertValid('niceandsimple@example.com')
assertValid('very.common@example.com')
assertValid('a.little.lengthy.but.fine@dept.example.com')
assertValid('disposable.style.email.with+symbol@example.com')
assertValid('other.email-with-dash@example.com')
# Valid according to standard but uncommon.
assertNotValid(r'"much.more unusual"@example.com')
assertNotValid(r'"very.unusual.@.unusual.com"@example.com')
assertNotValid(r'"very.(),:;<>[]\".VERY.\"very@\\ \"very\".unusual"@strange.example.com')
assertNotValid(r'admin@mailserver1')
assertNotValid(r"#!$%&'*+-/=?^_`{}|~@example.org")
assertNotValid(r'"()<>[]:,;@\\\"!#$%&\'*+-/=?^_`{}| ~.a"@example.org')
assertNotValid(r'" "@example.org')
assertNotValid(r'üñîçøðé@example.com')
assertNotValid(r'jsmith@üñîçøðé.com')
assertNotValid(r'jsmith@[192.168.2.1')
assertNotValid(r'jsmith@[IPv6:2001:db8::1]')
# Not valid.
assertNotValid(r'Abc.example.com')
assertNotValid(r'A@b@c@example.com')
assertNotValid(r'a"b(c)d,e:f;g<h>i[j\k]l@example.com')
assertNotValid(r'just"not"right@example.com')
assertNotValid(r'this is"not\allowed@example.com')
assertNotValid(r'this\ still\"not\\allowed@example.com')
assertNotValid(r'john..doe@example.com')
assertNotValid(r'john.doe@example..com')
|
samuelmaudo/yepes
|
tests/validators/tests.py
|
Python
|
bsd-3-clause
| 13,740 | 0.00102 |
#!/usr/bin/env python
"""Setup file for HT-BAC Tools.
"""
__author__ = "Ole Weidner"
__email__ = "ole.weidner@rutgers.edu"
__copyright__ = "Copyright 2014, The RADICAL Project at Rutgers"
__license__ = "MIT"
""" Setup script. Used by easy_install and pip. """
import os
import sys
import subprocess
from setuptools import setup, find_packages, Command
#-----------------------------------------------------------------------------
#
def get_version():
short_version = None # 0.4.0
long_version = None # 0.4.0-9-g0684b06
try:
import subprocess as sp
import re
srcroot = os.path.dirname (os.path.abspath (__file__))
VERSION_MATCH = re.compile (r'(([\d\.]+)\D.*)')
# attempt to get version information from git
p = sp.Popen ('cd %s && git describe --tags --always' % srcroot,
stdout=sp.PIPE, stderr=sp.STDOUT, shell=True)
out = p.communicate()[0]
if p.returncode != 0 or not out :
# the git check failed -- it's likely that we are called from
# a tarball, so use ./VERSION instead
out=open ("%s/VERSION" % ".", 'r').read().strip()
# from the full string, extract short and long versions
v = VERSION_MATCH.search (out)
if v:
long_version = v.groups ()[0]
short_version = v.groups ()[1]
# sanity check if we got *something*
if not short_version or not long_version :
sys.stderr.write ("Cannot determine version from git or ./VERSION\n")
import sys
sys.exit (-1)
# make sure the version files exist for the runtime version inspection
open ('%s/VERSION' % srcroot, 'w').write (long_version+"\n")
open ('%s/src/radical/ensemblemd/mdkernels/VERSION' % srcroot, 'w').write (long_version+"\n")
except Exception as e :
print 'Could not extract/set version: %s' % e
import sys
sys.exit (-1)
return short_version, long_version
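# From a git checkout this typically yields something like
# ('0.4.0', '0.4.0-9-g0684b06'); from a release tarball both values are derived
# from the bundled VERSION file instead.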
#short_version, long_version = get_version ()
#-----------------------------------------------------------------------------
# check python version. we need > 2.5, <3.x
if sys.hexversion < 0x02050000 or sys.hexversion >= 0x03000000:
raise RuntimeError("Sinon requires Python 2.x (2.5 or higher)")
#-----------------------------------------------------------------------------
#
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
#-----------------------------------------------------------------------------
setup_args = {
'name' : 'radical.ensemblemd.mdkernels',
'version' : 0.1,
'description' : "BAC is a tool for molecular dynamics binding affinity calculations.",
'long_description' : (read('README.md') + '\n\n' + read('CHANGES.md')),
'author' : 'RADICAL Group at Rutgers University',
'author_email' : 'ole.weidner@rutgers.edu',
'maintainer' : "Ole Weidner",
'maintainer_email' : 'ole.weidner@rutgers.edu',
'url' : 'https://github.com/radical-cybertools',
'license' : 'MIT',
'keywords' : "molecular dynamics binding affinity calculations",
'classifiers' : [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: MIT',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
'Topic :: System :: Distributed Computing',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix'
],
#'entry_points': {
# 'console_scripts':
# ['htbac-fecalc = radical.ensemblemd.htbac.bin.fecalc:main',
# 'htbac-sim = radical.ensemblemd.htbac.bin.sim:main']
#},
#'dependency_links': ['https://github.com/saga-project/saga-pilot/tarball/master#egg=sagapilot'],
'namespace_packages': ['radical', 'radical.ensemblemd'],
'packages' : ['radical',
'radical.ensemblemd',
'radical.ensemblemd.mdkernels',
'radical.ensemblemd.mdkernels.configs'],
'package_dir' : {'': 'src'},
'package_data' : {'': ['*.sh', '*.json', 'VERSION', 'VERSION.git']},
'install_requires' : ['radical.utils', 'setuptools>=1'],
'test_suite' : 'radical.ensemblemd.mdkernels.tests',
'zip_safe' : False,
}
#-----------------------------------------------------------------------------
setup (**setup_args)
#-----------------------------------------------------------------------------
|
radical-cybertools/radical.ensemblemd.mdkernels
|
setup.py
|
Python
|
mit
| 5,031 | 0.015703 |
""" API v0 views. """
import datetime
import json
import logging
import pytz
from django.contrib.auth.models import User
from django.db import transaction
from django.http import Http404
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_oauth.authentication import OAuth2Authentication
from ccx_keys.locator import CCXLocator
from courseware import courses
from instructor.enrollment import (
enroll_email,
get_email_params,
)
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.api import permissions
from student.models import CourseEnrollment
from student.roles import CourseCcxCoachRole
from lms.djangoapps.ccx.models import CcxFieldOverride, CustomCourseForEdX
from lms.djangoapps.ccx.overrides import (
override_field_for_ccx,
)
from lms.djangoapps.ccx.utils import (
add_master_course_staff_to_ccx,
assign_coach_role_to_ccx,
is_email,
get_course_chapters,
)
from .paginators import CCXAPIPagination
from .serializers import CCXCourseSerializer
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def get_valid_course(course_id, is_ccx=False, advanced_course_check=False):
"""
Helper function used to validate and get a course from a course_id string.
It works with both master and ccx course id.
Args:
course_id (str): A string representation of a Master or CCX Course ID.
is_ccx (bool): Flag to perform the right validation
advanced_course_check (bool): Flag to perform extra validations for the master course
Returns:
tuple: a tuple of course_object, course_key, error_code, http_status_code
"""
if course_id is None:
# the ccx detail view cannot call this function with a "None" value
        # so the following `error_code` should never be used; it is set anyway
        # to avoid a `NameError` exception in case this function is ever used
        # elsewhere in the future
error_code = 'course_id_not_provided'
if not is_ccx:
log.info('Master course ID not provided')
error_code = 'master_course_id_not_provided'
return None, None, error_code, status.HTTP_400_BAD_REQUEST
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.info('Course ID string "%s" is not valid', course_id)
return None, None, 'course_id_not_valid', status.HTTP_400_BAD_REQUEST
if not is_ccx:
try:
course_object = courses.get_course_by_id(course_key)
except Http404:
log.info('Master Course with ID "%s" not found', course_id)
return None, None, 'course_id_does_not_exist', status.HTTP_404_NOT_FOUND
if advanced_course_check:
if course_object.id.deprecated:
return None, None, 'deprecated_master_course_id', status.HTTP_400_BAD_REQUEST
if not course_object.enable_ccx:
return None, None, 'ccx_not_enabled_for_master_course', status.HTTP_403_FORBIDDEN
return course_object, course_key, None, None
else:
try:
ccx_id = course_key.ccx
except AttributeError:
log.info('Course ID string "%s" is not a valid CCX ID', course_id)
return None, None, 'course_id_not_valid_ccx_id', status.HTTP_400_BAD_REQUEST
# get the master_course key
master_course_key = course_key.to_course_locator()
try:
ccx_course = CustomCourseForEdX.objects.get(id=ccx_id, course_id=master_course_key)
return ccx_course, course_key, None, None
except CustomCourseForEdX.DoesNotExist:
log.info('CCX Course with ID "%s" not found', course_id)
return None, None, 'ccx_course_id_does_not_exist', status.HTTP_404_NOT_FOUND
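# A typical caller unpacks the 4-tuple returned by get_valid_course() and bails
# out early when no course could be resolved, e.g. (sketch mirroring the views
# below; the master course id is illustrative):
#
#   course, key, error_code, http_status = get_valid_course('course-v1:Org+EX101+RUN')
#   if course is None:
#       return Response(status=http_status, data={'error_code': error_code})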
def get_valid_input(request_data, ignore_missing=False):
"""
Helper function to validate the data sent as input and to
build field based errors.
Args:
request_data (OrderedDict): the request data object
ignore_missing (bool): whether or not to ignore fields
missing from the input data
Returns:
tuple: a tuple of two dictionaries for valid input and field errors
"""
valid_input = {}
field_errors = {}
mandatory_fields = ('coach_email', 'display_name', 'max_students_allowed',)
# checking first if all the fields are present and they are not null
if not ignore_missing:
for field in mandatory_fields:
if field not in request_data:
field_errors[field] = {'error_code': 'missing_field_{0}'.format(field)}
if field_errors:
return valid_input, field_errors
# at this point I can assume that if the fields are present,
# they must be validated, otherwise they can be skipped
coach_email = request_data.get('coach_email')
if coach_email is not None:
if is_email(coach_email):
valid_input['coach_email'] = coach_email
else:
field_errors['coach_email'] = {'error_code': 'invalid_coach_email'}
elif 'coach_email' in request_data:
field_errors['coach_email'] = {'error_code': 'null_field_coach_email'}
display_name = request_data.get('display_name')
if display_name is not None:
if not display_name:
field_errors['display_name'] = {'error_code': 'invalid_display_name'}
else:
valid_input['display_name'] = display_name
elif 'display_name' in request_data:
field_errors['display_name'] = {'error_code': 'null_field_display_name'}
max_students_allowed = request_data.get('max_students_allowed')
if max_students_allowed is not None:
try:
max_students_allowed = int(max_students_allowed)
valid_input['max_students_allowed'] = max_students_allowed
except (TypeError, ValueError):
field_errors['max_students_allowed'] = {'error_code': 'invalid_max_students_allowed'}
elif 'max_students_allowed' in request_data:
field_errors['max_students_allowed'] = {'error_code': 'null_field_max_students_allowed'}
course_modules = request_data.get('course_modules')
if course_modules is not None:
if isinstance(course_modules, list):
# de-duplicate list of modules
course_modules = list(set(course_modules))
for course_module_id in course_modules:
try:
UsageKey.from_string(course_module_id)
except InvalidKeyError:
field_errors['course_modules'] = {'error_code': 'invalid_course_module_keys'}
break
else:
valid_input['course_modules'] = course_modules
else:
field_errors['course_modules'] = {'error_code': 'invalid_course_module_list'}
elif 'course_modules' in request_data:
# case if the user actually passed null as input
valid_input['course_modules'] = None
return valid_input, field_errors
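# For reference, validation failures come back keyed by field name, e.g. this
# illustrative call (values are made up):
#
#   valid_input, field_errors = get_valid_input({'coach_email': 'not-an-email'},
#                                               ignore_missing=True)
#   # field_errors == {'coach_email': {'error_code': 'invalid_coach_email'}}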
def valid_course_modules(course_module_list, master_course_key):
"""
Function to validate that each element in the course_module_list belongs
to the master course structure.
Args:
course_module_list (list): A list of strings representing Block Usage Keys
master_course_key (CourseKey): An object representing the master course key id
Returns:
bool: whether or not all the course module strings belong to the master course
"""
course_chapters = get_course_chapters(master_course_key)
if course_chapters is None:
return False
return set(course_module_list).intersection(set(course_chapters)) == set(course_module_list)
def make_user_coach(user, master_course_key):
"""
Makes an user coach on the master course.
This function is needed because an user cannot become a coach of the CCX if s/he is not
coach on the master course.
Args:
user (User): User object
master_course_key (CourseKey): Key locator object for the course
"""
coach_role_on_master_course = CourseCcxCoachRole(master_course_key)
coach_role_on_master_course.add_users(user)
class CCXListView(GenericAPIView):
"""
**Use Case**
* Get the list of CCX courses for a given master course.
* Creates a new CCX course for a given master course.
**Example Request**
GET /api/ccx/v0/ccx/?master_course_id={master_course_id}
POST /api/ccx/v0/ccx {
"master_course_id": "course-v1:Organization+EX101+RUN-FALL2099",
"display_name": "CCX example title",
"coach_email": "john@example.com",
"max_students_allowed": 123,
"course_modules" : [
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week1",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week4",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week5"
]
}
**GET Parameters**
A GET request can include the following parameters.
* master_course_id: A string representation of a Master Course ID. Note that this must be properly
encoded by the client.
* page: Optional. An integer representing the pagination instance number.
* order_by: Optional. A string representing the field by which sort the results.
* sort_order: Optional. A string (either "asc" or "desc") indicating the desired order.
**POST Parameters**
A POST request can include the following parameters.
* master_course_id: A string representation of a Master Course ID.
* display_name: A string representing the CCX Course title.
* coach_email: A string representing the CCX owner email.
        * max_students_allowed: An integer representing the maximum number of students that
can be enrolled in the CCX Course.
* course_modules: Optional. A list of course modules id keys.
**GET Response Values**
If the request for information about the course is successful, an HTTP 200 "OK" response
is returned with a collection of CCX courses for the specified master course.
The HTTP 200 response has the following values.
* results: a collection of CCX courses. Each CCX course contains the following values:
* ccx_course_id: A string representation of a CCX Course ID.
* display_name: A string representing the CCX Course title.
* coach_email: A string representing the CCX owner email.
* start: A string representing the start date for the CCX Course.
* due: A string representing the due date for the CCX Course.
            * max_students_allowed: An integer representing the maximum number of students that
can be enrolled in the CCX Course.
* course_modules: A list of course modules id keys.
* count: An integer representing the total number of records that matched the request parameters.
* next: A string representing the URL where to retrieve the next page of results. This can be `null`
in case the response contains the complete list of results.
* previous: A string representing the URL where to retrieve the previous page of results. This can be
`null` in case the response contains the first page of results.
**Example GET Response**
{
"count": 99,
"next": "https://openedx-ccx-api-instance.org/api/ccx/v0/ccx/?course_id=<course_id>&page=2",
"previous": null,
"results": {
{
"ccx_course_id": "ccx-v1:Organization+EX101+RUN-FALL2099+ccx@1",
"display_name": "CCX example title",
"coach_email": "john@example.com",
"start": "2019-01-01",
"due": "2019-06-01",
"max_students_allowed": 123,
"course_modules" : [
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week1",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week4",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week5"
]
},
{ ... }
}
}
**POST Response Values**
If the request for the creation of a CCX Course is successful, an HTTP 201 "Created" response
is returned with the newly created CCX details.
The HTTP 201 response has the following values.
* ccx_course_id: A string representation of a CCX Course ID.
* display_name: A string representing the CCX Course title.
* coach_email: A string representing the CCX owner email.
* start: A string representing the start date for the CCX Course.
* due: A string representing the due date for the CCX Course.
        * max_students_allowed: An integer representing the maximum number of students that
can be enrolled in the CCX Course.
* course_modules: A list of course modules id keys.
**Example POST Response**
{
"ccx_course_id": "ccx-v1:Organization+EX101+RUN-FALL2099+ccx@1",
"display_name": "CCX example title",
"coach_email": "john@example.com",
"start": "2019-01-01",
"due": "2019-06-01",
"max_students_allowed": 123,
"course_modules" : [
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week1",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week4",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week5"
]
}
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication,)
permission_classes = (IsAuthenticated, permissions.IsMasterCourseStaffInstructor)
serializer_class = CCXCourseSerializer
pagination_class = CCXAPIPagination
def get(self, request):
"""
Gets a list of CCX Courses for a given Master Course.
Additional parameters are allowed for pagination purposes.
Args:
request (Request): Django request object.
Return:
A JSON serialized representation of a list of CCX courses.
"""
master_course_id = request.GET.get('master_course_id')
master_course_object, master_course_key, error_code, http_status = get_valid_course(master_course_id)
if master_course_object is None:
return Response(
status=http_status,
data={
'error_code': error_code
}
)
queryset = CustomCourseForEdX.objects.filter(course_id=master_course_key)
order_by_input = request.query_params.get('order_by')
sort_order_input = request.query_params.get('sort_order')
if order_by_input in ('id', 'display_name'):
sort_direction = ''
if sort_order_input == 'desc':
sort_direction = '-'
queryset = queryset.order_by('{0}{1}'.format(sort_direction, order_by_input))
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
response = self.get_paginated_response(serializer.data)
return response
def post(self, request):
"""
Creates a new CCX course for a given Master Course.
Args:
request (Request): Django request object.
Return:
            A JSON serialized representation of a newly created CCX course.
"""
master_course_id = request.data.get('master_course_id')
master_course_object, master_course_key, error_code, http_status = get_valid_course(
master_course_id,
advanced_course_check=True
)
if master_course_object is None:
return Response(
status=http_status,
data={
'error_code': error_code
}
)
# validating the rest of the input
valid_input, field_errors = get_valid_input(request.data)
if field_errors:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'field_errors': field_errors
}
)
try:
coach = User.objects.get(email=valid_input['coach_email'])
except User.DoesNotExist:
return Response(
status=status.HTTP_404_NOT_FOUND,
data={
'error_code': 'coach_user_does_not_exist'
}
)
if valid_input.get('course_modules'):
if not valid_course_modules(valid_input['course_modules'], master_course_key):
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'error_code': 'course_module_list_not_belonging_to_master_course'
}
)
# prepare the course_modules to be stored in a json stringified field
course_modules_json = json.dumps(valid_input.get('course_modules'))
with transaction.atomic():
ccx_course_object = CustomCourseForEdX(
course_id=master_course_object.id,
coach=coach,
display_name=valid_input['display_name'],
structure_json=course_modules_json
)
ccx_course_object.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx_course_object, master_course_object, 'start', start)
override_field_for_ccx(ccx_course_object, master_course_object, 'due', None)
# Enforce a static limit for the maximum amount of students that can be enrolled
override_field_for_ccx(
ccx_course_object,
master_course_object,
'max_student_enrollments_allowed',
valid_input['max_students_allowed']
)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in master_course_object.get_children():
override_field_for_ccx(ccx_course_object, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx_course_object, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx_course_object, vertical, hidden, True)
# make the coach user a coach on the master course
make_user_coach(coach, master_course_key)
# pull the ccx course key
ccx_course_key = CCXLocator.from_course_locator(master_course_object.id, ccx_course_object.id)
# enroll the coach in the newly created ccx
email_params = get_email_params(
master_course_object,
auto_enroll=True,
course_key=ccx_course_key,
display_name=ccx_course_object.display_name
)
enroll_email(
course_id=ccx_course_key,
student_email=coach.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
# assign coach role for the coach to the newly created ccx
assign_coach_role_to_ccx(ccx_course_key, coach, master_course_object.id)
            # assign staff role for all the staff and instructors of the master course to the newly created ccx
add_master_course_staff_to_ccx(
master_course_object,
ccx_course_key,
ccx_course_object.display_name,
send_email=False
)
serializer = self.get_serializer(ccx_course_object)
return Response(
status=status.HTTP_201_CREATED,
data=serializer.data
)
class CCXDetailView(GenericAPIView):
"""
**Use Case**
* Get the details of CCX course.
* Modify a CCX course.
* Delete a CCX course.
**Example Request**
GET /api/ccx/v0/ccx/{ccx_course_id}
PATCH /api/ccx/v0/ccx/{ccx_course_id} {
"display_name": "CCX example title modified",
"coach_email": "joe@example.com",
"max_students_allowed": 111,
"course_modules" : [
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week1",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week4",
"block-v1:Organization+EX101+RUN-FALL2099+type@chapter+block@week5"
]
}
DELETE /api/ccx/v0/ccx/{ccx_course_id}
**GET and DELETE Parameters**
A GET or DELETE request must include the following parameter.
* ccx_course_id: A string representation of a CCX Course ID.
**PATCH Parameters**
A PATCH request can include the following parameters
* ccx_course_id: A string representation of a CCX Course ID.
* display_name: Optional. A string representing the CCX Course title.
* coach_email: Optional. A string representing the CCX owner email.
        * max_students_allowed: Optional. An integer representing the maximum number of students that
can be enrolled in the CCX Course.
* course_modules: Optional. A list of course modules id keys.
**GET Response Values**
If the request for information about the CCX course is successful, an HTTP 200 "OK" response
is returned.
The HTTP 200 response has the following values.
* ccx_course_id: A string representation of a CCX Course ID.
* display_name: A string representing the CCX Course title.
* coach_email: A string representing the CCX owner email.
* start: A string representing the start date for the CCX Course.
* due: A string representing the due date for the CCX Course.
        * max_students_allowed: An integer representing the maximum number of students that
can be enrolled in the CCX Course.
* course_modules: A list of course modules id keys.
**PATCH and DELETE Response Values**
If the request for modification or deletion of a CCX course is successful, an HTTP 204 "No Content"
response is returned.
"""
authentication_classes = (OAuth2Authentication, SessionAuthentication,)
permission_classes = (IsAuthenticated, permissions.IsCourseStaffInstructor)
serializer_class = CCXCourseSerializer
def get_object(self, course_id, is_ccx=False): # pylint: disable=arguments-differ
"""
Override the default get_object to allow a custom getter for the CCX
"""
course_object, course_key, error_code, http_status = get_valid_course(course_id, is_ccx)
self.check_object_permissions(self.request, course_object)
return course_object, course_key, error_code, http_status
def get(self, request, ccx_course_id=None):
"""
Gets a CCX Course information.
Args:
request (Request): Django request object.
ccx_course_id (string): URI element specifying the CCX course location.
Return:
A JSON serialized representation of the CCX course.
"""
ccx_course_object, _, error_code, http_status = self.get_object(ccx_course_id, is_ccx=True)
if ccx_course_object is None:
return Response(
status=http_status,
data={
'error_code': error_code
}
)
serializer = self.get_serializer(ccx_course_object)
return Response(serializer.data)
def delete(self, request, ccx_course_id=None): # pylint: disable=unused-argument
"""
Deletes a CCX course.
Args:
request (Request): Django request object.
ccx_course_id (string): URI element specifying the CCX course location.
"""
ccx_course_object, ccx_course_key, error_code, http_status = self.get_object(ccx_course_id, is_ccx=True)
if ccx_course_object is None:
return Response(
status=http_status,
data={
'error_code': error_code
}
)
ccx_course_overview = CourseOverview.get_from_id(ccx_course_key)
# clean everything up with a single transaction
with transaction.atomic():
CcxFieldOverride.objects.filter(ccx=ccx_course_object).delete()
# remove all users enrolled in the CCX from the CourseEnrollment model
CourseEnrollment.objects.filter(course_id=ccx_course_key).delete()
ccx_course_overview.delete()
ccx_course_object.delete()
return Response(
status=status.HTTP_204_NO_CONTENT,
)
def patch(self, request, ccx_course_id=None):
"""
Modifies a CCX course.
Args:
request (Request): Django request object.
ccx_course_id (string): URI element specifying the CCX course location.
"""
ccx_course_object, ccx_course_key, error_code, http_status = self.get_object(ccx_course_id, is_ccx=True)
if ccx_course_object is None:
return Response(
status=http_status,
data={
'error_code': error_code
}
)
master_course_id = request.data.get('master_course_id')
if master_course_id is not None and unicode(ccx_course_object.course_id) != master_course_id:
return Response(
status=status.HTTP_403_FORBIDDEN,
data={
'error_code': 'master_course_id_change_not_allowed'
}
)
valid_input, field_errors = get_valid_input(request.data, ignore_missing=True)
if field_errors:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'field_errors': field_errors
}
)
# get the master course key and master course object
master_course_object, master_course_key, _, _ = get_valid_course(unicode(ccx_course_object.course_id))
with transaction.atomic():
# update the display name
if 'display_name' in valid_input:
ccx_course_object.display_name = valid_input['display_name']
# check if the coach has changed and in case update it
old_coach = None
if 'coach_email' in valid_input:
try:
coach = User.objects.get(email=valid_input['coach_email'])
except User.DoesNotExist:
return Response(
status=status.HTTP_404_NOT_FOUND,
data={
'error_code': 'coach_user_does_not_exist'
}
)
if ccx_course_object.coach.id != coach.id:
old_coach = ccx_course_object.coach
ccx_course_object.coach = coach
if 'course_modules' in valid_input:
if valid_input.get('course_modules'):
if not valid_course_modules(valid_input['course_modules'], master_course_key):
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={
'error_code': 'course_module_list_not_belonging_to_master_course'
}
)
# course_modules to be stored in a json stringified field
ccx_course_object.structure_json = json.dumps(valid_input.get('course_modules'))
ccx_course_object.save()
# update the overridden field for the maximum amount of students
if 'max_students_allowed' in valid_input:
override_field_for_ccx(
ccx_course_object,
ccx_course_object.course,
'max_student_enrollments_allowed',
valid_input['max_students_allowed']
)
# if the coach has changed, update the permissions
if old_coach is not None:
# make the new ccx coach a coach on the master course
make_user_coach(coach, master_course_key)
# enroll the coach in the ccx
email_params = get_email_params(
master_course_object,
auto_enroll=True,
course_key=ccx_course_key,
display_name=ccx_course_object.display_name
)
enroll_email(
course_id=ccx_course_key,
student_email=coach.email,
auto_enroll=True,
email_students=True,
email_params=email_params,
)
                # assign the coach role on the ccx to the new coach
assign_coach_role_to_ccx(ccx_course_key, coach, master_course_object.id)
return Response(
status=status.HTTP_204_NO_CONTENT,
)
|
devs1991/test_edx_docmode
|
lms/djangoapps/ccx/api/v0/views.py
|
Python
|
agpl-3.0
| 30,649 | 0.002545 |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.win_service_object as win_service_binding
from cybox.common import HashList
from cybox.objects.win_process_object import WinProcess
from cybox.common import ObjectProperties, String
class ServiceDescriptionList(entities.EntityList):
_binding = win_service_binding
_binding_class = win_service_binding.ServiceDescriptionListType
_namespace = "http://cybox.mitre.org/objects#WinServiceObject-2"
description = fields.TypedField("Description", String, multiple=True)
class WinService(WinProcess):
_binding = win_service_binding
_binding_class = win_service_binding.WindowsServiceObjectType
_namespace = "http://cybox.mitre.org/objects#WinServiceObject-2"
_XSI_NS = "WinServiceObj"
_XSI_TYPE = "WindowsServiceObjectType"
service_dll_signature_exists = fields.TypedField("service_dll_signature_exists")
service_dll_signature_verified = fields.TypedField("service_dll_signature_verified")
description_list = fields.TypedField("Description_List", ServiceDescriptionList)
display_name = fields.TypedField("Display_Name", String)
group_name = fields.TypedField("Group_Name", String)
service_name = fields.TypedField("Service_Name", String)
service_dll = fields.TypedField("Service_DLL", String)
service_dll_certificate_issuer = fields.TypedField("Service_DLL_Certificate_Issuer", String)
service_dll_certificate_subject = fields.TypedField("Service_DLL_Certificate_Subject", String)
service_dll_hashes = fields.TypedField("Service_DLL_Hashes", HashList)
service_dll_signature_description = fields.TypedField("Service_DLL_Signature_Description", String)
startup_command_line = fields.TypedField("Startup_Command_Line", String)
startup_type = fields.TypedField("Startup_Type", String)
service_status = fields.TypedField("Service_Status", String)
service_type = fields.TypedField("Service_Type", String)
started_as = fields.TypedField("Started_As", String)
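# Hypothetical usage sketch (the values below are illustrative and not taken
# from this file); mixbox-based cybox entities expose to_obj()/to_xml()
# serialization helpers:
#
#   svc = WinService()
#   svc.service_name = String("wuauserv")
#   svc.display_name = String("Windows Update")
#   xml = svc.to_xml()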
|
CybOXProject/python-cybox
|
cybox/objects/win_service_object.py
|
Python
|
bsd-3-clause
| 2,132 | 0.002814 |
import cPickle
import os
import tarfile
import PIL.Image
from downloader import DataDownloader
class Cifar100Downloader(DataDownloader):
"""
See details about the CIFAR100 dataset here:
http://www.cs.toronto.edu/~kriz/cifar.html
"""
def urlList(self):
return [
'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz',
]
def uncompressData(self):
filename = 'cifar-100-python.tar.gz'
filepath = os.path.join(self.outdir, filename)
assert os.path.exists(filepath), 'Expected "%s" to exist' % filename
if not os.path.exists(os.path.join(self.outdir, 'cifar-100-python')):
print "Uncompressing file=%s ..." % filename
with tarfile.open(filepath) as tf:
tf.extractall(self.outdir)
def processData(self):
label_filename = 'meta'
label_filepath = os.path.join(self.outdir, 'cifar-100-python', label_filename)
with open(label_filepath, 'rb') as infile:
pickleObj = cPickle.load(infile)
fine_label_names = pickleObj['fine_label_names']
coarse_label_names = pickleObj['coarse_label_names']
for level, label_names in [
('fine', fine_label_names),
('coarse', coarse_label_names),
]:
dirname = os.path.join(self.outdir, level)
self.mkdir(dirname, clean=True)
with open(os.path.join(dirname, 'labels.txt'), 'w') as outfile:
for name in label_names:
outfile.write('%s\n' % name)
for filename, phase in [
('train', 'train'),
('test', 'test'),
]:
filepath = os.path.join(self.outdir, 'cifar-100-python', filename)
assert os.path.exists(filepath), 'Expected "%s" to exist' % filename
self.__extractData(filepath, phase, fine_label_names, coarse_label_names)
def __extractData(self, input_file, phase, fine_label_names, coarse_label_names):
"""
Read a pickle file at input_file and output as images
Arguments:
input_file -- a pickle file
phase -- train or test
fine_label_names -- mapping from fine_labels to strings
coarse_label_names -- mapping from coarse_labels to strings
"""
print 'Extracting images file=%s ...' % input_file
# Read the pickle file
with open(input_file, 'rb') as infile:
pickleObj = cPickle.load(infile)
# print 'Batch -', pickleObj['batch_label']
data = pickleObj['data']
assert data.shape[1] == 3072, 'Unexpected data.shape %s' % (data.shape,)
count = data.shape[0]
fine_labels = pickleObj['fine_labels']
assert len(fine_labels) == count, 'Expected len(fine_labels) to be %d, not %d' % (count, len(fine_labels))
coarse_labels = pickleObj['coarse_labels']
assert len(coarse_labels) == count, 'Expected len(coarse_labels) to be %d, not %d' % (
count, len(coarse_labels))
filenames = pickleObj['filenames']
assert len(filenames) == count, 'Expected len(filenames) to be %d, not %d' % (count, len(filenames))
data = data.reshape((count, 3, 32, 32))
data = data.transpose((0, 2, 3, 1))
fine_to_coarse = {} # mapping of fine labels to coarse labels
fine_dirname = os.path.join(self.outdir, 'fine', phase)
os.makedirs(fine_dirname)
coarse_dirname = os.path.join(self.outdir, 'coarse', phase)
os.makedirs(coarse_dirname)
with open(os.path.join(self.outdir, 'fine', '%s.txt' % phase), 'w') as fine_textfile, \
open(os.path.join(self.outdir, 'coarse', '%s.txt' % phase), 'w') as coarse_textfile:
for index, image in enumerate(data):
# Create the directory
fine_label = fine_label_names[fine_labels[index]]
dirname = os.path.join(fine_dirname, fine_label)
self.mkdir(dirname)
# Get the filename
filename = filenames[index]
ext = os.path.splitext(filename)[1][1:].lower()
if ext != self.file_extension:
filename = '%s.%s' % (os.path.splitext(filename)[0], self.file_extension)
filename = os.path.join(dirname, filename)
# Save the image
PIL.Image.fromarray(image).save(filename)
fine_textfile.write('%s %s\n' % (filename, fine_labels[index]))
coarse_textfile.write('%s %s\n' % (filename, coarse_labels[index]))
if fine_label not in fine_to_coarse:
fine_to_coarse[fine_label] = coarse_label_names[coarse_labels[index]]
# Create the coarse dataset with symlinks
for fine, coarse in fine_to_coarse.iteritems():
self.mkdir(os.path.join(coarse_dirname, coarse))
os.symlink(
# Create relative symlinks for portability
os.path.join('..', '..', '..', 'fine', phase, fine),
os.path.join(coarse_dirname, coarse, fine)
)
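# Hypothetical driver sketch (constructor arguments depend on the DataDownloader
# base class and are assumed here, not taken from this file):
#
#   d = Cifar100Downloader(outdir='/tmp/cifar100')
#   # ... the base class is assumed to fetch the urlList() entries first ...
#   d.uncompressData()
#   d.processData()   # writes fine/ and coarse/ image trees plus labels.txt files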
|
winnerineast/Origae-6
|
origae/download_data/cifar100.py
|
Python
|
gpl-3.0
| 5,218 | 0.002491 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage: %prog file [ file [ file [...]]]
# This script merges the timing data from several files into a single
# aggregate which is sent to stdout.
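# Example timing-file contents (illustrative values, format inferred from the
# parser in timing_file.__init__ below): each line is "<time>[*<weight>] <label>",
# with the weight defaulting to 1 when omitted.
#
#   1199145600*4 configure
#   1199145700 build
#   1199145900 check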
class stamp:
def __init__(this, time, weight):
this.time = long(time)
this.weight = long(weight)
def weighted_time(this):
return this.time * this.weight
def minimum(x, y):
if x < y:
return x
else:
return y
def maximum(x, y):
if x > y:
return x
else:
return y
class timing_file:
def __init__(this, filename = ''):
this.stamps = {}
        this.filename = filename
if this.filename:
f = open(filename, 'r')
this.lines = f.readlines()
f.close()
this.lines = [ line.strip() for line in this.lines ]
for line in this.lines:
space_sep = line.split()
if len(space_sep) != 2:
raise Exception('bad timing line in %s: %s' % (this.filename, line))
star_sep = space_sep[0].split('*')
if len(star_sep) == 1:
weight = 1
else:
weight = star_sep[1]
this.stamps[space_sep[1]] = stamp(star_sep[0], weight)
def write(this):
for stamp in this.stamps:
print '%d*%d %s' % (this.stamps[stamp].time, this.stamps[stamp].weight, stamp)
def merge(this, old):
new = timing_file()
minmax = ['maximum', 'minimum']
for s in this.stamps:
if s in minmax:
continue
if s in old.stamps:
total_weight = this.stamps[s].weight + old.stamps[s].weight
weighted_average_time = (this.stamps[s].weighted_time() + old.stamps[s].weighted_time()) / total_weight
new.stamps[s] = stamp(weighted_average_time, total_weight)
else:
                new.stamps[s] = this.stamps[s]
for s in old.stamps:
if s in minmax:
continue
if s not in this.stamps:
new.stamps[s] = old.stamps[s]
stamps = [this.stamps[s].time for s in this.stamps] + [old.stamps[s].time for s in old.stamps]
new.stamps['maximum'] = stamp(reduce(maximum, stamps, 0), 0)
        if new.stamps['maximum'].time > 0:
new.stamps['minimum'] = stamp(reduce(minimum, stamps, new.stamps['maximum'].time), 0)
return new
def option_parser():
import optparse
usage = "Usage: %prog file [ file [ file [...]]]"
parser = optparse.OptionParser(usage = usage)
general = optparse.OptionGroup(parser, 'General Options', '')
# general.add_option('-i', '--input',
# type = 'string',
# dest = 'infile',
# default = '',
# help = 'use this as the input file [default: stdin]')
parser.add_option_group(general)
return parser
if __name__ == '__main__':
import optparse
options, args = option_parser().parse_args()
sum = timing_file()
for a in args:
sum = sum.merge(timing_file(a))
sum.write()
|
rich-pixley/zoo-animals
|
statlog-rollup.py
|
Python
|
apache-2.0
| 3,874 | 0.004388 |
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
admin.autodiscover()
import templog.urls
import control.urls
from thermoctrl import views
urlpatterns = [
# Examples:
# url(r'^$', 'thermoctrl.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', views.index, name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^log/', include(templog.urls), name='log'),
url(r'^control/', include(control.urls), name='control'),
    url(r'^login/', auth_views.login, {"SSL": True, "template_name": "main/login.html"}, name='login'),
]
|
DrChat/thermoctrl
|
thermoctrl/urls.py
|
Python
|
mit
| 645 | 0.006202 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from django.utils.translation import ugettext_noop as _
from django.conf import settings
from functools import wraps
from six import string_types
from geonode.notifications_helper import NotificationsAppConfigBase, has_notifications
from django.db.models.signals import post_migrate
log = logging.getLogger(__name__)
def run_setup_hooks(*args, **kwargs):
if not has_notifications:
log.warning("Monitoring requires notifications app to be enabled. "
"Otherwise, no notifications will be send")
from geonode.monitoring.models import populate
populate()
class MonitoringAppConfig(NotificationsAppConfigBase):
name = 'geonode.monitoring'
NOTIFICATION_NAME = 'monitoring_alert'
NOTIFICATIONS = ((NOTIFICATION_NAME,
_("Monitoring alert"),
_("Alert situation reported by monitoring"),
),
)
def ready(self):
super(MonitoringAppConfig, self).ready()
post_migrate.connect(run_setup_hooks, sender=self)
default_app_config = 'geonode.monitoring.MonitoringAppConfig'
def register_url_event(event_type=None):
"""
Decorator on views, which will register url event
usage:
    >> register_url_event()(TemplateView.as_view())
"""
def _register_url_event(view):
@wraps(view)
def inner(*args, **kwargs):
if settings.MONITORING_ENABLED:
request = args[0]
register_event(request, event_type or 'view', request.path)
return view(*args, **kwargs)
return inner
return _register_url_event
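# Illustrative wiring in a urls.py (view name and template are hypothetical):
#
#   url(r'^about/$',
#       register_url_event()(TemplateView.as_view(template_name='about.html'))),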
def register_event(request, event_type, resource):
"""
Wrapper function to be used inside views to collect event and resource
@param request Request object
@param event_type name of event type
@param resource string (then resource type will be url) or Resource instance
>>> from geonode.monitoring import register_event
>>> def view(request):
register_event(request, 'view', layer)
"""
if not settings.MONITORING_ENABLED:
return
from geonode.base.models import ResourceBase
if isinstance(resource, string_types):
resource_type = 'url'
resource_name = request.path
resource_id = None
elif isinstance(resource, ResourceBase):
resource_type = resource.__class__._meta.verbose_name_raw
resource_name = getattr(resource, 'alternate', None) or resource.title
resource_id = resource.id
else:
raise ValueError("Invalid resource: {}".format(resource))
if request and hasattr(request, 'register_event'):
request.register_event(event_type, resource_type, resource_name, resource_id)
def register_proxy_event(request):
"""
Process request to geoserver proxy. Extract layer and ows type
"""
|
tomkralidis/geonode
|
geonode/monitoring/__init__.py
|
Python
|
gpl-3.0
| 3,733 | 0.000804 |
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Original work (C) JUL1EN094, SPM, SylvainCecchetto
Copyright (C) 2016 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
from resources.lib.addon_utils import get_item_media_path
from kodi_six import xbmcplugin
import re
import json
import time
import urlquick
from six.moves.html_parser import HTMLParser
HTML_PARSER = HTMLParser()
TAG_RE = re.compile(r'<[^>]+>')
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
"""
Channels:
* france.tv (https://www.france.tv/)
"""
URL_API_MOBILE = utils.urljoin_partial("https://api-mobile.yatta.francetv.fr/")
URL_API_FRONT = utils.urljoin_partial("http://api-front.yatta.francetv.fr")
@Route.register
def francetv_root(plugin, **kwargs):
# Channels
item = Listitem()
item.label = Script.localize(30006)
item.set_callback(channels)
item_post_treatment(item)
yield item
# Categories
item = Listitem()
item.label = Script.localize(30725)
item.set_callback(categories)
item_post_treatment(item)
yield item
# Search feature
item = Listitem.search(search)
item_post_treatment(item)
yield item
@Route.register
def channels(plugin, **kwargs):
"""
List all france.tv channels
"""
# (item_id, label, thumb, fanart)
channels = [
('channels/france-2', 'France 2', 'france2.png', 'france2_fanart.jpg'),
('channels/france-3', 'France 3', 'france3.png', 'france3_fanart.jpg'),
('channels/france-4', 'France 4', 'france4.png', 'france4_fanart.jpg'),
('channels/france-5', 'France 5', 'france5.png', 'france5_fanart.jpg'),
('channels/france-o', 'France Ô', 'franceo.png', 'franceo_fanart.jpg'),
('regions/outre-mer', 'Outre-mer la 1ère', 'la1ere.png', 'la1ere_fanart.jpg'),
('channels/franceinfo', 'franceinfo:', 'franceinfo.png', 'franceinfo_fanart.jpg'),
('channels/slash', 'France tv Slash', 'slash.png', 'slash_fanart.jpg'),
('channels/enfants', 'Okoo', 'okoo.png', 'okoo_fanart.jpg'),
('channels/spectacles-et-culture', 'Culturebox', 'culturebox.png', 'culturebox_fanart.jpg')
]
for channel_infos in channels:
item = Listitem()
item.label = channel_infos[1]
item.art["thumb"] = get_item_media_path('channels/fr/' + channel_infos[2])
item.art["fanart"] = get_item_media_path('channels/fr/' + channel_infos[3])
item.set_callback(channel_homepage, channel_infos[0])
item_post_treatment(item)
yield item
@Route.register
def channel_homepage(plugin, item_id, **kwargs):
"""
List channel homepage elements
(e.g. https://www.france.tv/france-2/)
"""
r = urlquick.get(URL_API_MOBILE('/apps/%s' % item_id),
params={'platform': 'apps'})
j = json.loads(r.text)
j = j['collections'] if 'collections' in j else j['items']
for collection in j:
item = Listitem()
if set_item_callback_based_on_type(item, collection['type'], collection):
yield item
def set_item_callback_based_on_type(item, type_, j, next_page_item=None):
# First try to populate label
if 'label' in j:
item.label = j['label']
elif 'title' in j:
item.label = j['title']
else:
item.label = 'No title'
if 'description' in j:
item.info['plot'] = j['description']
# Second, try to populate images
if 'images' in j:
populate_images(item, j['images'])
# Then, based on type, try to guess the correct callback
# This is a new path
if type_ == 'program':
item.set_callback(grab_json_collections, URL_API_MOBILE('/apps/program/%s' % j['program_path']))
item_post_treatment(item)
return True
elif type_ == 'sous_categorie':
item.set_callback(grab_json_collections, URL_API_MOBILE('/apps/sub-categories/%s' % j['url_complete']))
item_post_treatment(item)
return True
elif type_ == 'region':
item.set_callback(outre_mer_root, j['region_path'])
item_post_treatment(item)
return True
elif type_ == 'categories':
item.label = 'Les sous-catégories'
item.set_callback(list_generic_items, j['items'], next_page_item)
item_post_treatment(item)
return True
# This is a video
elif type_ == 'integrale' or type_ == 'extrait' or type_ == 'unitaire':
si_id = populate_video_item(item, j)
item.set_callback(get_video_url,
broadcast_id=si_id)
item_post_treatment(item, is_playable=True, is_downloadable=True)
return True
elif 'items' in j:
item.set_callback(list_generic_items, j['items'], next_page_item)
item_post_treatment(item)
return True
return False
def populate_images(item, images):
all_images = {}
for image in images:
if 'type' in image:
type_ = image['type']
if type_ == 'carre':
all_images['carre'] = image['urls']['w:400']
elif type_ == 'vignette_16x9':
all_images['vignette_16x9'] = image['urls']['w:1024']
elif type_ == 'background_16x9':
all_images['background_16x9'] = image['urls']['w:2500']
elif type_ == 'vignette_3x4':
all_images['vignette_3x4'] = image['urls']['w:1024']
if 'vignette_3x4' in all_images:
item.art['thumb'] = item.art['landscape'] = all_images['vignette_3x4']
elif 'carre' in all_images:
item.art['thumb'] = item.art['landscape'] = all_images['carre']
if 'background_16x9' in all_images:
item.art['fanart'] = all_images['background_16x9']
elif 'vignette_16x9' in all_images:
item.art['fanart'] = all_images['vignette_16x9']
def populate_video_item(item, video):
if 'episode_title' in video:
item.label = video['episode_title']
else:
item.label = video['title']
description = video['description']
if description:
item.info['plot'] = TAG_RE.sub('', HTML_PARSER.unescape(description))
begin_date = time.strftime('%Y-%m-%d', time.localtime(video['begin_date']))
item.info.date(begin_date, "%Y-%m-%d")
if 'program' in video and video['program'] is not None and 'label' in video['program']:
item.label = video['program']['label'] + ' - ' + item.label
type_ = video['type']
if type_ == 'extrait':
item.label = '[extrait] ' + item.label
# It's too bad item.info['title'] overrules item.label everywhere
# so there's no difference between what is shown in the video list
# and what is shown in the video details
# item.info['title'] = video['title']
item.info['title'] = item.label
# id_ = video['id']
rating = video['rating_csa_code']
if rating.isdigit():
rating = "-" + rating
item.info['mpaa'] = rating
if "text" in video and video['text']:
item.info['plot'] = video['text']
if "director" in video and video['director']:
item.info['director'] = video['director']
if "saison" in video and video['saison']:
item.info['season'] = video['saison']
if "episode" in video and video['episode']:
# Now we know for sure we are dealing with an episode
item.info['mediatype'] = "episode"
item.info['episode'] = video['episode']
actors = []
if "casting" in video and video['casting']:
actors = [actor.strip() for actor in video['casting'].split(",")]
elif "presenter" in video and video['presenter']:
actors.append(video['presenter'])
item.info['cast'] = actors
if "characters" in video and video['characters']:
characters = [role.strip() for role in video['characters'].split(",")]
if len(actors) > 0 and len(characters) > 0:
item.info['castandrole'] = list(zip_longest(actors, characters))
si_id = video['si_id']
return si_id
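# Note: the si_id returned above is the broadcast identifier that get_video_url()
# later hands to resolver_proxy.get_francetv_video_stream().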
@Route.register
def search(plugin, search_query, **kwargs):
r = urlquick.get(URL_API_MOBILE('/apps/search'),
params={'platform': 'apps',
'filters': 'with-collections',
'term': search_query})
j = json.loads(r.text)
for collection in j['collections']:
item = Listitem()
if set_item_callback_based_on_type(item, collection['type'], collection):
yield item
@Route.register
def categories(plugin, **kwargs):
"""
    List all categories
(e.g. séries & fictions, documentaires, ...)
This folder will also list videos that are not associated with any channel
"""
categories = {
'Séries & fictions': 'series-et-fictions',
'Documentaires': 'documentaires',
'Cinéma': 'films',
'Info & société': 'actualites-et-societe',
'Culture': 'spectacles-et-culture',
'Sports': 'sport',
'Jeux & divertissements': 'jeux-et-divertissements',
'Art de vivre': 'vie-quotidienne',
'Enfants': 'enfants'
}
for category_label, category_path in categories.items():
item = Listitem()
item.label = category_label
item.set_callback(grab_json_collections, URL_API_MOBILE('/apps/categories/%s' % category_path))
item_post_treatment(item)
yield item
@Route.register
def outre_mer_root(plugin, region_path, **kwargs):
menu_items = [
(Script.localize(30704), '/generic/taxonomy/%s/contents'), # Last videos
(Script.localize(30717), '/apps/regions/%s/programs') # All programs
]
for menu_item in menu_items:
item = Listitem()
item.label = menu_item[0]
item.set_callback(grab_json_collections, URL_API_MOBILE(menu_item[1] % region_path), page=0, collection_position=0)
item_post_treatment(item)
yield item
@Route.register
def list_generic_items(plugin, generic_items, next_page_item, **kwargs):
"""
List items of a generic type
"""
plugin.add_sort_methods(xbmcplugin.SORT_METHOD_UNSORTED)
items = []
for collection_item in generic_items:
item = Listitem()
if set_item_callback_based_on_type(item, collection_item['type'], collection_item):
items.append(item)
if next_page_item:
items.append(next_page_item)
return items
@Route.register
def grab_json_collections(plugin, json_url, page=0, collection_position=None, **kwargs):
plugin.add_sort_methods(xbmcplugin.SORT_METHOD_UNSORTED)
r = urlquick.get(json_url,
params={'platform': 'apps',
'page': str(page)})
j = json.loads(r.text)
cnt = -1
items = []
if 'collections' in j:
collections = j['collections']
else:
collections = [j]
for collection in collections:
cnt = cnt + 1
next_page_item = None
if 'cursor' in collection:
if 'next' in collection['cursor']:
next_ = collection['cursor']['next']
if next_:
next_page_item = Listitem.next_page(json_url,
page=next_,
collection_position=cnt)
# If we are not in page 0, directly print items
if collection_position is not None and cnt == collection_position:
return list_generic_items(plugin, collection['items'], next_page_item)
else:
item = Listitem()
if set_item_callback_based_on_type(item, collection['type'], collection, next_page_item):
items.append(item)
if 'item' in j and 'program_path' in j['item']:
item = Listitem()
item.label = Script.localize(30701) # All videos
item.set_callback(grab_json_collections, URL_API_MOBILE('/generic/taxonomy/%s/contents' % j['item']['program_path']), page=0, collection_position=0)
item_post_treatment(item)
items.append(item)
return items
@Resolver.register
def get_video_url(plugin,
broadcast_id=None,
id_yatta=None,
download_mode=False,
**kwargs):
if id_yatta is not None:
url_yatta_video = "standard/publish/contents/%s" % id_yatta
resp = urlquick.get(URL_API_FRONT(url_yatta_video), max_age=-1)
json_parser = json.loads(resp.text)
for medium in json_parser['content_has_medias']:
if "si_id" in medium['media']:
broadcast_id = medium['media']['si_id']
break
return resolver_proxy.get_francetv_video_stream(plugin, broadcast_id,
download_mode)
@Resolver.register
def get_live_url(plugin, item_id, **kwargs):
broadcast_id = 'SIM_France%s'
return resolver_proxy.get_francetv_live_stream(
plugin, broadcast_id % item_id.split('-')[1])
|
SylvainCecchetto/plugin.video.catchuptvandmore
|
plugin.video.catchuptvandmore/resources/lib/channels/fr/francetv.py
|
Python
|
gpl-2.0
| 14,146 | 0.001344 |
# -*- coding: utf-8 -*-
#
# This file is part of the python-chess library.
# Copyright (C) 2012-2016 Niklas Fiekas <niklas.fiekas@backscattering.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import chess
import itertools
import re
import logging
try:
import backport_collections as collections
except ImportError:
import collections
LOGGER = logging.getLogger(__name__)
NAG_NULL = 0
NAG_GOOD_MOVE = 1
"""A good move. Can also be indicated by ``!`` in PGN notation."""
NAG_MISTAKE = 2
"""A mistake. Can also be indicated by ``?`` in PGN notation."""
NAG_BRILLIANT_MOVE = 3
"""A brilliant move. Can also be indicated by ``!!`` in PGN notation."""
NAG_BLUNDER = 4
"""A blunder. Can also be indicated by ``??`` in PGN notation."""
NAG_SPECULATIVE_MOVE = 5
"""A speculative move. Can also be indicated by ``!?`` in PGN notation."""
NAG_DUBIOUS_MOVE = 6
"""A dubious move. Can also be indicated by ``?!`` in PGN notation."""
NAG_FORCED_MOVE = 7
NAG_SINGULAR_MOVE = 8
NAG_WORST_MOVE = 9
NAG_DRAWISH_POSITION = 10
NAG_QUIET_POSITION = 11
NAG_ACTIVE_POSITION = 12
NAG_UNCLEAR_POSITION = 13
NAG_WHITE_SLIGHT_ADVANTAGE = 14
NAG_BLACK_SLIGHT_ADVANTAGE = 15
# TODO: Add more constants for example from
# https://en.wikipedia.org/wiki/Numeric_Annotation_Glyphs
NAG_WHITE_MODERATE_COUNTERPLAY = 132
NAG_BLACK_MODERATE_COUNTERPLAY = 133
NAG_WHITE_DECISIVE_COUNTERPLAY = 134
NAG_BLACK_DECISIVE_COUNTERPLAY = 135
NAG_WHITE_MODERATE_TIME_PRESSURE = 136
NAG_BLACK_MODERATE_TIME_PRESSURE = 137
NAG_WHITE_SEVERE_TIME_PRESSURE = 138
NAG_BLACK_SEVERE_TIME_PRESSURE = 139
TAG_REGEX = re.compile(r"^\[([A-Za-z0-9_]+)\s+\"(.*)\"\]\s*$")
MOVETEXT_REGEX = re.compile(r"""
(%.*?[\n\r])
|(\{.*)
|(\$[0-9]+)
|(\()
|(\))
|(\*|1-0|0-1|1/2-1/2)
|(
[NBKRQ]?[a-h]?[1-8]?[\-x]?[a-h][1-8](?:=?[nbrqkNBRQK])?
|[PNBRQK]?@[a-h][1-8]
|--
|O-O(?:-O)?
|0-0(?:-0)?
)
|([\?!]{1,2})
""", re.DOTALL | re.VERBOSE)
class GameNode(object):
def __init__(self):
self.parent = None
self.move = None
self.nags = set()
self.starting_comment = ""
self.comment = ""
self.variations = []
self.board_cached = None
def board(self, _cache=True):
"""
Gets a board with the position of the node.
It's a copy, so modifying the board will not alter the game.
"""
if self.board_cached:
return self.board_cached.copy()
board = self.parent.board(_cache=False)
board.push(self.move)
if _cache:
self.board_cached = board
return board.copy()
else:
return board
def san(self):
"""
Gets the standard algebraic notation of the move leading to this node.
Do not call this on the root node.
"""
return self.parent.board().san(self.move)
def root(self):
"""Gets the root node, i.e. the game."""
node = self
while node.parent:
node = node.parent
return node
def end(self):
"""Follows the main variation to the end and returns the last node."""
node = self
while node.variations:
node = node.variations[0]
return node
def is_end(self):
"""Checks if this node is the last node in the current variation."""
return not self.variations
def starts_variation(self):
"""
Checks if this node starts a variation (and can thus have a starting
comment). The root node does not start a variation and can have no
starting comment.
"""
if not self.parent or not self.parent.variations:
return False
return self.parent.variations[0] != self
def is_main_line(self):
"""Checks if the node is in the main line of the game."""
node = self
while node.parent:
parent = node.parent
if not parent.variations or parent.variations[0] != node:
return False
node = parent
return True
def is_main_variation(self):
"""
Checks if this node is the first variation from the point of view of its
parent. The root node also is in the main variation.
"""
if not self.parent:
return True
return not self.parent.variations or self.parent.variations[0] == self
def variation(self, move):
"""
Gets a child node by move or index.
"""
for index, variation in enumerate(self.variations):
if move == variation.move or index == move or move == variation:
return variation
raise KeyError("variation not found")
def has_variation(self, move):
"""Checks if the given move appears as a variation."""
return move in (variation.move for variation in self.variations)
def promote_to_main(self, move):
"""Promotes the given move to the main variation."""
variation = self.variation(move)
self.variations.remove(variation)
self.variations.insert(0, variation)
def promote(self, move):
"""Moves the given variation one up in the list of variations."""
variation = self.variation(move)
i = self.variations.index(variation)
if i > 0:
self.variations[i - 1], self.variations[i] = self.variations[i], self.variations[i - 1]
def demote(self, move):
"""Moves the given variation one down in the list of variations."""
variation = self.variation(move)
i = self.variations.index(variation)
if i < len(self.variations) - 1:
self.variations[i + 1], self.variations[i] = self.variations[i], self.variations[i + 1]
def remove_variation(self, move):
"""Removes a variation by move."""
self.variations.remove(self.variation(move))
def add_variation(self, move, comment="", starting_comment="", nags=()):
"""Creates a child node with the given attributes."""
node = GameNode()
node.move = move
node.nags = set(nags)
node.parent = self
node.comment = comment
node.starting_comment = starting_comment
self.variations.append(node)
return node
def add_main_variation(self, move, comment=""):
"""
Creates a child node with the given attributes and promotes it to the
main variation.
"""
node = self.add_variation(move, comment=comment)
self.variations.remove(node)
self.variations.insert(0, node)
return node
def main_line(self):
"""Yields the moves of the main line starting in this node."""
node = self
while node.variations:
node = node.variations[0]
yield node.move
def add_line(self, moves, comment="", starting_comment="", nags=()):
"""
Creates a sequence of child nodes for the given list of moves.
Adds *comment* and *nags* to the last node of the line and returns it.
"""
node = self
# Add line.
for move in moves:
node = node.add_variation(move, starting_comment=starting_comment)
starting_comment = ""
# Merge comment and NAGs.
if node.comment:
node.comment += " " + comment
else:
node.comment = comment
node.nags.update(nags)
return node
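    # Illustrative use of add_line (the moves are hypothetical):
    #   node = game.add_line([chess.Move.from_uci("e2e4"),
    #                         chess.Move.from_uci("e7e5")],
    #                        comment="a classical opening")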
def accept(self, visitor, _board=None):
"""
Traverse game nodes in PGN order using the given *visitor*. Returns
the visitor result.
"""
board = self.board() if _board is None else _board
# The mainline move goes first.
if self.variations:
main_variation = self.variations[0]
visitor.visit_move(board, main_variation.move)
# Visit NAGs.
for nag in sorted(main_variation.nags):
visitor.visit_nag(nag)
# Visit the comment.
if main_variation.comment:
visitor.visit_comment(main_variation.comment)
# Then visit sidelines.
for variation in itertools.islice(self.variations, 1, None):
# Start variation.
visitor.begin_variation()
# Append starting comment.
if variation.starting_comment:
visitor.visit_comment(variation.starting_comment)
# Visit move.
visitor.visit_move(board, variation.move)
# Visit NAGs.
for nag in sorted(variation.nags):
visitor.visit_nag(nag)
# Visit comment.
if variation.comment:
visitor.visit_comment(variation.comment)
# Recursively append the next moves.
board.push(variation.move)
variation.accept(visitor, _board=board)
board.pop()
# End variation.
visitor.end_variation()
# The mainline is continued last.
if self.variations:
main_variation = self.variations[0]
# Recursively append the next moves.
board.push(main_variation.move)
main_variation.accept(visitor, _board=board)
board.pop()
# Get the result if not called recursively.
if _board is None:
return visitor.result()
def __str__(self):
return self.accept(StringExporter(columns=None))
class Game(GameNode):
"""
The root node of a game with extra information such as headers and the
starting position.
By default the following 7 headers are provided in an ordered dictionary:
>>> game = chess.pgn.Game()
>>> game.headers["Event"]
'?'
>>> game.headers["Site"]
'?'
>>> game.headers["Date"]
'????.??.??'
>>> game.headers["Round"]
'?'
>>> game.headers["White"]
'?'
>>> game.headers["Black"]
'?'
>>> game.headers["Result"]
'*'
Also has all the other properties and methods of
:class:`~chess.pgn.GameNode`.
"""
def __init__(self):
super(Game, self).__init__()
self.headers = collections.OrderedDict()
self.headers["Event"] = "?"
self.headers["Site"] = "?"
self.headers["Date"] = "????.??.??"
self.headers["Round"] = "?"
self.headers["White"] = "?"
self.headers["Black"] = "?"
self.headers["Result"] = "*"
self.errors = []
def board(self, _cache=False):
"""
Gets the starting position of the game.
Unless the `SetUp` and `FEN` header tags are set this is the default
starting position.
"""
chess960 = self.headers.get("Variant", "").lower() == "chess960"
if chess960 or "Variant" not in self.headers:
VariantBoard = chess.Board
else:
from chess.variant import find_variant
VariantBoard = find_variant(self.headers["Variant"])
fen = self.headers.get("FEN") if self.headers.get("SetUp", "1") == "1" else None
board = VariantBoard(fen or VariantBoard.starting_fen, chess960)
board.chess960 = board.chess960 or board.has_chess960_castling_rights()
return board
def setup(self, board):
"""
Set up a specific starting position. This sets (or resets) the *SetUp*,
*FEN* and *Variant* header tags.
"""
try:
fen = board.fen()
except AttributeError:
board = chess.Board(board)
board.chess960 = board.has_chess960_castling_rights()
fen = board.fen()
if fen == type(board).starting_fen:
self.headers.pop("SetUp", None)
self.headers.pop("FEN", None)
else:
self.headers["SetUp"] = "1"
self.headers["FEN"] = fen
if type(board).aliases[0] == "Standard" and board.chess960:
self.headers["Variant"] = "Chess960"
elif type(board).aliases[0] != "Standard":
self.headers["Variant"] = type(board).aliases[0]
self.headers["FEN"] = board.fen()
else:
self.headers.pop("Variant", None)
def accept(self, visitor):
"""
Traverses the game in PGN order using the given *visitor*. Returns
the visitor result.
"""
visitor.begin_game()
visitor.begin_headers()
for tagname, tagvalue in self.headers.items():
visitor.visit_header(tagname, tagvalue)
visitor.end_headers()
if self.comment:
visitor.visit_comment(self.comment)
super(Game, self).accept(visitor, _board=self.board())
visitor.visit_result(self.headers.get("Result", "*"))
visitor.end_game()
return visitor.result()
@classmethod
def from_board(cls, board):
"""Creates a game from the move stack of a :class:`~chess.Board()`."""
# Undo all moves.
switchyard = collections.deque()
while board.move_stack:
switchyard.append(board.pop())
# Setup initial position.
game = cls()
game.setup(board)
node = game
# Replay all moves.
while switchyard:
move = switchyard.pop()
node = node.add_variation(move)
board.push(move)
game.headers["Result"] = board.result()
return game
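# --- Illustrative sketch (added for documentation; not part of the original module) ---
# Game.from_board() above rebuilds a game from a board's move stack and leaves
# the board unchanged. A minimal round trip; the helper name and moves are
# assumptions for illustration only.
def _example_game_from_board():
    board = chess.Board()
    board.push_san("e4")
    board.push_san("e5")
    board.push_san("Nf3")
    game = Game.from_board(board)
    # The game's mainline replays the same three moves and the Result header
    # is taken from board.result() ("*" for an unfinished game).
    return game, list(game.main_line())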
class BaseVisitor(object):
"""
Base class for visitors.
Use with :func:`chess.pgn.Game.accept()` or
:func:`chess.pgn.GameNode.accept()`.
Methods are called in PGN order.
"""
def begin_game(self):
"""Called at the start of a game."""
pass
def begin_headers(self):
"""Called at the start of the game headers."""
pass
def visit_header(self, tagname, tagvalue):
"""Called for each game header."""
pass
def end_headers(self):
"""Called at the end of the game headers."""
pass
def visit_move(self, board, move):
"""
Called for each move.
*board* is the board state before the move. The board state must be
restored before the traversal continues.
"""
pass
def visit_comment(self, comment):
"""Called for each comment."""
pass
def visit_nag(self, nag):
"""Called for each NAG."""
pass
def begin_variation(self):
"""
Called at the start of a new variation. It is not called for the
mainline of the game.
"""
pass
def end_variation(self):
"""Concludes a variation."""
pass
def visit_result(self, result):
"""Called at the end of the game with the *Result*-header."""
pass
def end_game(self):
"""Called at the end of a game."""
pass
def result(self):
"""Called to get the result of the visitor. Defaults to ``True``."""
return True
def handle_error(self, error):
"""Called for errors encountered. Defaults to raising an exception."""
raise error
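# --- Illustrative sketch (added for documentation; not part of the original module) ---
# BaseVisitor is meant to be subclassed, overriding only the callbacks you
# care about. The small counter below is an assumed example, not library code;
# pass an instance to Game.accept() or GameNode.accept() to count the moves of
# the mainline and all sidelines, e.g. total = game.accept(_MoveCountVisitor()).
class _MoveCountVisitor(BaseVisitor):
    def __init__(self):
        self.moves = 0

    def visit_move(self, board, move):
        # Called once per move in PGN order; *board* is the pre-move position.
        self.moves += 1

    def result(self):
        return self.moves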
class GameModelCreator(BaseVisitor):
"""
Creates a game model. Default visitor for :func:`~chess.pgn.read_game()`.
"""
def __init__(self):
self.game = Game()
self.variation_stack = collections.deque([self.game])
self.starting_comment = ""
self.in_variation = False
def visit_header(self, tagname, tagvalue):
self.game.headers[tagname] = tagvalue
def visit_nag(self, nag):
self.variation_stack[-1].nags.add(nag)
def begin_variation(self):
self.variation_stack.append(self.variation_stack[-1].parent)
self.in_variation = False
def end_variation(self):
self.variation_stack.pop()
def visit_result(self, result):
if self.game.headers.get("Result", "*") == "*":
self.game.headers["Result"] = result
def visit_comment(self, comment):
if self.in_variation or (not self.variation_stack[-1].parent and self.variation_stack[-1].is_end()):
# Add as a comment for the current node if in the middle of
# a variation. Add as a comment for the game, if the comment
# starts before any move.
new_comment = [self.variation_stack[-1].comment, comment]
self.variation_stack[-1].comment = "\n".join(new_comment).strip()
else:
# Otherwise it is a starting comment.
new_comment = [self.starting_comment, comment]
self.starting_comment = "\n".join(new_comment).strip()
def visit_move(self, board, move):
self.variation_stack[-1] = self.variation_stack[-1].add_variation(move)
self.variation_stack[-1].starting_comment = self.starting_comment
self.starting_comment = ""
self.in_variation = True
def handle_error(self, error):
"""
Populates :data:`chess.pgn.Game.errors` with encountered errors and
logs them.
"""
LOGGER.exception("error during pgn parsing")
self.game.errors.append(error)
def result(self):
"""
Returns the visited :class:`~chess.pgn.Game()`.
"""
return self.game
class StringExporter(BaseVisitor):
"""
Allows exporting a game as a string.
>>> exporter = chess.pgn.StringExporter(headers=True, variations=True, comments=True)
>>> pgn_string = game.accept(exporter)
Only *columns* characters are written per line. If *columns* is ``None``
then the entire movetext will be on a single line. This does not affect
header tags and comments.
There will be no newlines at the end of the string.
"""
def __init__(self, columns=80, headers=True, comments=True, variations=True):
self.columns = columns
self.headers = headers
self.comments = comments
self.variations = variations
self.force_movenumber = True
self.lines = []
self.current_line = ""
self.variation_depth = 0
def flush_current_line(self):
if self.current_line:
self.lines.append(self.current_line.rstrip())
self.current_line = ""
def write_token(self, token):
if self.columns is not None and self.columns - len(self.current_line) < len(token):
self.flush_current_line()
self.current_line += token
def write_line(self, line=""):
self.flush_current_line()
self.lines.append(line.rstrip())
def begin_game(self):
self.after_variation = True
def end_game(self):
self.write_line()
def visit_header(self, tagname, tagvalue):
if self.headers:
self.write_line("[{0} \"{1}\"]".format(tagname, tagvalue))
def end_headers(self):
if self.headers:
self.write_line()
def begin_variation(self):
self.variation_depth += 1
if self.variations:
self.write_token("( ")
self.force_movenumber = True
def end_variation(self):
self.variation_depth -= 1
if self.variations:
self.write_token(") ")
self.force_movenumber = True
def visit_comment(self, comment):
if self.comments and (self.variations or not self.variation_depth):
self.write_token("{ " + comment.replace("}", "").strip() + " } ")
self.force_movenumber = True
def visit_nag(self, nag):
if self.comments and (self.variations or not self.variation_depth):
self.write_token("$" + str(nag) + " ")
def visit_move(self, board, move):
if self.variations or not self.variation_depth:
# Write the move number.
if board.turn == chess.WHITE:
self.write_token(str(board.fullmove_number) + ". ")
elif self.force_movenumber:
self.write_token(str(board.fullmove_number) + "... ")
# Write the SAN.
self.write_token(board.san(move) + " ")
self.force_movenumber = False
def visit_result(self, result):
self.write_token(result + " ")
def result(self):
if self.current_line:
return "\n".join(itertools.chain(self.lines, [self.current_line.rstrip()])).rstrip()
else:
return "\n".join(self.lines).rstrip()
def __str__(self):
return self.result()
class FileExporter(StringExporter):
"""
Like a :class:`~chess.pgn.StringExporter`, but games are written directly
to a text file.
There will always be a blank line after each game. Handling encodings is up
to the caller.
>>> new_pgn = open("new.pgn", "w", encoding="utf-8")
>>> exporter = chess.pgn.FileExporter(new_pgn)
>>> game.accept(exporter)
"""
def __init__(self, handle, columns=80, headers=True, comments=True, variations=True):
super(FileExporter, self).__init__(columns=columns, headers=headers, comments=comments, variations=variations)
self.handle = handle
def flush_current_line(self):
if self.current_line:
self.handle.write(self.current_line.rstrip())
self.handle.write("\n")
self.current_line = ""
def write_line(self, line=""):
self.flush_current_line()
self.handle.write(line.rstrip())
self.handle.write("\n")
def result(self):
return None
def __repr__(self):
return "<FileExporter at {0}>".format(hex(id(self)))
def __str__(self):
return self.__repr__()
def read_game(handle, Visitor=GameModelCreator):
"""
Reads a game from a file opened in text mode.
>>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn")
>>> first_game = chess.pgn.read_game(pgn)
>>> second_game = chess.pgn.read_game(pgn)
>>>
>>> first_game.headers["Event"]
'IBM Man-Machine, New York USA'
By using text mode, the parser does not need to handle encodings. It is the
caller's responsibility to open the file with the correct encoding.
PGN files are ASCII or UTF-8 most of the time. So the following should
cover most relevant cases (ASCII, UTF-8 without BOM, UTF-8 with BOM,
UTF-8 with encoding errors).
>>> pgn = open("data/pgn/kasparov-deep-blue-1997.pgn", encoding="utf-8-sig", errors="surrogateescape")
Use `StringIO` to parse games from a string.
>>> pgn_string = "1. e4 e5 2. Nf3 *"
>>>
>>> try:
>>> from StringIO import StringIO # Python 2
>>> except ImportError:
>>> from io import StringIO # Python 3
>>>
>>> pgn = StringIO(pgn_string)
>>> game = chess.pgn.read_game(pgn)
The end of a game is determined by a completely blank line or the end of
the file. (Of course blank lines in comments are possible.)
According to the standard, at least the usual 7 header tags are required
for a valid game. This parser also handles games without any headers just
fine.
The parser is relatively forgiving when it comes to errors. It skips over
tokens it cannot parse. Any exceptions are logged.
Returns the parsed game or ``None`` if the EOF is reached.
"""
visitor = Visitor()
dummy_game = Game()
found_game = False
found_content = False
line = handle.readline()
# Parse game headers.
while line:
# Skip empty lines and comments.
if not line.strip() or line.strip().startswith("%"):
line = handle.readline()
continue
if not found_game:
visitor.begin_game()
visitor.begin_headers()
found_game = True
# Read header tags.
tag_match = TAG_REGEX.match(line)
if tag_match:
dummy_game.headers[tag_match.group(1)] = tag_match.group(2)
visitor.visit_header(tag_match.group(1), tag_match.group(2))
else:
break
line = handle.readline()
if found_game:
visitor.end_headers()
# Get the next non-empty line.
while not line.strip() and line:
line = handle.readline()
# Movetext parser state.
try:
board_stack = collections.deque([dummy_game.board()])
except ValueError as error:
visitor.handle_error(error)
board_stack = collections.deque([chess.Board()])
# Parse movetext.
while line:
read_next_line = True
# An empty line is the end of a game.
if not line.strip() and found_content:
if found_game:
visitor.end_game()
return visitor.result()
else:
return
for match in MOVETEXT_REGEX.finditer(line):
token = match.group(0)
if token.startswith("%"):
# Ignore the rest of the line.
line = handle.readline()
continue
if not found_game:
found_game = True
visitor.begin_game()
if token.startswith("{"):
# Consume until the end of the comment.
line = token[1:]
comment_lines = []
while line and "}" not in line:
comment_lines.append(line.rstrip())
line = handle.readline()
end_index = line.find("}")
comment_lines.append(line[:end_index])
if "}" in line:
line = line[end_index:]
else:
line = ""
visitor.visit_comment("\n".join(comment_lines).strip())
# Continue with the current or the next line.
if line:
read_next_line = False
break
elif token.startswith("$"):
# Found a NAG.
try:
nag = int(token[1:])
except ValueError as error:
visitor.handle_error(error)
else:
visitor.visit_nag(nag)
elif token == "?":
visitor.visit_nag(NAG_MISTAKE)
elif token == "??":
visitor.visit_nag(NAG_BLUNDER)
elif token == "!":
visitor.visit_nag(NAG_GOOD_MOVE)
elif token == "!!":
visitor.visit_nag(NAG_BRILLIANT_MOVE)
elif token == "!?":
visitor.visit_nag(NAG_SPECULATIVE_MOVE)
elif token == "?!":
visitor.visit_nag(NAG_DUBIOUS_MOVE)
elif token == "(":
if board_stack[-1].move_stack:
visitor.begin_variation()
board = board_stack[-1].copy()
board.pop()
board_stack.append(board)
elif token == ")":
# Found a close variation token. Always leave at least the
# root node on the stack.
if len(board_stack) > 1:
visitor.end_variation()
board_stack.pop()
elif token in ["1-0", "0-1", "1/2-1/2", "*"] and len(board_stack) == 1:
# Found a result token.
found_content = True
visitor.visit_result(token)
else:
# Found a SAN token.
found_content = True
# Replace zeros castling notation.
if token == "0-0":
token = "O-O"
elif token == "0-0-0":
token = "O-O-O"
# Parse the SAN.
try:
move = board_stack[-1].parse_san(token)
except ValueError as error:
visitor.handle_error(error)
else:
visitor.visit_move(board_stack[-1], move)
board_stack[-1].push(move)
if read_next_line:
line = handle.readline()
if found_game:
visitor.end_game()
return visitor.result()
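# --- Illustrative sketch (added for documentation; not part of the original module) ---
# read_game() returns one game per call and None at the end of the file, so
# iterating over a whole PGN file is a simple loop. The helper name and the
# choice to collect the Result headers are assumptions for illustration; any
# per-game processing could take their place.
def _example_read_all_games(handle):
    results = []
    while True:
        game = read_game(handle)
        if game is None:
            break
        results.append(game.headers.get("Result", "*"))
    return results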
def scan_headers(handle):
"""
Scan a PGN file opened in text mode for game offsets and headers.
Yields a tuple for each game. The first element is the offset. The second
element is an ordered dictionary of game headers.
Since actually parsing many games from a big file is relatively expensive,
this is a better way to look only for specific games and seek and parse
them later.
This example scans for the first game with Kasparov as the white player.
>>> pgn = open("mega.pgn")
>>> for offset, headers in chess.pgn.scan_headers(pgn):
... if "Kasparov" in headers["White"]:
... kasparov_offset = offset
... break
Then it can later be seeked and parsed.
>>> pgn.seek(kasparov_offset)
>>> game = chess.pgn.read_game(pgn)
This also works nicely with generators, scanning lazily only when the next
offset is required.
>>> white_win_offsets = (offset for offset, headers in chess.pgn.scan_headers(pgn)
... if headers["Result"] == "1-0")
>>> first_white_win = next(white_win_offsets)
>>> second_white_win = next(white_win_offsets)
:warning: Be careful when seeking a game in the file while more offsets are
being generated.
"""
in_comment = False
game_headers = None
game_pos = None
last_pos = handle.tell()
line = handle.readline()
while line:
# Skip single line comments.
if line.startswith("%"):
last_pos = handle.tell()
line = handle.readline()
continue
# Reading a header tag. Parse it and add it to the current headers.
if not in_comment and line.startswith("["):
tag_match = TAG_REGEX.match(line)
if tag_match:
if game_pos is None:
game_headers = collections.OrderedDict()
game_headers["Event"] = "?"
game_headers["Site"] = "?"
game_headers["Date"] = "????.??.??"
game_headers["Round"] = "?"
game_headers["White"] = "?"
game_headers["Black"] = "?"
game_headers["Result"] = "*"
game_pos = last_pos
game_headers[tag_match.group(1)] = tag_match.group(2)
last_pos = handle.tell()
line = handle.readline()
continue
# Reading movetext. Update parser state in_comment in order to skip
# comments that look like header tags.
if (not in_comment and "{" in line) or (in_comment and "}" in line):
in_comment = line.rfind("{") > line.rfind("}")
# Reading movetext. If headers were read previously, those are now
# complete and can be yielded.
if game_pos is not None:
yield game_pos, game_headers
game_pos = None
last_pos = handle.tell()
line = handle.readline()
# Yield the headers of the last game.
if game_pos is not None:
yield game_pos, game_headers
def scan_offsets(handle):
"""
Scan a PGN file opened in text mode for game offsets.
Yields the starting offsets of all the games, so that they can be seeked
later. This is just like :func:`~chess.pgn.scan_headers()` but more
efficient if you do not actually need the header information.
The PGN standard requires each game to start with an *Event*-tag. So does
this scanner.
"""
in_comment = False
last_pos = handle.tell()
line = handle.readline()
while line:
if not in_comment and line.startswith("[Event \""):
yield last_pos
elif (not in_comment and "{" in line) or (in_comment and "}" in line):
in_comment = line.rfind("{") > line.rfind("}")
last_pos = handle.tell()
line = handle.readline()
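# --- Illustrative sketch (added for documentation; not part of the original module) ---
# scan_offsets() pairs naturally with seek() and read_game(): scan cheaply,
# remember the offsets of interest, then seek back and fully parse only those
# games. Offsets are collected first, which also respects the warning above
# about seeking while the generator is still running. The helper name and the
# "first n games" selection are assumptions for illustration.
def _example_parse_first_games(handle, n=3):
    offsets = []
    for offset in scan_offsets(handle):
        offsets.append(offset)
        if len(offsets) >= n:
            break
    games = []
    for offset in offsets:
        handle.seek(offset)
        games.append(read_game(handle))
    return games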
|
johncheetham/jcchess
|
chess/pgn.py
|
Python
|
gpl-3.0
| 32,586 | 0.00043 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Astyle(MakefilePackage):
"""A Free, Fast, and Small Automatic Formatter for C, C++, C++/CLI,
Objective-C, C#, and Java Source Code.
"""
homepage = "http://astyle.sourceforge.net/"
url = "https://sourceforge.net/projects/astyle/files/astyle/astyle%203.0.1/astyle_3.0.1_linux.tar.gz"
# Gentoo alternative
# url = "http://distfiles.gentoo.org/distfiles/astyle_3.0.1_linux.tar.gz"
maintainers = ['davydden']
version('3.0.1', 'c301f09679efa2e1eb6e6b5fd33788b4')
version('2.06', 'ff588e7fcede824591cf5b9085df109d')
version('2.05.1', '4142d178047d7040da3e0e2f1b030a1a')
version('2.04', '30b1193a758b0909d06e7ee8dd9627f6')
parallel = False
@property
def build_directory(self):
return join_path(self.stage.source_path, 'build', self.compiler.name)
def edit(self, spec, prefix):
makefile = join_path(self.build_directory, 'Makefile')
filter_file(r'^CXX\s*=.*', 'CXX=%s' % spack_cxx, makefile)
# strangely enough, install -o $(USER) -g $(USER) stopped working on OSX
if sys.platform == 'darwin':
filter_file(r'^INSTALL=.*', 'INSTALL=install', makefile)
@property
def install_targets(self):
return ['install', 'prefix={0}'.format(self.prefix)]
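# Note (added for illustration; not part of the original package recipe):
# MakefilePackage drives the edit -> build -> install phases automatically,
# using the build_directory and install_targets defined above, so a typical
# command-line use of this recipe is simply
#
#     spack install astyle@3.0.1
#
# The exact spec string is only an example; any of the versions declared
# above can be requested.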
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/astyle/package.py
|
Python
|
lgpl-2.1
| 2,548 | 0.000392 |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.subtitle_file import SubtitleFile
class TestSubtitleFile(unittest.TestCase):
""" SubtitleFile unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testSubtitleFile(self):
"""
Test SubtitleFile
"""
model = kinow_client.models.subtitle_file.SubtitleFile()
if __name__ == '__main__':
unittest.main()
|
kinow-io/kinow-python-sdk
|
test/test_subtitle_file.py
|
Python
|
apache-2.0
| 747 | 0.001339 |
"""
Trends library module.
"""
import datetime
from lib import database as db
from lib.twitter_api import authentication
# Global object to be used as api connection. During execution of the insert
# function, this can be setup once with default app then reused later,
# to avoid time calling Twitter API. It can be left as null if not needed.
appApi = None
def insertTrendsForWoeid(woeid, userApi=None, delete=False, verbose=True):
"""
Retrieve Trend data from the Twitter API for a place and insert into the
database.
Expects a WOEID value for a Place, gets up to 50 trend records for the
Place as limited by the API and stores each of the values in the Trend
table.
From the API request response, we ignore the location field (which we know
already) and the time field (since we just use current time as close
enough).
For printing of the added trend, it works normally to print the string as
'...{}'.format, even if the value is 'Jonathan Garc\xeda'. This was tested
in the bash console of Python Anywhere. However, when running as a cronjob
and outputting to a log file, the value appears to be converted to ASCII and
throws an error. Therefore the string is encoded to ASCII with the character
replaced, even though this is less readable.
:param woeid: Integer for WOEID value of a Place.
:param userApi: tweepy API connection object. Set this with a
user-authorised connection to skip the default behaviour of generating
and using an app-authorised connection.
:param delete: Boolean, default False. If set to True, delete item after
it is inserted into db. This is useful for testing.
:param verbose: Print details for each trend added.
"""
global appApi
now = datetime.datetime.now()
print(f"{now.strftime('%x %X')} Inserting trend data for WOEID {woeid}")
assert isinstance(
woeid, int
), f"Expected WOEID as type `int` but got type `{type(woeid).__name__}`."
if userApi:
# Use user token.
api = userApi
else:
# Use app token.
if not appApi:
# Set it if necessary and then reuse it next time.
appApi = authentication.getAPIConnection()
api = appApi
response = api.trends_place(woeid)[0]
trends = response["trends"]
for x in trends:
topic = x["name"]
volume = x["tweet_volume"]
t = db.Trend(topic=topic, volume=volume).setPlace(woeid)
if verbose:
print(
"Added trend: {tweetID:4d} | {topic:25} - {volume:7,d} K |"
" {woeid:10} - {place}.".format(
tweetID=t.id,
topic=t.topic,
volume=(t.volume // 1000 if t.volume else 0),
woeid=t.place.woeid,
place=t.place.name,
)
)
if delete:
db.Trend.delete(t.id)
if verbose:
print(" - removed from db.")
return len(trends)
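def _example_insert_worldwide_trends():
    """Illustrative usage sketch added for documentation; not part of the
    original module. The helper name and WOEID 1 ("Worldwide") are example
    values, and the project's database from `lib.database` must already be
    set up.
    """
    # Default path: an app-authorised API connection is created lazily on the
    # first call and reused afterwards via the module-level `appApi`.
    inserted = insertTrendsForWoeid(1)
    # Insert-then-delete pass, useful for testing without leaving rows behind.
    insertTrendsForWoeid(1, delete=True, verbose=False)
    return inserted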
|
MichaelCurrin/twitterverse
|
app/lib/trends.py
|
Python
|
mit
| 3,038 | 0 |