repo_name | path | copies | size | content | license
---|---|---|---|---|---
jhbsz/OSI-OS | contrib/wpa/wpa_supplicant/examples/p2p/p2p_group_add.py | 29 | 5810 |
#!/usr/bin/python
# Tests p2p_group_add
######### MAY NEED TO RUN AS SUDO #############
import dbus
import sys, os
import time
import gobject
import getopt
import threading
from dbus.mainloop.glib import DBusGMainLoop
def usage():
print "Usage:"
print " %s -i <interface_name> [-p <persistent>] \ " \
% sys.argv[0]
print " [-f <frequency>] [-o <group_object_path>] \ "
print " [-w <wpas_dbus_interface>]"
print "Options:"
print " -i = interface name"
print " -p = persistant group = 0 (0=false, 1=true)"
print " -f = frequency"
print " -o = persistent group object path"
print " -w = wpas dbus interface = fi.w1.wpa_supplicant1"
print "Example:"
print " %s -i wlan0" % sys.argv[0]
# Required Signals
def GroupStarted(properties):
if properties.has_key("group_object"):
print 'Group Formation Complete %s' \
% properties["group_object"]
os._exit(0)
def WpsFailure(status, etc):
print "WPS Authentication Failure".format(status)
print etc
os._exit(0)
class P2P_Group_Add (threading.Thread):
# Needed Variables
global bus
global wpas_object
global interface_object
global p2p_interface
global interface_name
global wpas
global wpas_dbus_interface
global path
global persistent
global frequency
global persistent_group_object
# Dbus Paths
global wpas_dbus_opath
global wpas_dbus_interfaces_opath
global wpas_dbus_interfaces_interface
global wpas_dbus_interfaces_p2pdevice
# Arguments
global P2PDictionary
# Constructor
def __init__(self,interface_name,wpas_dbus_interface,persistent,frequency,
persistent_group_object):
# Initializes variables and threads
self.interface_name = interface_name
self.wpas_dbus_interface = wpas_dbus_interface
self.persistent = persistent
self.frequency = frequency
self.persistent_group_object = persistent_group_object
# Initializes thread and daemon allows for ctrl-c kill
threading.Thread.__init__(self)
self.daemon = True
# Generating interface/object paths
self.wpas_dbus_opath = "/" + \
self.wpas_dbus_interface.replace(".","/")
self.wpas_dbus_interfaces_opath = self.wpas_dbus_opath + \
"/Interfaces"
self.wpas_dbus_interfaces_interface = \
self.wpas_dbus_interface + ".Interface"
self.wpas_dbus_interfaces_p2pdevice = \
self.wpas_dbus_interfaces_interface \
+ ".P2PDevice"
# Getting interfaces and objects
DBusGMainLoop(set_as_default=True)
self.bus = dbus.SystemBus()
self.wpas_object = self.bus.get_object(
self.wpas_dbus_interface,
self.wpas_dbus_opath)
self.wpas = dbus.Interface(self.wpas_object,
self.wpas_dbus_interface)
# Try to see if supplicant knows about interface
# If not, throw an exception
try:
self.path = self.wpas.GetInterface(
self.interface_name)
except dbus.DBusException, exc:
error = 'Error:\n Interface ' + self.interface_name \
+ ' was not found'
print error
usage()
os._exit(0)
self.interface_object = self.bus.get_object(
self.wpas_dbus_interface, self.path)
self.p2p_interface = dbus.Interface(self.interface_object,
self.wpas_dbus_interfaces_p2pdevice)
#Adds listeners
self.bus.add_signal_receiver(GroupStarted,
dbus_interface=self.wpas_dbus_interfaces_p2pdevice,
signal_name="GroupStarted")
self.bus.add_signal_receiver(WpsFailure,
dbus_interface=self.wpas_dbus_interfaces_p2pdevice,
signal_name="WpsFailed")
# Sets up p2p_group_add dictionary
def constructArguements(self):
self.P2PDictionary = {'persistent':self.persistent}
if (self.frequency != None):
if (int(self.frequency) > 0):
self.P2PDictionary.update({'frequency':int(self.frequency)})
else:
print "Error:\n Frequency must be greater than 0"
usage()
os._exit(0)
if (self.persistent_group_object != None):
self.P2PDictionary.update({'persistent_group_object':
self.persistent_group_object})
# Run p2p_group_add
def run(self):
try:
self.p2p_interface.GroupAdd(self.P2PDictionary)
except:
print "Error:\n Could not preform group add"
usage()
os._exit(0)
# Allows other threads to keep working while MainLoop runs
# Required for timeout implementation
gobject.MainLoop().get_context().iteration(True)
gobject.threads_init()
gobject.MainLoop().run()
if __name__ == "__main__":
# Defaults for optional inputs
# 0 = false, 1 = true
persistent = False
frequency = None
persistent_group_object = None
wpas_dbus_interface = 'fi.w1.wpa_supplicant1'
# interface_name is required
interface_name = None
# Using getopts to handle options
try:
options, args = getopt.getopt(sys.argv[1:],"hi:p:f:o:w:")
except getopt.GetoptError:
usage()
quit()
# If there's a switch, override the default option
for key, value in options:
# Help
if (key == "-h"):
usage()
quit()
# Interface Name
elif (key == "-i"):
interface_name = value
# Persistent group
elif (key == "-p"):
if (value == '0'):
persistent = False
elif (value == '1'):
persistent = True
else:
print "Error:\n Persistent can only be 1 or 0"
usage()
os._exit(0)
# Frequency
elif (key == "-f"):
frequency = value
# Persistent group object path
elif (key == "-o"):
persistent_group_object = value
# Dbus interface
elif (key == "-w"):
wpas_dbus_interface = value
else:
assert False, "unhandled option"
# Interface name is required and was not given
if (interface_name == None):
print "Error:\n interface_name is required"
usage()
quit()
try:
p2p_group_add_test = P2P_Group_Add(interface_name,wpas_dbus_interface,
persistent,frequency,persistent_group_object)
except:
print "Error:\n Invalid Arguements"
p2p_group_add_test.constructArguements()
p2p_group_add_test.start()
time.sleep(5)
print "Error:\n Group formation timed out"
os._exit(0)
| bsd-3-clause
puckipedia/youtube-dl | youtube_dl/extractor/elpais.py | 128 | 2110 |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import unified_strdate
class ElPaisIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
IE_DESC = 'El País'
_TEST = {
'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',
'md5': '98406f301f19562170ec071b83433d55',
'info_dict': {
'id': 'tiempo-nuevo-recetas-viejas',
'ext': 'mp4',
'title': 'Tiempo nuevo, recetas viejas',
'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.',
'upload_date': '20140206',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
prefix = self._html_search_regex(
r'var url_cache = "([^"]+)";', webpage, 'URL prefix')
video_suffix = self._search_regex(
r"URLMediaFile = url_cache \+ '([^']+)'", webpage, 'video URL')
video_url = prefix + video_suffix
thumbnail_suffix = self._search_regex(
r"URLMediaStill = url_cache \+ '([^']+)'", webpage, 'thumbnail URL',
fatal=False)
thumbnail = (
None if thumbnail_suffix is None
else prefix + thumbnail_suffix)
title = self._html_search_regex(
'<h2 class="entry-header entry-title.*?>(.*?)</h2>',
webpage, 'title')
date_str = self._search_regex(
r'<p class="date-header date-int updated"\s+title="([^"]+)">',
webpage, 'upload date', fatal=False)
upload_date = (None if date_str is None else unified_strdate(date_str))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': thumbnail,
'upload_date': upload_date,
}
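# Added note (not part of the extractor): the 'id' named group of _VALID_URL is the
# last path component without '.html', so for the _TEST URL above:
#   import re
#   re.match(ElPaisIE._VALID_URL, ElPaisIE._TEST['url']).group('id')
#   # -> 'tiempo-nuevo-recetas-viejas'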
| unlicense
icebluechao/stuttgart_kernel | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 |
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
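# Descriptive note (added): find_time_slice() above is a plain binary search over the
# ordered, non-overlapping slices in self.data; it returns the matching index, or -1
# when the timestamp falls outside every recorded slice.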
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
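# Rough usage sketch (added; exact events and options depend on the perf version):
#   perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#       -e sched:sched_wakeup -e sched:sched_wakeup_new -- sleep 10
#   perf script -s sched-migration.py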
| gpl-2.0
benthomasson/ansible | lib/ansible/plugins/connection/netconf.py | 22 | 6114 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import logging
import json
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins import netconf_loader
from ansible.plugins.connection import ConnectionBase, ensure_connect
from ansible.utils.jsonrpc import Rpc
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml
except ImportError:
raise AnsibleError("ncclient is not installed")
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
logging.getLogger('ncclient').setLevel(logging.INFO)
class Connection(Rpc, ConnectionBase):
"""NetConf connections"""
transport = 'netconf'
has_pipelining = False
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._network_os = self._play_context.network_os or 'default'
display.display('network_os is set to %s' % self._network_os, log_only=True)
self._manager = None
self._connected = False
def _connect(self):
super(Connection, self)._connect()
display.display('ssh connection done, starting ncclient', log_only=True)
self.allow_agent = True
if self._play_context.password is not None:
self.allow_agent = False
self.key_filename = None
if self._play_context.private_key_file:
self.key_filename = os.path.expanduser(self._play_context.private_key_file)
network_os = self._play_context.network_os
if not network_os:
for cls in netconf_loader.all(class_only=True):
network_os = cls.guess_network_os(self)
if network_os:
display.display('discovered network_os %s' % network_os, log_only=True)
if not network_os:
raise AnsibleConnectionFailure('Unable to automatically determine host network os. Please set the ansible_network_os value')
try:
self._manager = manager.connect(
host=self._play_context.remote_addr,
port=self._play_context.port or 830,
username=self._play_context.remote_user,
password=self._play_context.password,
key_filename=str(self.key_filename),
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=self.allow_agent,
timeout=self._play_context.timeout,
device_params={'name': network_os}
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
if not self._manager.connected:
return 1, b'', b'not connected'
display.display('ncclient manager object created successfully', log_only=True)
self._connected = True
self._netconf = netconf_loader.get(network_os, self)
if self._netconf:
self._rpc.add(self._netconf)
display.display('loaded netconf plugin for network_os %s' % network_os, log_only=True)
else:
display.display('unable to load netconf for network_os %s' % network_os)
return 0, to_bytes(self._manager.session_id, errors='surrogate_or_strict'), b''
def close(self):
if self._manager:
self._manager.close_session()
self._connected = False
super(Connection, self).close()
@ensure_connect
def exec_command(self, request):
"""Sends the request to the node and returns the reply
The method accepts two forms of request. The first form is a byte
string that represents an XML string to be sent over the netconf session.
The second form is a json-rpc (2.0) byte string.
"""
try:
obj = json.loads(to_text(request, errors='surrogate_or_strict'))
if 'jsonrpc' in obj:
if self._netconf:
out = self._exec_rpc(obj)
else:
out = self.internal_error("netconf plugin is not supported for network_os %s" % self._play_context.network_os)
return 0, to_bytes(out, errors='surrogate_or_strict'), b''
else:
err = self.invalid_request(obj)
return 1, b'', to_bytes(err, errors='surrogate_or_strict')
except (ValueError, TypeError):
# to_ele operates on native strings
request = to_native(request, errors='surrogate_or_strict')
req = to_ele(request)
if req is None:
return 1, b'', b'unable to parse request'
try:
reply = self._manager.rpc(req)
except RPCError as exc:
return 1, b'', to_bytes(to_xml(exc.xml), errors='surrogate_or_strict')
return 0, to_bytes(reply.data_xml, errors='surrogate_or_strict'), b''
def put_file(self, in_path, out_path):
"""Transfer a file from local to remote"""
pass
def fetch_file(self, in_path, out_path):
"""Fetch a file from remote to local"""
pass
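# Added illustration of the two request forms exec_command() accepts (the RPC payload
# and method names below are assumptions, not taken from this module):
#   conn.exec_command(b'<get-config><source><running/></source></get-config>')
#   conn.exec_command(b'{"jsonrpc": "2.0", "method": "get_config", "id": 1}')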
| gpl-3.0
Livefyre/django-cms | cms/test_utils/project/placeholderapp/south_migrations/0001_initial.py | 46 | 10010 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Example1'
db.create_table(u'placeholderapp_example1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('char_1', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_2', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_3', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_4', self.gf('django.db.models.fields.CharField')(max_length=255)),
('date_field', self.gf('django.db.models.fields.DateField')(null=True)),
('placeholder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
))
db.send_create_signal(u'placeholderapp', ['Example1'])
# Adding model 'TwoPlaceholderExample'
db.create_table(u'placeholderapp_twoplaceholderexample', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('char_1', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_2', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_3', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_4', self.gf('django.db.models.fields.CharField')(max_length=255)),
('placeholder_1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='p1', null=True, to=orm['cms.Placeholder'])),
('placeholder_2', self.gf('django.db.models.fields.related.ForeignKey')(related_name='p2', null=True, to=orm['cms.Placeholder'])),
))
db.send_create_signal(u'placeholderapp', ['TwoPlaceholderExample'])
# Adding model 'DynamicPlaceholderSlotExample'
db.create_table(u'placeholderapp_dynamicplaceholderslotexample', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('char_1', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_2', self.gf('django.db.models.fields.CharField')(max_length=255)),
('placeholder_1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dynamic_pl_1', null=True, to=orm['cms.Placeholder'])),
('placeholder_2', self.gf('django.db.models.fields.related.ForeignKey')(related_name='dynamic_pl_2', null=True, to=orm['cms.Placeholder'])),
))
db.send_create_signal(u'placeholderapp', ['DynamicPlaceholderSlotExample'])
# Adding model 'CharPksExample'
db.create_table(u'placeholderapp_charpksexample', (
('char_1', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255, primary_key=True)),
('placeholder_1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='charpk_p1', null=True, to=orm['cms.Placeholder'])),
))
db.send_create_signal(u'placeholderapp', ['CharPksExample'])
# Adding model 'MultilingualExample1Translation'
db.create_table(u'placeholderapp_multilingualexample1_translation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('char_1', self.gf('django.db.models.fields.CharField')(max_length=255)),
('char_2', self.gf('django.db.models.fields.CharField')(max_length=255)),
('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['placeholderapp.MultilingualExample1'])),
))
db.send_create_signal(u'placeholderapp', ['MultilingualExample1Translation'])
# Adding unique constraint on 'MultilingualExample1Translation', fields ['language_code', 'master']
db.create_unique(u'placeholderapp_multilingualexample1_translation', ['language_code', 'master_id'])
# Adding model 'MultilingualExample1'
db.create_table(u'placeholderapp_multilingualexample1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('placeholder_1', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Placeholder'], null=True)),
))
db.send_create_signal(u'placeholderapp', ['MultilingualExample1'])
def backwards(self, orm):
# Removing unique constraint on 'MultilingualExample1Translation', fields ['language_code', 'master']
db.delete_unique(u'placeholderapp_multilingualexample1_translation', ['language_code', 'master_id'])
# Deleting model 'Example1'
db.delete_table(u'placeholderapp_example1')
# Deleting model 'TwoPlaceholderExample'
db.delete_table(u'placeholderapp_twoplaceholderexample')
# Deleting model 'DynamicPlaceholderSlotExample'
db.delete_table(u'placeholderapp_dynamicplaceholderslotexample')
# Deleting model 'CharPksExample'
db.delete_table(u'placeholderapp_charpksexample')
# Deleting model 'MultilingualExample1Translation'
db.delete_table(u'placeholderapp_multilingualexample1_translation')
# Deleting model 'MultilingualExample1'
db.delete_table(u'placeholderapp_multilingualexample1')
models = {
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'placeholderapp.charpksexample': {
'Meta': {'object_name': 'CharPksExample'},
'char_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'charpk_p1'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'primary_key': 'True'})
},
u'placeholderapp.dynamicplaceholderslotexample': {
'Meta': {'object_name': 'DynamicPlaceholderSlotExample'},
'char_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dynamic_pl_1'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'placeholder_2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dynamic_pl_2'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
},
u'placeholderapp.example1': {
'Meta': {'object_name': 'Example1'},
'char_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'date_field': ('django.db.models.fields.DateField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
u'placeholderapp.multilingualexample1': {
'Meta': {'object_name': 'MultilingualExample1'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
u'placeholderapp.multilingualexample1translation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'MultilingualExample1Translation', 'db_table': "u'placeholderapp_multilingualexample1_translation'"},
'char_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['placeholderapp.MultilingualExample1']"})
},
u'placeholderapp.twoplaceholderexample': {
'Meta': {'object_name': 'TwoPlaceholderExample'},
'char_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_2': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_3': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'char_4': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'placeholder_1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'p1'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'placeholder_2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'p2'", 'null': 'True', 'to': "orm['cms.Placeholder']"})
}
}
complete_apps = ['placeholderapp']
| bsd-3-clause
ehealthafrica-ci/kivy | kivy/input/postproc/ignorelist.py | 59 | 1365 |
'''
Ignore list
===========
Ignore touch on some areas of the screen
'''
__all__ = ('InputPostprocIgnoreList', )
from kivy.config import Config
from kivy.utils import strtotuple
class InputPostprocIgnoreList(object):
'''
InputPostprocIgnoreList is a post-processor which removes touches in the
Ignore list. The Ignore list can be configured in the Kivy config file::
[postproc]
# Format: [(xmin, ymin, xmax, ymax), ...]
ignore = [(0.1, 0.1, 0.15, 0.15)]
The Ignore list coordinates are in the range 0-1, not in screen pixels.
'''
def __init__(self):
self.ignore_list = strtotuple(Config.get('postproc', 'ignore'))
def collide_ignore(self, touch):
x, y = touch.sx, touch.sy
for l in self.ignore_list:
xmin, ymin, xmax, ymax = l
if x > xmin and x < xmax and y > ymin and y < ymax:
return True
def process(self, events):
if not len(self.ignore_list):
return events
for etype, touch in events:
if not touch.is_touch:
continue
if etype != 'begin':
continue
if self.collide_ignore(touch):
touch.ud.__pp_ignore__ = True
return [(etype, touch) for etype, touch in events
if not '__pp_ignore__' in touch.ud]
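# Illustrative only (added; the coordinates are made-up): the same ignore list can be
# set programmatically before this post-processor is instantiated:
#   from kivy.config import Config
#   Config.set('postproc', 'ignore', '[(0.0, 0.0, 0.2, 0.1)]')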
| mit
edmstudio/ansible | lib/ansible/galaxy/api.py | 10 | 6504 |
#!/usr/bin/env python
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
import json
from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.urls import open_url
class GalaxyAPI(object):
''' This class is meant to be used as an API client for an Ansible Galaxy server '''
SUPPORTED_VERSIONS = ['v1']
def __init__(self, galaxy, api_server):
self.galaxy = galaxy
try:
urlparse(api_server, scheme='https')
except:
raise AnsibleError("Invalid server API url passed: %s" % api_server)
server_version = self.get_server_api_version('%s/api/' % (api_server))
if not server_version:
raise AnsibleError("Could not retrieve server API version: %s" % api_server)
if server_version in self.SUPPORTED_VERSIONS:
self.baseurl = '%s/api/%s' % (api_server, server_version)
self.version = server_version # for future use
self.galaxy.display.vvvvv("Base API: %s" % self.baseurl)
else:
raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
def get_server_api_version(self, api_server):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
#TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
# also should set baseurl using supported_versions which has path
return 'v1'
try:
data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs))
return data.get("current_version", 'v1')
except Exception as e:
# TODO: report error
return None
def lookup_role_by_name(self, role_name, notify=True):
"""
Find a role by name
"""
role_name = urlquote(role_name)
try:
parts = role_name.split(".")
user_name = ".".join(parts[0:-1])
role_name = parts[-1]
if notify:
self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
self.galaxy.display.vvvv("- %s" % (url))
try:
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
if len(data["results"]) != 0:
return data["results"][0]
except:
# TODO: report on connection/availability errors
pass
return None
def fetch_role_related(self, related, role_id):
"""
Fetch the list of related items for the given role.
The url comes from the 'related' field of the role.
"""
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
results = data['results']
done = (data.get('next', None) == None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
results += data['results']
done = (data.get('next', None) == None)
return results
except:
return None
def get_list(self, what):
"""
Fetch the list of items specified.
"""
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
if "results" in data:
results = data['results']
else:
results = data
done = True
if "next" in data:
done = (data.get('next', None) == None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
self.galaxy.display.display(url)
data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
results += data['results']
done = (data.get('next', None) == None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
def search_roles(self, search, platforms=None, tags=None):
search_url = self.baseurl + '/roles/?page=1'
if search:
search_url += '&search=' + urlquote(search)
if tags is None:
tags = []
elif isinstance(tags, basestring):
tags = tags.split(',')
for tag in tags:
search_url += '&chain__tags__name=' + urlquote(tag)
if platforms is None:
platforms = []
elif isinstance(platforms, basestring):
platforms = platforms.split(',')
for plat in platforms:
search_url += '&chain__platforms__name=' + urlquote(plat)
self.galaxy.display.debug("Executing query: %s" % search_url)
try:
data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs))
except HTTPError as e:
raise AnsibleError("Unsuccessful request to server: %s" % str(e))
return data
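# Descriptive note (added): fetch_role_related() and get_list() above page through the
# API by following data['next'] until it is None, concatenating each page's 'results'.
# Note also that get_server_api_version() is currently hard-coded to return 'v1', so
# the code after that return statement is unreachable.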
| gpl-3.0
costypetrisor/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 |
"""
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
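# Added usage sketch (mirrors the class docstring; the data values are illustrative):
#   data = {'a': [1, 5, 2, 5, 2, 8], 'b': [9, 4, 1, 4, 1, 3]}
#   ItemSelector(key='a').fit(data).transform(data)  # -> [1, 5, 2, 5, 2, 8]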
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# Limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause
mytoh/shadowsocks-android | gfwlist/gen.py | 304 | 1535 |
#!/usr/bin/python
# -*- encoding: utf8 -*-
import itertools
import math
import sys
import IPy
def main():
china_list_set = IPy.IPSet()
for line in sys.stdin:
line_params = line.split("|")
if len(line_params) < 5 or line_params[2] != "ipv4" or line_params[1] != "CN":
continue
ip_addr = line_params[3]
ip_length = float(line_params[4])
ip_mask = 32 - int(math.ceil(math.log(ip_length, 2)))
china_list_set.add(IPy.IP("%s/%d" % (ip_addr, ip_mask)))
# Add reserved / internal network ranges
internal_list = IPy.IPSet(map(IPy.IP, [
"0.0.0.0/8",
"10.0.0.0/8",
"100.64.0.0/10",
"112.124.47.0/24",
"114.114.114.0/24",
"127.0.0.0/8",
"169.254.0.0/16",
"172.16.0.0/12",
"192.0.0.0/29",
"192.0.2.0/24",
"192.88.99.0/24",
"192.168.0.0/16",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"240.0.0.0/4",
]))
china_list_set += internal_list
all = china_list_set
# Invert (take the complement of the China set)
# all = IPy.IPSet([IPy.IP("0.0.0.0/0")])
# Drop all isolated class-C (/24) blocks
# for ip in china_list_set:
# all.discard(ip)
# filter = itertools.ifilter(lambda x: len(x) <= 65536, all)
# for ip in filter:
# all.discard(ip)
# all.add(IPy.IP(ip.strNormal(0)).make_net('255.255.0.0'))
# Print the result
for ip in all:
print '<item>' + str(ip) + '</item>'
if __name__ == "__main__":
main()
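# Worked example (added; the input line is illustrative): the script expects an
# APNIC-style delegation list on stdin, e.g. a line such as
#   apnic|CN|ipv4|1.0.1.0|256|20110414|allocated
# has ip_length = 256, so ip_mask = 32 - ceil(log2(256)) = 24 and the block
# 1.0.1.0/24 is added to the set. Typical use: python gen.py < delegated-apnic-latest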
| gpl-3.0
open-synergy/event | event_registration_partner_unique/tests/test_event.py | 1 | 1524 |
# -*- coding: utf-8 -*-
# © 2016 Antiun Ingeniería S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from .. import exceptions
from openerp.tests.common import TransactionCase
class DuplicatedPartnerCase(TransactionCase):
def setUp(self):
super(DuplicatedPartnerCase, self).setUp()
self.event = self.env.ref("event.event_0")
self.partner = self.env.ref("base.res_partner_1")
self.registration = self.env["event.registration"].create({
"event_id": self.event.id,
"partner_id": self.partner.id,
})
def test_allowed(self):
"""No problem when it is not forbidden."""
self.registration.copy()
def test_forbidden(self):
"""Cannot when it is forbidden."""
self.event.forbid_duplicates = True
with self.assertRaises(exceptions.DuplicatedPartnerError):
self.registration.copy()
def test_saved_in_exception(self):
"""The failing partners are saved in the exception."""
self.event.forbid_duplicates = True
try:
self.registration.copy()
except exceptions.DuplicatedPartnerError as error:
self.assertEqual(error._kwargs["registrations"], self.registration)
def test_duplicates_already_exist(self):
"""Cannot forbid what already happened."""
self.registration.copy()
with self.assertRaises(exceptions.DuplicatedPartnerError):
self.event.forbid_duplicates = True
| agpl-3.0
Tsjerk/MartiniTools | gmx/tpr/topology.py | 1 | 6714 |
import versions
from ftypes import FFParam, Ilist
from tprio import *
class Block(ListWithNames):
def __init__(self,tpr):
self.extend([
("dummy", Tuple(tpr, 256, Integer) if tpr.version < 44 else None),
("nr", Integer(tpr)),
("nra", Integer(tpr) if tpr.version < 51 else None),
])
self.extend([
("index", Tuple(tpr, self.nr+1, Integer)),
("A", Tuple(tpr, self.nra, Integer) if tpr.version < 51 else None )
])
class BlockA(ListWithNames):
def __init__(self,tpr):
self.extend([
("dummy", Tuple(tpr, 256, Integer) if tpr.version < 44 else None),
("nr", Integer(tpr)),
("nra", Integer(tpr)),
])
self.extend([
("index", Tuple(tpr, self.nr+1, Integer)),
("A", Tuple(tpr, self.nra, Integer))
])
class Atom(ListWithNames):
def __init__(self,tpr):
self.position = tpr.tell()
ngrps = versions.ngrps(tpr)
self.extend([
("m", Real(tpr)),
("q", Real(tpr)),
("mB", Real(tpr)),
("qB", Real(tpr)),
("type", Unsigned(tpr)),
("typeB", Unsigned(tpr)),
("ptype", Integer(tpr)),
("resind", Integer(tpr)),
("atomnumber", Integer(tpr) if tpr.version >= 52 else 0),
("groups", Tuple(tpr, ngrps, Unsigned) if tpr.version < 57 else None)
])
class Moltype(ListWithNames):
def __init__(self,tpr,symtab=None):
self.position = tpr.tell()
ngrps = versions.ngrps(tpr)
self.extend([
("nameidx", Integer(tpr) if tpr.version >= 57 else None), # Symtab index for Moleculetype Name
("natoms", Integer(tpr)), # Number of atoms in Moleculetype
("nres", Integer(tpr)), # Number of residues in Moleculetype
("ngroupname", Integer(tpr) if tpr.version < 57 else None), # Number of group names in Moleculetype.
# Kept in struct starting with version 57.
])
self.extend([
("atoms", Tuple(tpr, self.natoms, Atom)),
("atomnameidx", Tuple(tpr, self.natoms, Integer)),
("atomtypeidx", Tuple(tpr, self.natoms, Integer) if tpr.version > 20 else []),
("atomtypeBidx", Tuple(tpr, self.natoms, Integer) if tpr.version > 20 else []),
("residues", Tuple(tpr, self.nres, (Integer,Integer,Char) if tpr.version >= 63 else (Integer,))),
("groupnameidx", Tuple(tpr, self.ngroupname, Integer) if tpr.version < 57 else []),
("groups", Tuple(tpr, ngrps, Group) if tpr.version < 57 else []),
("ilists", Ilist(tpr) if tpr.version >= 57 else None),
("cgs", Block(tpr) if tpr.version >= 57 else None),
("excls", BlockA(tpr)),
])
if symtab:
self.setnames(symtab)
def setnames(self,symtab):
self.extend([
("name", symtab[self.nameidx] if self.nameidx != None else None),
("atomnames", [ symtab[i] for i in self.atomnameidx ]),
("atomtypes", [ symtab[i] for i in self.atomtypeidx ]),
("atomtypesB", [ symtab[i] for i in self.atomtypeBidx ]),
("groupnames", [ symtab[i] for i in self.groupnameidx ]),
("residuenames", [ symtab[i[0]] for i in self.residues ]),
])
class Molblock(ListWithNames):
def __init__(self,tpr):
self.position = tpr.tell()
self.extend([
("type", Integer(tpr)),
("nmol", Integer(tpr)),
("natoms_mol", Integer(tpr)),
("posresA", Group(tpr, (Real,Real,Real))),
("posresB", Group(tpr, (Real,Real,Real))),
])
class AtomTypes(ListWithNames):
def __init__(self,tpr):
if tpr.version <= 25:
return
self.append(("atomTypeN", Integer(tpr)))
self.extend([
("radius", Tuple(tpr, self.atomTypeN, Real)),
("volume", Tuple(tpr, self.atomTypeN, Real)),
("surftens", Tuple(tpr, self.atomTypeN, Real)),
("number", Tuple(tpr, self.atomTypeN, Integer) if tpr.version >= 40 else None),
("gbRadius", Tuple(tpr, self.atomTypeN, Real) if tpr.version >= 60 else None),
("S_hct", Tuple(tpr, self.atomTypeN, Real) if tpr.version >= 60 else None),
])
class Topology(ListWithNames):
def __init__(self,tpr):
ngrps = versions.ngrps(tpr)
self.extend([
("symtab", Strings(tpr)),
("symstridx", Integer(tpr)),
])
self.extend([
("symstr", self.symtab[self.symstridx]),
("ffparam", FFParam(tpr,self.symtab) if tpr.version >= 57 else None),
("moltypes", Group(tpr, Moltype) if tpr.version >= 57 else None),
("molblocks", Group(tpr, Molblock) if tpr.version >= 57 else None),
("topnatoms", Integer(tpr) if tpr.version >= 57 else None),
("atomtypes", AtomTypes(tpr) if tpr.version > 25 else None),
# For earlier versions (<57), there should be one moltype, and
# this set of ffparam/ilists should be set as attribute to that
("ffparam", FFParam(tpr,self.symtab) if tpr.version < 57 else None),
("ilists", Ilist(tpr) if tpr.version < 57 else None),
("cmapN", Integer(tpr) if tpr.version >= 65 else None),
("cmapGridSize", Integer(tpr) if tpr.version >= 65 else None),
])
self.extend([
("cmap", [ Tuple(tpr, self.cmapGridSize**2, (Real, Real, Real, Real)) for i in range(self.cmapN) ] if tpr.version >= 65 else None ),
("groupids", [ Group(tpr, Integer) for i in range(ngrps) ] if tpr.version >= 57 else None ),
("groupnameidx", Group(tpr, Integer) if tpr.version >= 57 else None),
("groups", [ Group(tpr, Unsigned) for i in range(ngrps) ] if tpr.version >= 57 else None ),
("cgs", Block(tpr) if tpr.version < 57 else None),
("mol", Block(tpr) if tpr.version < 57 else None),
("shake", BlockA(tpr) if tpr.version < 51 else None),
])
| gpl-2.0
40223119/2015w13 | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/SDL.py | 603 | 1813 |
from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
return _attrs.get(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
for _rect in rects:
SDL_UpdateRect(screen, _rect)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
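# Context note (added): these are browser-side stubs so code importing SDL names can
# run under Brython; most functions are no-ops, and Screen.canvas simply returns the
# first <canvas> element found in the document.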
| gpl-3.0
binarydud/django-oscar | src/oscar/apps/dashboard/pages/app.py | 49 | 1113 |
from django.conf.urls import url
from oscar.core.application import Application
from oscar.core.loading import get_class
class FlatPageManagementApplication(Application):
name = None
default_permissions = ['is_staff', ]
list_view = get_class('dashboard.pages.views', 'PageListView')
create_view = get_class('dashboard.pages.views', 'PageCreateView')
update_view = get_class('dashboard.pages.views', 'PageUpdateView')
delete_view = get_class('dashboard.pages.views', 'PageDeleteView')
def get_urls(self):
"""
Get URL patterns defined for flatpage management application.
"""
urls = [
url(r'^$', self.list_view.as_view(), name='page-list'),
url(r'^create/$', self.create_view.as_view(), name='page-create'),
url(r'^update/(?P<pk>[-\w]+)/$',
self.update_view.as_view(), name='page-update'),
url(r'^delete/(?P<pk>\d+)/$',
self.delete_view.as_view(), name='page-delete')
]
return self.post_process_urls(urls)
application = FlatPageManagementApplication()
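# For reference (derived from get_urls() above): the named URLs exposed by this
# application are 'page-list', 'page-create', 'page-update' and 'page-delete'.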
| bsd-3-clause
3dfxsoftware/cbss-addons | survey/wizard/survey_send_invitation.py | 19 | 10658 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import time
from random import choice
import string
import os
import datetime
import socket
from openerp import addons, netsvc, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class survey_send_invitation(osv.osv_memory):
_name = 'survey.send.invitation'
_columns = {
'partner_ids': fields.many2many('res.partner','survey_res_partner','partner_id',\
'survey_id', "Answer", required=1),
'send_mail': fields.boolean('Send Mail for New User'),
'send_mail_existing': fields.boolean('Send Reminder for Existing User'),
'mail_subject': fields.char('Subject', size=256),
'mail_subject_existing': fields.char('Subject', size=256),
'mail_from': fields.char('From', size=256, required=1),
'mail': fields.text('Body')
}
_defaults = {
'send_mail': lambda *a: 1,
'send_mail_existing': lambda *a: 1,
}
def genpasswd(self):
chars = string.letters + string.digits
return ''.join([choice(chars) for i in range(6)])
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
data = super(survey_send_invitation, self).default_get(cr, uid, fields_list, context)
survey_obj = self.pool.get('survey')
msg = ""
name = ""
for sur in survey_obj.browse(cr, uid, context.get('active_ids', []), context=context):
name += "\n --> " + sur.title + "\n"
if sur.state != 'open':
msg += sur.title + "\n"
data['mail_subject'] = _("Invitation for %s") % (sur.title)
data['mail_subject_existing'] = _("Invitation for %s") % (sur.title)
data['mail_from'] = sur.responsible_id.email
if msg:
raise osv.except_osv(_('Warning!'), _('The following surveys are not in open state: %s') % msg)
data['mail'] = _('''
Hello %%(name)s, \n\n
Would you please spend some of your time to fill in our survey: \n%s\n
You can access this survey with the following parameters:
URL: %s
Your login ID: %%(login)s\n
Your password: %%(passwd)s\n
\n\n
Thanks,''') % (name, self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context))
return data
def create_report(self, cr, uid, res_ids, report_name=False, file_name=False):
if not report_name or not res_ids:
            return (False, Exception('A report name and resource ids are required.'))
try:
            ret_file_name = addons.get_module_resource('survey', 'report') + file_name + '.pdf'
            service = netsvc.LocalService(report_name)
            (result, format) = service.create(cr, uid, res_ids, {}, {})
            fp = open(ret_file_name, 'wb+')
            fp.write(result)
            fp.close()
except Exception,e:
print 'Exception in create report:',e
return (False, str(e))
return (True, ret_file_name)
def action_send(self, cr, uid, ids, context=None):
if context is None:
context = {}
record = self.read(cr, uid, ids, [],context=context)
survey_ids = context.get('active_ids', [])
record = record and record[0]
partner_ids = record['partner_ids']
user_ref= self.pool.get('res.users')
survey_ref= self.pool.get('survey')
mail_message = self.pool.get('mail.message')
model_data_obj = self.pool.get('ir.model.data')
group_id = model_data_obj._get_id(cr, uid, 'base', 'group_survey_user')
group_id = model_data_obj.browse(cr, uid, group_id, context=context).res_id
act_id = self.pool.get('ir.actions.act_window')
act_id = act_id.search(cr, uid, [('res_model', '=' , 'survey.name.wiz'), \
('view_type', '=', 'form')])
out = "login,password\n"
skipped = 0
existing = ""
created = ""
error = ""
new_user = []
attachments = {}
current_sur = survey_ref.browse(cr, uid, context.get('active_id'), context=context)
exist_user = current_sur.invited_user_ids
if exist_user:
for use in exist_user:
new_user.append(use.id)
for id in survey_ref.browse(cr, uid, survey_ids):
report = self.create_report(cr, uid, [id.id], 'report.survey.form', id.title)
file = open(addons.get_module_resource('survey', 'report') + id.title +".pdf")
file_data = ""
while 1:
line = file.readline()
file_data += line
if not line:
break
file.close()
attachments[id.title +".pdf"] = file_data
os.remove(addons.get_module_resource('survey', 'report') + id.title +".pdf")
for partner in self.pool.get('res.partner').browse(cr, uid, partner_ids):
if not partner.email:
skipped+= 1
continue
user = user_ref.search(cr, uid, [('login', "=", partner.email)])
if user:
if user[0] not in new_user:
new_user.append(user[0])
user = user_ref.browse(cr, uid, user[0])
user_ref.write(cr, uid, user.id, {'survey_id':[[6, 0, survey_ids]]})
mail = record['mail']%{'login':partner.email, 'passwd':user.password, \
'name' : partner.name}
if record['send_mail_existing']:
vals = {
'state': 'outgoing',
'subject': record['mail_subject_existing'],
'body_html': '<pre>%s</pre>' % mail,
'email_to': partner.email,
'email_from': record['mail_from'],
}
self.pool.get('mail.mail').create(cr, uid, vals, context=context)
existing+= "- %s (Login: %s, Password: %s)\n" % (user.name, partner.email, \
user.password)
continue
passwd= self.genpasswd()
out+= partner.email + ',' + passwd + '\n'
mail= record['mail'] % {'login' : partner.email, 'passwd' : passwd, 'name' : partner.name}
if record['send_mail']:
vals = {
'state': 'outgoing',
'subject': record['mail_subject'],
'body_html': '<pre>%s</pre>' % mail,
'email_to': partner.email,
'email_from': record['mail_from'],
}
if attachments:
vals['attachment_ids'] = [(0,0,{'name': a_name,
'datas_fname': a_name,
'datas': str(a_content).encode('base64')})
for a_name, a_content in attachments.items()]
ans = self.pool.get('mail.mail').create(cr, uid, vals, context=context)
if ans:
res_data = {'name': partner.name or _('Unknown'),
'login': partner.email,
'password': passwd,
'address_id': partner.id,
'groups_id': [[6, 0, [group_id]]],
'action_id': act_id[0],
'survey_id': [[6, 0, survey_ids]]
}
user = user_ref.create(cr, uid, res_data)
if user not in new_user:
new_user.append(user)
created+= "- %s (Login: %s, Password: %s)\n" % (partner.name or _('Unknown'),\
partner.email, passwd)
else:
error+= "- %s (Login: %s, Password: %s)\n" % (partner.name or _('Unknown'),\
partner.email, passwd)
new_vals = {}
new_vals.update({'invited_user_ids':[[6,0,new_user]]})
survey_ref.write(cr, uid, context.get('active_id'),new_vals)
note= ""
if created:
note += 'Created users:\n%s\n\n' % (created)
if existing:
note +='Already existing users:\n%s\n\n' % (existing)
if skipped:
note += "%d contacts where ignored (an email address is missing).\n\n" % (skipped)
if error:
            note += 'Emails not sent successfully:\n====================\n%s\n' % (error)
context.update({'note' : note})
return {
'view_type': 'form',
"view_mode": 'form',
'res_model': 'survey.send.invitation.log',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context
}
survey_send_invitation()
class survey_send_invitation_log(osv.osv_memory):
_name = 'survey.send.invitation.log'
_columns = {
'note' : fields.text('Log', readonly=1)
}
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
data = super(survey_send_invitation_log, self).default_get(cr, uid, fields_list, context)
data['note'] = context.get('note', '')
return data
survey_send_invitation_log()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
gpl-2.0
|
defionscode/ansible
|
lib/ansible/modules/packaging/os/yum_repository.py
|
21
|
24040
|
#!/usr/bin/python
# encoding: utf-8
# (c) 2015-2016, Jiri Tyr <jiri.tyr@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'
}
DOCUMENTATION = '''
---
module: yum_repository
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add or remove YUM repositories
description:
- Add or remove YUM repositories in RPM-based Linux distributions.
- If you wish to update an existing repository definition use M(ini_file) instead.
options:
async:
description:
- If set to C(yes) Yum will download packages and metadata from this
repo in parallel, if possible.
type: bool
default: 'yes'
bandwidth:
description:
- Maximum available network bandwidth in bytes/second. Used with the
I(throttle) option.
- If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
throttling will be disabled. If I(throttle) is expressed as a data rate
(bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
throttling).
default: 0
baseurl:
description:
- URL to the directory where the yum repository's 'repodata' directory
lives.
- It can also be a list of multiple URLs.
- This, the I(metalink) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
cost:
description:
- Relative cost of accessing this repository. Useful for weighing one
repo's packages as greater/less than any other.
default: 1000
deltarpm_metadata_percentage:
description:
- When the relative size of deltarpm metadata vs pkgs is larger than
this, deltarpm metadata is not downloaded from the repo. Note that you
can give values over C(100), so C(200) means that the metadata is
required to be half the size of the packages. Use C(0) to turn off
this check, and always download metadata.
default: 100
deltarpm_percentage:
description:
- When the relative size of delta vs pkg is larger than this, delta is
not used. Use C(0) to turn off delta rpm processing. Local repositories
(with file:// I(baseurl)) have delta rpms turned off by default.
default: 75
description:
description:
- A human readable string describing the repository. This option corresponds to the "name" property in the repo file.
- This parameter is only required if I(state) is set to C(present).
enabled:
description:
      - This tells yum whether or not to use this repository.
type: bool
default: 'yes'
enablegroups:
description:
- Determines whether yum will allow the use of package groups for this
repository.
type: bool
default: 'yes'
exclude:
description:
- List of packages to exclude from updates or installs. This should be a
space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed.
- The list can also be a regular YAML array.
failovermethod:
choices: [roundrobin, priority]
default: roundrobin
description:
- C(roundrobin) randomly selects a URL out of the list of URLs to start
with and proceeds through each of them as it encounters a failure
contacting the host.
- C(priority) starts from the first I(baseurl) listed and reads through
them sequentially.
file:
description:
- File name without the C(.repo) extension to save the repo in. Defaults
to the value of I(name).
gpgcakey:
description:
- A URL pointing to the ASCII-armored CA key file for the repository.
gpgcheck:
description:
- Tells yum whether or not it should perform a GPG signature check on
packages.
- No default setting. If the value is not set, the system setting from
C(/etc/yum.conf) or system default of C(no) will be used.
type: bool
gpgkey:
description:
- A URL pointing to the ASCII-armored GPG key file for the repository.
- It can also be a list of multiple URLs.
http_caching:
description:
- Determines how upstream HTTP caches are instructed to handle any HTTP
downloads that Yum does.
- C(all) means that all HTTP downloads should be cached.
- C(packages) means that only RPM package downloads should be cached (but
not repository metadata downloads).
- C(none) means that no HTTP downloads should be cached.
choices: [all, packages, none]
default: all
include:
description:
      - Include external configuration file. Both local paths and URLs are
        supported. The configuration file will be inserted at the position of the
        I(include=) line. Included files may contain further include lines.
        Yum will abort with an error if an inclusion loop is detected.
includepkgs:
description:
- List of packages you want to only use from a repository. This should be
a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
are allowed. Substitution variables (e.g. C($releasever)) are honored
here.
- The list can also be a regular YAML array.
ip_resolve:
description:
- Determines how yum resolves host names.
- C(4) or C(IPv4) - resolve to IPv4 addresses only.
- C(6) or C(IPv6) - resolve to IPv6 addresses only.
choices: [4, 6, IPv4, IPv6, whatever]
default: whatever
keepalive:
description:
- This tells yum whether or not HTTP/1.1 keepalive should be used with
this repository. This can improve transfer speeds by using one
connection when downloading multiple files from a repository.
type: bool
default: 'no'
keepcache:
description:
- Either C(1) or C(0). Determines whether or not yum keeps the cache of
headers and packages after successful installation.
choices: ['0', '1']
default: '1'
metadata_expire:
description:
- Time (in seconds) after which the metadata will expire.
- Default value is 6 hours.
default: 21600
metadata_expire_filter:
description:
- Filter the I(metadata_expire) time, allowing a trade of speed for
accuracy if a command doesn't require it. Each yum command can specify
that it requires a certain level of timeliness quality from the remote
repos. from "I'm about to install/upgrade, so this better be current"
to "Anything that's available is good enough".
- C(never) - Nothing is filtered, always obey I(metadata_expire).
- C(read-only:past) - Commands that only care about past information are
filtered from metadata expiring. Eg. I(yum history) info (if history
needs to lookup anything about a previous transaction, then by
definition the remote package was available in the past).
- C(read-only:present) - Commands that are balanced between past and
future. Eg. I(yum list yum).
- C(read-only:future) - Commands that are likely to result in running
other commands which will require the latest metadata. Eg.
I(yum check-update).
- Note that this option does not override "yum clean expire-cache".
choices: [never, 'read-only:past', 'read-only:present', 'read-only:future']
default: 'read-only:present'
metalink:
description:
- Specifies a URL to a metalink file for the repomd.xml, a list of
mirrors for the entire repository are generated by converting the
mirrors for the repomd.xml file to a I(baseurl).
- This, the I(baseurl) or I(mirrorlist) parameters are required if I(state) is set to
C(present).
mirrorlist:
description:
- Specifies a URL to a file containing a list of baseurls.
- This, the I(baseurl) or I(metalink) parameters are required if I(state) is set to
C(present).
mirrorlist_expire:
description:
- Time (in seconds) after which the mirrorlist locally cached will
expire.
- Default value is 6 hours.
default: 21600
name:
description:
- Unique repository ID. This option builds the section name of the repository in the repo file.
- This parameter is only required if I(state) is set to C(present) or
C(absent).
required: true
password:
description:
- Password to use with the username for basic authentication.
priority:
description:
- Enforce ordered protection of repositories. The value is an integer
from 1 to 99.
- This option only works if the YUM Priorities plugin is installed.
default: 99
protect:
description:
- Protect packages from updates from other repositories.
type: bool
default: 'no'
proxy:
description:
- URL to the proxy server that yum should use. Set to C(_none_) to
disable the global proxy setting.
  proxy_password:
    description:
      - Password for this proxy.
  proxy_username:
    description:
      - Username to use for proxy.
repo_gpgcheck:
description:
- This tells yum whether or not it should perform a GPG signature check
on the repodata from this repository.
type: bool
default: 'no'
reposdir:
description:
- Directory where the C(.repo) files will be stored.
default: /etc/yum.repos.d
retries:
description:
- Set the number of times any attempt to retrieve a file should retry
before returning an error. Setting this to C(0) makes yum try forever.
default: 10
s3_enabled:
description:
- Enables support for S3 repositories.
- This option only works if the YUM S3 plugin is installed.
type: bool
default: 'no'
skip_if_unavailable:
description:
- If set to C(yes) yum will continue running if this repository cannot be
contacted for any reason. This should be set carefully as all repos are
consulted for any given command.
type: bool
default: 'no'
ssl_check_cert_permissions:
description:
- Whether yum should check the permissions on the paths for the
certificates on the repository (both remote and local).
- If we can't read any of the files then yum will force
I(skip_if_unavailable) to be C(yes). This is most useful for non-root
processes which use yum on repos that have client cert files which are
readable only by root.
type: bool
default: 'no'
sslcacert:
description:
- Path to the directory containing the databases of the certificate
authorities yum should use to verify SSL certificates.
sslclientcert:
description:
- Path to the SSL client certificate yum should use to connect to
repos/remote sites.
sslclientkey:
description:
- Path to the SSL client key yum should use to connect to repos/remote
sites.
sslverify:
description:
- Defines whether yum should verify SSL certificates/hosts at all.
type: bool
default: 'yes'
state:
description:
- State of the repo file.
choices: [absent, present]
default: present
throttle:
description:
- Enable bandwidth throttling for downloads.
      - This option can be expressed as an absolute data rate in bytes/sec. An
SI prefix (k, M or G) may be appended to the bandwidth value.
timeout:
description:
- Number of seconds to wait for a connection before timing out.
default: 30
ui_repoid_vars:
description:
- When a repository id is displayed, append these yum variables to the
string if they are used in the I(baseurl)/etc. Variables are appended
in the order listed (and found).
default: releasever basearch
username:
description:
- Username to use for basic authentication to a repo or really any url.
extends_documentation_fragment:
- files
notes:
- All comments will be removed if modifying an existing repo file.
- Section order is preserved in an existing repo file.
- Parameters in a section are ordered alphabetically in an existing repo
file.
- The repo file will be automatically deleted if it contains no repository.
- When removing a repository, beware that the metadata cache may still remain
on disk until you run C(yum clean all). Use a notification handler for this.
- "The C(params) parameter was removed in Ansible 2.5 due to circumventing Ansible's parameter
handling"
'''
EXAMPLES = '''
- name: Add repository
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
- name: Add multiple repositories into the same file (1/2)
yum_repository:
name: epel
description: EPEL YUM repo
file: external_repos
baseurl: https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
gpgcheck: no
- name: Add multiple repositories into the same file (2/2)
yum_repository:
name: rpmforge
description: RPMforge YUM repo
file: external_repos
baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
enabled: no
# Handler showing how to clean yum metadata cache
- name: yum-clean-metadata
command: yum clean metadata
args:
warn: no
# Example removing a repository and cleaning up metadata cache
- name: Remove repository (and clean up left-over metadata)
yum_repository:
name: epel
state: absent
notify: yum-clean-metadata
- name: Remove repository from a specific repo file
yum_repository:
name: epel
file: external_repos
state: absent
'''
RETURN = '''
repo:
description: repository name
returned: success
type: string
sample: "epel"
state:
description: state of the target, after execution
returned: success
type: string
sample: "present"
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_native
class YumRepo(object):
# Class global variables
module = None
params = None
section = None
repofile = configparser.RawConfigParser()
# List of parameters which will be allowed in the repo file output
allowed_params = [
'async',
'bandwidth',
'baseurl',
'cost',
'deltarpm_metadata_percentage',
'deltarpm_percentage',
'enabled',
'enablegroups',
'exclude',
'failovermethod',
'gpgcakey',
'gpgcheck',
'gpgkey',
'http_caching',
'include',
'includepkgs',
'ip_resolve',
'keepalive',
'keepcache',
'metadata_expire',
'metadata_expire_filter',
'metalink',
'mirrorlist',
'mirrorlist_expire',
'name',
'password',
'priority',
'protect',
'proxy',
'proxy_password',
'proxy_username',
'repo_gpgcheck',
'retries',
's3_enabled',
'skip_if_unavailable',
'sslcacert',
'ssl_check_cert_permissions',
'sslclientcert',
'sslclientkey',
'sslverify',
'throttle',
'timeout',
'ui_repoid_vars',
'username']
# List of parameters which can be a list
list_params = ['exclude', 'includepkgs']
def __init__(self, module):
# To be able to use fail_json
self.module = module
# Shortcut for the params
self.params = self.module.params
# Section is always the repoid
self.section = self.params['repoid']
# Check if repo directory exists
repos_dir = self.params['reposdir']
if not os.path.isdir(repos_dir):
self.module.fail_json(
msg="Repo directory '%s' does not exist." % repos_dir)
# Set dest; also used to set dest parameter for the FS attributes
self.params['dest'] = os.path.join(
repos_dir, "%s.repo" % self.params['file'])
# Read the repo file if it exists
if os.path.isfile(self.params['dest']):
self.repofile.read(self.params['dest'])
def add(self):
# Remove already existing repo and create a new one
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
# Add section
self.repofile.add_section(self.section)
# Baseurl/mirrorlist is not required because for removal we need only
# the repo name. This is why we check if the baseurl/mirrorlist is
# defined.
req_params = (self.params['baseurl'], self.params['metalink'], self.params['mirrorlist'])
if req_params == (None, None, None):
self.module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required for "
"adding a new repo.")
# Set options
for key, value in sorted(self.params.items()):
if key in self.list_params and isinstance(value, list):
# Join items into one string for specific parameters
value = ' '.join(value)
elif isinstance(value, bool):
# Convert boolean value to integer
value = int(value)
# Set the value only if it was defined (default is None)
if value is not None and key in self.allowed_params:
self.repofile.set(self.section, key, value)
def save(self):
if len(self.repofile.sections()):
# Write data into the file
try:
fd = open(self.params['dest'], 'w')
except IOError as e:
self.module.fail_json(
msg="Cannot open repo file %s." % self.params['dest'],
details=to_native(e))
self.repofile.write(fd)
try:
fd.close()
except IOError as e:
self.module.fail_json(
msg="Cannot write repo file %s." % self.params['dest'],
details=to_native(e))
else:
            # Remove the file if there are no repos
try:
os.remove(self.params['dest'])
except OSError as e:
self.module.fail_json(
msg=(
"Cannot remove empty repo file %s." %
self.params['dest']),
details=to_native(e))
def remove(self):
# Remove section if exists
if self.repofile.has_section(self.section):
self.repofile.remove_section(self.section)
def dump(self):
repo_string = ""
# Compose the repo file
for section in sorted(self.repofile.sections()):
repo_string += "[%s]\n" % section
for key, value in sorted(self.repofile.items(section)):
repo_string += "%s = %s\n" % (key, value)
repo_string += "\n"
return repo_string
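# Illustrative example of the text dump() composes for a single configured
# repository (the values below are invented for the example; keys within the
# section come out alphabetically sorted, as noted in DOCUMENTATION):
#
#   [epel]
#   baseurl = https://download.fedoraproject.org/pub/epel/$releasever/$basearch/
#   enabled = 1
#   gpgcheck = 0
#   name = EPEL YUM repo
#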
def main():
# Module settings
argument_spec = dict(
bandwidth=dict(),
baseurl=dict(type='list'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(type='list'),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(type='list'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(required=True),
params=dict(type='dict'),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(),
sslclientkey=dict(),
sslverify=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
)
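    # Note (explanatory comment, not from the original file): 'async' is attached
    # to the spec after the dict() literal because it cannot be passed as a
    # keyword argument on Python versions where 'async' is a reserved word.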
argument_spec['async'] = dict(type='bool')
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Params was removed
# https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
if module.params['params']:
module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['metalink'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Change list type to string for baseurl and gpgkey
for list_param in ['baseurl', 'gpgkey']:
if (
list_param in module.params and
module.params[list_param] is not None):
module.params[list_param] = "\n".join(module.params[list_param])
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
diff = {
'before_header': yumrepo.params['dest'],
'before': yumrepo.dump(),
'after_header': yumrepo.params['dest'],
'after': ''
}
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
diff['after'] = yumrepo.dump()
# Compare repo states
changed = diff['before'] != diff['after']
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
if __name__ == '__main__':
main()
|
gpl-3.0
|
Edraak/circleci-edx-platform
|
common/djangoapps/terrain/stubs/lti.py
|
44
|
12380
|
"""
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at the same time. It is
not possible to have this LTI tool multiple times on a single page in the LMS.
"""
from uuid import uuid4
import textwrap
import urllib
from oauthlib.oauth1.rfc5849 import signature, parameters
import oauthlib.oauth1
import hashlib
import base64
import mock
import requests
from http import StubHttpRequestHandler, StubHttpService
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://127.0.0.1:{port}/'
def do_GET(self):
"""
Handle a GET request from the client and sends response back.
Used for checking LTI Provider started correctly.
"""
self.send_response(200, 'This is LTI Provider.', {'Content-type': 'text/plain'})
def do_POST(self):
"""
Handle a POST request from the client and sends response back.
"""
if 'grade' in self.path and self._send_graded_result().status_code == 200:
status_message = 'LTI consumer (edX) responded with XML content:<br>' + self.server.grade_data['TC answer']
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
# Respond to request with correct lti endpoint
elif self._is_correct_lti_request():
params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
status_message = "This is LTI tool. Success."
                # Set data for grades that needs to be stored as server data
if 'lis_outcome_service_url' in self.post_dict:
self.server.grade_data = {
'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
'sourcedId': self.post_dict.get('lis_result_sourcedid')
}
submit_url = '//{}:{}'.format(*self.server.server_address)
content = self._create_content(status_message, submit_url)
self.send_response(200, content)
else:
content = self._create_content("Wrong LTI signature")
self.send_response(200, content)
else:
content = self._create_content("Invalid request URL")
self.send_response(500, content)
def _send_graded_result(self):
"""
Send grade request.
"""
values = {
'textString': 0.5,
'sourcedId': self.server.grade_data['sourcedId'],
'imsx_messageIdentifier': uuid4().hex,
}
payload = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{textString}</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
data = payload.format(**values)
url = self.server.grade_data['callback_url']
headers = {
'Content-Type': 'application/xml',
'X-Requested-With': 'XMLHttpRequest',
'Authorization': self._oauth_sign(url, data)
}
        # Send request, ignoring verification of the SSL certificate
response = requests.post(url, data=data, headers=headers, verify=False)
self.server.grade_data['TC answer'] = response.content
return response
def _send_lti2_outcome(self):
"""
Send a grade back to consumer
"""
payload = textwrap.dedent("""
{{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result",
"resultScore" : {score},
"comment" : "This is awesome."
}}
""")
data = payload.format(score=0.8)
return self._send_lti2(data)
def _send_lti2_delete(self):
"""
Send a delete back to consumer
"""
payload = textwrap.dedent("""
{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result"
}
""")
return self._send_lti2(payload)
def _send_lti2(self, payload):
"""
Send lti2 json result service request.
"""
### We compute the LTI V2.0 service endpoint from the callback_url (which is set by the launch call)
url = self.server.grade_data['callback_url']
url_parts = url.split('/')
url_parts[-1] = "lti_2_0_result_rest_handler"
anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
url_parts.extend(["user", anon_id])
new_url = '/'.join(url_parts)
content_type = 'application/vnd.ims.lis.v2.result+json'
headers = {
'Content-Type': content_type,
'Authorization': self._oauth_sign(new_url, payload,
method='PUT',
content_type=content_type)
}
        # Send request, ignoring verification of the SSL certificate
response = requests.put(new_url, data=payload, headers=headers, verify=False)
self.server.grade_data['status_code'] = response.status_code
self.server.grade_data['TC answer'] = response.content
return response
def _create_content(self, response_text, submit_url=None):
"""
Return content (str) either for launch, send grade or get result from TC.
"""
if submit_url:
submit_form = textwrap.dedent("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit">
</form>
<form action="{submit_url}/lti2_outcome" method="post">
<input type="submit" name="submit-lti2-button" value="Submit">
</form>
<form action="{submit_url}/lti2_delete" method="post">
<input type="submit" name="submit-lti2-delete-button" value="Submit">
</form>
""").format(submit_url=submit_url)
else:
submit_form = ''
# Show roles only for LTI launch.
if self.post_dict.get('roles'):
role = '<h5>Role: {}</h5>'.format(self.post_dict['roles'])
else:
role = ''
response_str = textwrap.dedent("""
<html>
<head>
<title>TEST TITLE</title>
</head>
<body>
<div>
<h2>IFrame loaded</h2>
<h3>Server response is:</h3>
<h3 class="result">{response}</h3>
{role}
</div>
{submit_form}
</body>
</html>
""").format(response=response_text, role=role, submit_form=submit_form)
        # Currently the LTI module double-quotes the lis_result_sourcedid parameter.
        # Unquote the response twice.
return urllib.unquote(urllib.unquote(response_str))
def _is_correct_lti_request(self):
"""
Return a boolean indicating whether the URL path is a valid LTI end-point.
"""
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
return lti_endpoint in self.path
def _oauth_sign(self, url, body, content_type=u'application/x-www-form-urlencoded', method=u'POST'):
"""
Signs request and returns signed Authorization header.
"""
client_key = self.server.config.get('client_key', self.DEFAULT_CLIENT_KEY)
client_secret = self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET)
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
headers = {
# This is needed for body encoding:
'Content-Type': content_type,
}
# Calculate and encode body hash. See http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
sha1 = hashlib.sha1()
sha1.update(body)
oauth_body_hash = unicode(base64.b64encode(sha1.digest()))
params = client.get_oauth_params(None)
params.append((u'oauth_body_hash', oauth_body_hash))
mock_request = mock.Mock(
uri=unicode(urllib.unquote(url)),
headers=headers,
body=u"",
decoded_body=u"",
oauth_params=params,
http_method=unicode(method),
)
sig = client.get_oauth_signature(mock_request)
mock_request.oauth_params.append((u'oauth_signature', sig))
new_headers = parameters.prepare_headers(mock_request.oauth_params, headers, realm=None)
return new_headers['Authorization']
def _check_oauth_signature(self, params, client_signature):
"""
Checks oauth signature from client.
`params` are params from post request except signature,
`client_signature` is signature from request.
Builds mocked request and verifies hmac-sha1 signing::
1. builds string to sign from `params`, `url` and `http_method`.
2. signs it with `client_secret` which comes from server settings.
3. obtains signature after sign and then compares it with request.signature
(request signature comes form client in request)
Returns `True` if signatures are correct, otherwise `False`.
"""
client_secret = unicode(self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET))
port = self.server.server_address[1]
lti_base = self.DEFAULT_LTI_ADDRESS.format(port=port)
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
url = lti_base + lti_endpoint
request = mock.Mock()
request.params = [(unicode(k), unicode(v)) for k, v in params.items()]
request.uri = unicode(url)
request.http_method = u'POST'
request.signature = unicode(client_signature)
return signature.verify_hmac_sha1(request, client_secret)
class StubLtiService(StubHttpService):
"""
A stub LTI provider server that responds
to POST and GET requests to localhost.
"""
HANDLER_CLASS = StubLtiHandler
|
agpl-3.0
|
aimas/TuniErp-8.0
|
addons/hr_gamification/__openerp__.py
|
1
|
1679
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'hidden',
'website': 'https://www.tunierp.com/page/employees',
'depends': ['gamification', 'hr'],
'description': """Use the HR ressources for the gamification process.
The HR officer can now manage challenges and badges.
This allow the user to send badges to employees instead of simple users.
Badge received are displayed on the user profile.
""",
'data': [
'security/ir.model.access.csv',
'security/gamification_security.xml',
'wizard/grant_badge.xml',
'views/gamification.xml',
'views/hr_gamification.xml',
],
'auto_install': True,
}
|
agpl-3.0
|
KaranToor/MA450
|
google-cloud-sdk/lib/third_party/dns/rdtypes/IN/APL.py
|
248
|
5525
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.rdata
import dns.tokenizer
class APLItem(object):
"""An APL list item.
@ivar family: the address family (IANA address family registry)
@type family: int
@ivar negation: is this item negated?
@type negation: bool
@ivar address: the address
@type address: string
@ivar prefix: the prefix length
@type prefix: int
"""
__slots__ = ['family', 'negation', 'address', 'prefix']
def __init__(self, family, negation, address, prefix):
self.family = family
self.negation = negation
self.address = address
self.prefix = prefix
def __str__(self):
if self.negation:
return "!%d:%s/%s" % (self.family, self.address, self.prefix)
else:
return "%d:%s/%s" % (self.family, self.address, self.prefix)
def to_wire(self, file):
if self.family == 1:
address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
elif self.family == 2:
address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
else:
address = self.address.decode('hex_codec')
#
# Truncate least significant zero bytes.
#
last = 0
for i in xrange(len(address) - 1, -1, -1):
if address[i] != chr(0):
last = i + 1
break
address = address[0 : last]
l = len(address)
assert l < 128
if self.negation:
l |= 0x80
header = struct.pack('!HBB', self.family, self.prefix, l)
file.write(header)
file.write(address)
class APL(dns.rdata.Rdata):
"""APL record.
@ivar items: a list of APL items
@type items: list of APL_Item
@see: RFC 3123"""
__slots__ = ['items']
def __init__(self, rdclass, rdtype, items):
super(APL, self).__init__(rdclass, rdtype)
self.items = items
def to_text(self, origin=None, relativize=True, **kw):
return ' '.join(map(lambda x: str(x), self.items))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
items = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
item = token.value
if item[0] == '!':
negation = True
item = item[1:]
else:
negation = False
(family, rest) = item.split(':', 1)
family = int(family)
(address, prefix) = rest.split('/', 1)
prefix = int(prefix)
item = APLItem(family, negation, address, prefix)
items.append(item)
return cls(rdclass, rdtype, items)
from_text = classmethod(from_text)
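    # Presentation-format example accepted by from_text (illustrative, per the
    # family:address/prefix parsing above; a leading '!' negates an item):
    #   "1:224.0.0.0/4 2:ff00::/8 !1:192.168.38.0/28"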
def to_wire(self, file, compress = None, origin = None):
for item in self.items:
item.to_wire(file)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
items = []
while 1:
if rdlen < 4:
raise dns.exception.FormError
header = struct.unpack('!HBB', wire[current : current + 4])
afdlen = header[2]
if afdlen > 127:
negation = True
afdlen -= 128
else:
negation = False
current += 4
rdlen -= 4
if rdlen < afdlen:
raise dns.exception.FormError
address = wire[current : current + afdlen]
l = len(address)
if header[0] == 1:
if l < 4:
address += '\x00' * (4 - l)
address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
elif header[0] == 2:
if l < 16:
address += '\x00' * (16 - l)
address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
else:
#
# This isn't really right according to the RFC, but it
# seems better than throwing an exception
#
address = address.encode('hex_codec')
current += afdlen
rdlen -= afdlen
item = APLItem(header[0], negation, address, header[1])
items.append(item)
if rdlen == 0:
break
return cls(rdclass, rdtype, items)
from_wire = classmethod(from_wire)
def _cmp(self, other):
f = cStringIO.StringIO()
self.to_wire(f)
wire1 = f.getvalue()
f.seek(0)
f.truncate()
other.to_wire(f)
wire2 = f.getvalue()
f.close()
return cmp(wire1, wire2)
|
apache-2.0
|
Luthaf/cymbalum
|
scripts/ci/check-whitespaces.py
|
4
|
1416
|
#!/usr/bin/env python
# coding=utf-8
import os
import sys
ROOT = os.path.join(os.path.dirname(__file__), "..")
DIRECTORIES = ["benches", "doc", "examples", "scripts", "src", "tests"]
IGNORED_EXTENSIONS = [
"xyz",
# website and documentation files
"svg", "html", "js",
# Binary files
"ttf", "eot", "woff", "woff2", "png",
]
ERRORS = 0
def error(message):
global ERRORS
ERRORS += 1
print(message)
def check_whitespace(path):
path = os.path.realpath(path)
path = os.path.relpath(path, ROOT)
with open(path) as fd:
lines = fd.readlines()
line_number = 0
for line in lines:
line_number += 1
if line.endswith(" \n") or line.endswith("\t\n"):
error(
"whitespace at the end of the line at {}:{}"
.format(path, line_number)
)
if not lines[-1].endswith("\n"):
error("missing new line at the end of the file in {}".format(path))
if __name__ == '__main__':
for directory in DIRECTORIES:
for (root, _, paths) in os.walk(os.path.join(ROOT, directory)):
for path in paths:
extension = path.split(".")[-1]
if extension not in IGNORED_EXTENSIONS:
check_whitespace(os.path.join(root, path))
if ERRORS != 0:
print("------------------\n{} whitespace errors".format(ERRORS))
sys.exit(1)
|
bsd-2-clause
|
adrienbrault/home-assistant
|
homeassistant/components/wink/__init__.py
|
3
|
32284
|
"""Support for Wink hubs."""
from datetime import timedelta
import json
import logging
import os
import time
from aiohttp.web import Response
from pubnubsubhandler import PubNubSubscriptionHandler
import pywink
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_NAME,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_EMAIL,
CONF_PASSWORD,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_ON,
__version__,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import make_entity_service_schema
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import track_time_interval
from homeassistant.helpers.network import get_url
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
DOMAIN = "wink"
SUBSCRIPTION_HANDLER = None
CONF_USER_AGENT = "user_agent"
CONF_OAUTH = "oauth"
CONF_LOCAL_CONTROL = "local_control"
CONF_MISSING_OAUTH_MSG = "Missing oauth2 credentials."
ATTR_ACCESS_TOKEN = "access_token"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_PAIRING_MODE = "pairing_mode"
ATTR_KIDDE_RADIO_CODE = "kidde_radio_code"
ATTR_HUB_NAME = "hub_name"
WINK_AUTH_CALLBACK_PATH = "/auth/wink/callback"
WINK_AUTH_START = "/auth/wink"
WINK_CONFIG_FILE = ".wink.conf"
USER_AGENT = f"Manufacturer/Home-Assistant{__version__} python/3 Wink/3"
DEFAULT_CONFIG = {
CONF_CLIENT_ID: "CLIENT_ID_HERE",
CONF_CLIENT_SECRET: "CLIENT_SECRET_HERE",
}
SERVICE_ADD_NEW_DEVICES = "pull_newly_added_devices_from_wink"
SERVICE_REFRESH_STATES = "refresh_state_from_wink"
SERVICE_RENAME_DEVICE = "rename_wink_device"
SERVICE_DELETE_DEVICE = "delete_wink_device"
SERVICE_SET_PAIRING_MODE = "pair_new_device"
SERVICE_SET_CHIME_VOLUME = "set_chime_volume"
SERVICE_SET_SIREN_VOLUME = "set_siren_volume"
SERVICE_ENABLE_CHIME = "enable_chime"
SERVICE_SET_SIREN_TONE = "set_siren_tone"
SERVICE_SET_AUTO_SHUTOFF = "siren_set_auto_shutoff"
SERVICE_SIREN_STROBE_ENABLED = "set_siren_strobe_enabled"
SERVICE_CHIME_STROBE_ENABLED = "set_chime_strobe_enabled"
SERVICE_ENABLE_SIREN = "enable_siren"
SERVICE_SET_DIAL_CONFIG = "set_nimbus_dial_configuration"
SERVICE_SET_DIAL_STATE = "set_nimbus_dial_state"
ATTR_VOLUME = "volume"
ATTR_TONE = "tone"
ATTR_ENABLED = "enabled"
ATTR_AUTO_SHUTOFF = "auto_shutoff"
ATTR_MIN_VALUE = "min_value"
ATTR_MAX_VALUE = "max_value"
ATTR_ROTATION = "rotation"
ATTR_SCALE = "scale"
ATTR_TICKS = "ticks"
ATTR_MIN_POSITION = "min_position"
ATTR_MAX_POSITION = "max_position"
ATTR_VALUE = "value"
ATTR_LABELS = "labels"
SCALES = ["linear", "log"]
ROTATIONS = ["cw", "ccw"]
VOLUMES = ["low", "medium", "high"]
TONES = [
"doorbell",
"fur_elise",
"doorbell_extended",
"alert",
"william_tell",
"rondo_alla_turca",
"police_siren",
"evacuation",
"beep_beep",
"beep",
]
CHIME_TONES = TONES + ["inactive"]
AUTO_SHUTOFF_TIMES = [None, -1, 30, 60, 120]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Inclusive(
CONF_EMAIL, CONF_OAUTH, msg=CONF_MISSING_OAUTH_MSG
): cv.string,
vol.Inclusive(
CONF_PASSWORD, CONF_OAUTH, msg=CONF_MISSING_OAUTH_MSG
): cv.string,
vol.Inclusive(
CONF_CLIENT_ID, CONF_OAUTH, msg=CONF_MISSING_OAUTH_MSG
): cv.string,
vol.Inclusive(
CONF_CLIENT_SECRET, CONF_OAUTH, msg=CONF_MISSING_OAUTH_MSG
): cv.string,
vol.Optional(CONF_LOCAL_CONTROL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
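# Hedged example of a configuration.yaml entry that this schema accepts
# (credential values are placeholders; all four credentials share one
# vol.Inclusive group, so they must be supplied together):
#
#   wink:
#     email: user@example.com
#     password: YOUR_PASSWORD
#     client_id: YOUR_CLIENT_ID
#     client_secret: YOUR_CLIENT_SECRET
#     local_control: true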
RENAME_DEVICE_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_NAME): cv.string}, extra=vol.ALLOW_EXTRA
)
DELETE_DEVICE_SCHEMA = make_entity_service_schema({}, extra=vol.ALLOW_EXTRA)
SET_PAIRING_MODE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_HUB_NAME): cv.string,
vol.Required(ATTR_PAIRING_MODE): cv.string,
vol.Optional(ATTR_KIDDE_RADIO_CODE): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
SET_VOLUME_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_VOLUME): vol.In(VOLUMES)}
)
SET_SIREN_TONE_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_TONE): vol.In(TONES)}
)
SET_CHIME_MODE_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_TONE): vol.In(CHIME_TONES)}
)
SET_AUTO_SHUTOFF_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_AUTO_SHUTOFF): vol.In(AUTO_SHUTOFF_TIMES)}
)
SET_STROBE_ENABLED_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_ENABLED): cv.boolean}
)
ENABLED_SIREN_SCHEMA = make_entity_service_schema(
{vol.Required(ATTR_ENABLED): cv.boolean}
)
DIAL_CONFIG_SCHEMA = make_entity_service_schema(
{
vol.Optional(ATTR_MIN_VALUE): vol.Coerce(int),
vol.Optional(ATTR_MAX_VALUE): vol.Coerce(int),
vol.Optional(ATTR_MIN_POSITION): cv.positive_int,
vol.Optional(ATTR_MAX_POSITION): cv.positive_int,
vol.Optional(ATTR_ROTATION): vol.In(ROTATIONS),
vol.Optional(ATTR_SCALE): vol.In(SCALES),
vol.Optional(ATTR_TICKS): cv.positive_int,
}
)
DIAL_STATE_SCHEMA = make_entity_service_schema(
{
vol.Required(ATTR_VALUE): vol.Coerce(int),
vol.Optional(ATTR_LABELS): cv.ensure_list(cv.string),
}
)
WINK_COMPONENTS = [
"binary_sensor",
"sensor",
"light",
"switch",
"lock",
"cover",
"climate",
"fan",
"alarm_control_panel",
"scene",
"water_heater",
]
WINK_HUBS = []
def _request_app_setup(hass, config):
"""Assist user with configuring the Wink dev application."""
hass.data[DOMAIN]["configurator"] = True
configurator = hass.components.configurator
def wink_configuration_callback(callback_data):
"""Handle configuration updates."""
_config_path = hass.config.path(WINK_CONFIG_FILE)
if not os.path.isfile(_config_path):
setup(hass, config)
return
client_id = callback_data.get(CONF_CLIENT_ID).strip()
client_secret = callback_data.get(CONF_CLIENT_SECRET).strip()
if None not in (client_id, client_secret):
save_json(
_config_path,
{CONF_CLIENT_ID: client_id, CONF_CLIENT_SECRET: client_secret},
)
setup(hass, config)
return
error_msg = "Your input was invalid. Please try again."
_configurator = hass.data[DOMAIN]["configuring"][DOMAIN]
configurator.notify_errors(_configurator, error_msg)
start_url = f"{get_url(hass)}{WINK_AUTH_CALLBACK_PATH}"
description = f"""Please create a Wink developer app at
https://developer.wink.com.
Add a Redirect URI of {start_url}.
They will provide you a Client ID and secret
after reviewing your request.
(This can take several days).
"""
hass.data[DOMAIN]["configuring"][DOMAIN] = configurator.request_config(
DOMAIN,
wink_configuration_callback,
description=description,
submit_caption="submit",
description_image="/static/images/config_wink.png",
fields=[
{"id": CONF_CLIENT_ID, "name": "Client ID", "type": "string"},
{"id": CONF_CLIENT_SECRET, "name": "Client secret", "type": "string"},
],
)
def _request_oauth_completion(hass, config):
"""Request user complete Wink OAuth2 flow."""
hass.data[DOMAIN]["configurator"] = True
configurator = hass.components.configurator
if DOMAIN in hass.data[DOMAIN]["configuring"]:
configurator.notify_errors(
hass.data[DOMAIN]["configuring"][DOMAIN],
"Failed to register, please try again.",
)
return
def wink_configuration_callback(callback_data):
"""Call setup again."""
setup(hass, config)
start_url = f"{get_url(hass)}{WINK_AUTH_START}"
description = f"Please authorize Wink by visiting {start_url}"
hass.data[DOMAIN]["configuring"][DOMAIN] = configurator.request_config(
DOMAIN, wink_configuration_callback, description=description
)
def setup(hass, config):
"""Set up the Wink component."""
if hass.data.get(DOMAIN) is None:
hass.data[DOMAIN] = {
"unique_ids": [],
"entities": {},
"oauth": {},
"configuring": {},
"pubnub": None,
"configurator": False,
}
if config.get(DOMAIN) is not None:
client_id = config[DOMAIN].get(CONF_CLIENT_ID)
client_secret = config[DOMAIN].get(CONF_CLIENT_SECRET)
email = config[DOMAIN].get(CONF_EMAIL)
password = config[DOMAIN].get(CONF_PASSWORD)
local_control = config[DOMAIN].get(CONF_LOCAL_CONTROL)
else:
client_id = None
client_secret = None
email = None
password = None
local_control = None
hass.data[DOMAIN]["configurator"] = True
if None not in [client_id, client_secret]:
_LOGGER.info("Using legacy OAuth authentication")
if not local_control:
pywink.disable_local_control()
hass.data[DOMAIN]["oauth"][CONF_CLIENT_ID] = client_id
hass.data[DOMAIN]["oauth"][CONF_CLIENT_SECRET] = client_secret
hass.data[DOMAIN]["oauth"]["email"] = email
hass.data[DOMAIN]["oauth"]["password"] = password
pywink.legacy_set_wink_credentials(email, password, client_id, client_secret)
else:
_LOGGER.info("Using OAuth authentication")
if not local_control:
pywink.disable_local_control()
config_path = hass.config.path(WINK_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
_request_app_setup(hass, config)
return True
# else move on because the user modified the file
else:
save_json(config_path, DEFAULT_CONFIG)
_request_app_setup(hass, config)
return True
if DOMAIN in hass.data[DOMAIN]["configuring"]:
_configurator = hass.data[DOMAIN]["configuring"]
hass.components.configurator.request_done(_configurator.pop(DOMAIN))
# Using oauth
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
# This will be called after authorizing Home-Assistant
if None not in (access_token, refresh_token):
pywink.set_wink_credentials(
config_file.get(CONF_CLIENT_ID),
config_file.get(CONF_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
)
        # This is called to create the redirect so the user can authorize
        # Home Assistant.
else:
redirect_uri = f"{get_url(hass)}{WINK_AUTH_CALLBACK_PATH}"
wink_auth_start_url = pywink.get_authorization_url(
config_file.get(CONF_CLIENT_ID), redirect_uri
)
hass.http.register_redirect(WINK_AUTH_START, wink_auth_start_url)
hass.http.register_view(
WinkAuthCallbackView(config, config_file, pywink.request_token)
)
_request_oauth_completion(hass, config)
return True
pywink.set_user_agent(USER_AGENT)
sub_details = pywink.get_subscription_details()
hass.data[DOMAIN]["pubnub"] = PubNubSubscriptionHandler(
sub_details[0], origin=sub_details[1]
)
def _subscribe():
hass.data[DOMAIN]["pubnub"].subscribe()
# Call subscribe after the user sets up wink via the configurator
# All other methods will complete setup before
# EVENT_HOMEASSISTANT_START is called meaning they
# will call subscribe via the method below. (start_subscription)
if hass.data[DOMAIN]["configurator"]:
_subscribe()
def keep_alive_call(event_time):
"""Call the Wink API endpoints to keep PubNub working."""
_LOGGER.info("Polling the Wink API to keep PubNub updates flowing")
pywink.set_user_agent(str(int(time.time())))
_temp_response = pywink.get_user()
_LOGGER.debug(str(json.dumps(_temp_response)))
time.sleep(1)
pywink.set_user_agent(USER_AGENT)
_temp_response = pywink.wink_api_fetch()
_LOGGER.debug("%s", _temp_response)
_temp_response = pywink.post_session()
_LOGGER.debug("%s", _temp_response)
# Call the Wink API every hour to keep PubNub updates flowing
track_time_interval(hass, keep_alive_call, timedelta(minutes=60))
def start_subscription(event):
"""Start the PubNub subscription."""
_subscribe()
hass.bus.listen(EVENT_HOMEASSISTANT_START, start_subscription)
def stop_subscription(event):
"""Stop the PubNub subscription."""
hass.data[DOMAIN]["pubnub"].unsubscribe()
hass.data[DOMAIN]["pubnub"] = None
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, stop_subscription)
def save_credentials(event):
"""Save currently set OAuth credentials."""
if hass.data[DOMAIN]["oauth"].get("email") is None:
config_path = hass.config.path(WINK_CONFIG_FILE)
_config = pywink.get_current_oauth_credentials()
save_json(config_path, _config)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, save_credentials)
    # Save the user's potentially updated oauth credentials at a regular
    # interval to prevent them from expiring after a HA reboot.
track_time_interval(hass, save_credentials, timedelta(minutes=60))
def force_update(call):
"""Force all devices to poll the Wink API."""
_LOGGER.info("Refreshing Wink states from API")
for entity_list in hass.data[DOMAIN]["entities"].values():
# Throttle the calls to Wink API
for entity in entity_list:
time.sleep(1)
entity.schedule_update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_REFRESH_STATES, force_update)
def pull_new_devices(call):
"""Pull new devices added to users Wink account since startup."""
_LOGGER.info("Getting new devices from Wink API")
for _component in WINK_COMPONENTS:
discovery.load_platform(hass, _component, DOMAIN, {}, config)
hass.services.register(DOMAIN, SERVICE_ADD_NEW_DEVICES, pull_new_devices)
def set_pairing_mode(call):
"""Put the hub in provided pairing mode."""
hub_name = call.data.get("hub_name")
pairing_mode = call.data.get("pairing_mode")
kidde_code = call.data.get("kidde_radio_code")
for hub in WINK_HUBS:
if hub.name() == hub_name:
hub.pair_new_device(pairing_mode, kidde_radio_code=kidde_code)
def rename_device(call):
"""Set specified device's name."""
# This should only be called on one device at a time.
found_device = None
entity_id = call.data.get("entity_id")[0]
all_devices = []
for list_of_devices in hass.data[DOMAIN]["entities"].values():
all_devices += list_of_devices
for device in all_devices:
if device.entity_id == entity_id:
found_device = device
if found_device is not None:
name = call.data.get("name")
found_device.wink.set_name(name)
hass.services.register(
DOMAIN, SERVICE_RENAME_DEVICE, rename_device, schema=RENAME_DEVICE_SCHEMA
)
def delete_device(call):
"""Delete specified device."""
# This should only be called on one device at a time.
found_device = None
entity_id = call.data.get("entity_id")[0]
all_devices = []
for list_of_devices in hass.data[DOMAIN]["entities"].values():
all_devices += list_of_devices
for device in all_devices:
if device.entity_id == entity_id:
found_device = device
if found_device is not None:
found_device.wink.remove_device()
hass.services.register(
DOMAIN, SERVICE_DELETE_DEVICE, delete_device, schema=DELETE_DEVICE_SCHEMA
)
hubs = pywink.get_hubs()
for hub in hubs:
if hub.device_manufacturer() == "wink":
WINK_HUBS.append(hub)
if WINK_HUBS:
hass.services.register(
DOMAIN,
SERVICE_SET_PAIRING_MODE,
set_pairing_mode,
schema=SET_PAIRING_MODE_SCHEMA,
)
def nimbus_service_handle(service):
"""Handle nimbus services."""
entity_id = service.data.get("entity_id")[0]
_all_dials = []
for sensor in hass.data[DOMAIN]["entities"]["sensor"]:
if isinstance(sensor, WinkNimbusDialDevice):
_all_dials.append(sensor)
for _dial in _all_dials:
if _dial.entity_id == entity_id:
if service.service == SERVICE_SET_DIAL_CONFIG:
_dial.set_configuration(**service.data)
if service.service == SERVICE_SET_DIAL_STATE:
_dial.wink.set_state(
service.data.get("value"), service.data.get("labels")
)
def siren_service_handle(service):
"""Handle siren services."""
entity_ids = service.data.get("entity_id")
all_sirens = []
for switch in hass.data[DOMAIN]["entities"]["switch"]:
if isinstance(switch, WinkSirenDevice):
all_sirens.append(switch)
sirens_to_set = []
if entity_ids is None:
sirens_to_set = all_sirens
else:
for siren in all_sirens:
if siren.entity_id in entity_ids:
sirens_to_set.append(siren)
for siren in sirens_to_set:
_man = siren.wink.device_manufacturer()
if (
service.service != SERVICE_SET_AUTO_SHUTOFF
and service.service != SERVICE_ENABLE_SIREN
and _man not in ("dome", "wink")
):
_LOGGER.error("Service only valid for Dome or Wink sirens")
return
if service.service == SERVICE_ENABLE_SIREN:
siren.wink.set_state(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_SET_AUTO_SHUTOFF:
siren.wink.set_auto_shutoff(service.data.get(ATTR_AUTO_SHUTOFF))
elif service.service == SERVICE_SET_CHIME_VOLUME:
siren.wink.set_chime_volume(service.data.get(ATTR_VOLUME))
elif service.service == SERVICE_SET_SIREN_VOLUME:
siren.wink.set_siren_volume(service.data.get(ATTR_VOLUME))
elif service.service == SERVICE_SET_SIREN_TONE:
siren.wink.set_siren_sound(service.data.get(ATTR_TONE))
elif service.service == SERVICE_ENABLE_CHIME:
siren.wink.set_chime(service.data.get(ATTR_TONE))
elif service.service == SERVICE_SIREN_STROBE_ENABLED:
siren.wink.set_siren_strobe_enabled(service.data.get(ATTR_ENABLED))
elif service.service == SERVICE_CHIME_STROBE_ENABLED:
siren.wink.set_chime_strobe_enabled(service.data.get(ATTR_ENABLED))
# Load components for the devices in Wink that we support
for wink_component in WINK_COMPONENTS:
hass.data[DOMAIN]["entities"][wink_component] = []
discovery.load_platform(hass, wink_component, DOMAIN, {}, config)
component = EntityComponent(_LOGGER, DOMAIN, hass)
sirens = []
has_dome_or_wink_siren = False
for siren in pywink.get_sirens():
_man = siren.device_manufacturer()
if _man in ("dome", "wink"):
has_dome_or_wink_siren = True
_id = siren.object_id() + siren.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
sirens.append(WinkSirenDevice(siren, hass))
if sirens:
hass.services.register(
DOMAIN,
SERVICE_SET_AUTO_SHUTOFF,
siren_service_handle,
schema=SET_AUTO_SHUTOFF_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_ENABLE_SIREN,
siren_service_handle,
schema=ENABLED_SIREN_SCHEMA,
)
if has_dome_or_wink_siren:
hass.services.register(
DOMAIN,
SERVICE_SET_SIREN_TONE,
siren_service_handle,
schema=SET_SIREN_TONE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_ENABLE_CHIME,
siren_service_handle,
schema=SET_CHIME_MODE_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SET_SIREN_VOLUME,
siren_service_handle,
schema=SET_VOLUME_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SET_CHIME_VOLUME,
siren_service_handle,
schema=SET_VOLUME_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SIREN_STROBE_ENABLED,
siren_service_handle,
schema=SET_STROBE_ENABLED_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_CHIME_STROBE_ENABLED,
siren_service_handle,
schema=SET_STROBE_ENABLED_SCHEMA,
)
component.add_entities(sirens)
nimbi = []
dials = {}
all_nimbi = pywink.get_cloud_clocks()
all_dials = []
for nimbus in all_nimbi:
if nimbus.object_type() == "cloud_clock":
nimbi.append(nimbus)
dials[nimbus.object_id()] = []
for nimbus in all_nimbi:
if nimbus.object_type() == "dial":
dials[nimbus.parent_id()].append(nimbus)
for nimbus in nimbi:
for dial in dials[nimbus.object_id()]:
all_dials.append(WinkNimbusDialDevice(nimbus, dial, hass))
if nimbi:
hass.services.register(
DOMAIN,
SERVICE_SET_DIAL_CONFIG,
nimbus_service_handle,
schema=DIAL_CONFIG_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SET_DIAL_STATE,
nimbus_service_handle,
schema=DIAL_STATE_SCHEMA,
)
component.add_entities(all_dials)
return True
class WinkAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
url = "/auth/wink/callback"
name = "auth:wink:callback"
requires_auth = False
def __init__(self, config, config_file, request_token):
"""Initialize the OAuth callback view."""
self.config = config
self.config_file = config_file
self.request_token = request_token
@callback
def get(self, request):
"""Finish OAuth callback request."""
hass = request.app["hass"]
data = request.query
response_message = """Wink has been successfully authorized!
You can close this window now! For the best results you should reboot
Home Assistant"""
html_response = """<html><head><title>Wink Auth</title></head>
<body><h1>{}</h1></body></html>"""
if data.get("code") is not None:
response = self.request_token(
data.get("code"), self.config_file[CONF_CLIENT_SECRET]
)
config_contents = {
ATTR_ACCESS_TOKEN: response["access_token"],
ATTR_REFRESH_TOKEN: response["refresh_token"],
CONF_CLIENT_ID: self.config_file[CONF_CLIENT_ID],
CONF_CLIENT_SECRET: self.config_file[CONF_CLIENT_SECRET],
}
save_json(hass.config.path(WINK_CONFIG_FILE), config_contents)
hass.async_add_job(setup, hass, self.config)
return Response(
text=html_response.format(response_message), content_type="text/html"
)
error_msg = "No code returned from Wink API"
_LOGGER.error(error_msg)
return Response(text=html_response.format(error_msg), content_type="text/html")
class WinkDevice(Entity):
"""Representation a base Wink device."""
def __init__(self, wink, hass):
"""Initialize the Wink device."""
self.hass = hass
self.wink = wink
hass.data[DOMAIN]["pubnub"].add_subscription(
self.wink.pubnub_channel, self._pubnub_update
)
hass.data[DOMAIN]["unique_ids"].append(self.wink.object_id() + self.wink.name())
def _pubnub_update(self, message):
_LOGGER.debug(message)
try:
if message is None:
_LOGGER.error(
"Error on pubnub update for %s polling API for current state",
self.name,
)
self.schedule_update_ha_state(True)
else:
self.wink.pubnub_update(message)
self.schedule_update_ha_state()
except (ValueError, KeyError, AttributeError):
_LOGGER.error(
"Error in pubnub JSON for %s polling API for current state", self.name
)
self.schedule_update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self.wink.name()
@property
def unique_id(self):
"""Return the unique id of the Wink device."""
if hasattr(self.wink, "capability") and self.wink.capability() is not None:
return f"{self.wink.object_id()}_{self.wink.capability()}"
return self.wink.object_id()
@property
def available(self):
"""Return true if connection == True."""
return self.wink.available()
def update(self):
"""Update state of the device."""
self.wink.update_state()
@property
def should_poll(self):
"""Only poll if we are not subscribed to pubnub."""
return self.wink.pubnub_channel is None
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attributes = {}
battery = self._battery_level
if battery:
attributes[ATTR_BATTERY_LEVEL] = battery
man_dev_model = self._manufacturer_device_model
if man_dev_model:
attributes["manufacturer_device_model"] = man_dev_model
man_dev_id = self._manufacturer_device_id
if man_dev_id:
attributes["manufacturer_device_id"] = man_dev_id
dev_man = self._device_manufacturer
if dev_man:
attributes["device_manufacturer"] = dev_man
model_name = self._model_name
if model_name:
attributes["model_name"] = model_name
tamper = self._tamper
if tamper is not None:
attributes["tamper_detected"] = tamper
return attributes
@property
def _battery_level(self):
"""Return the battery level."""
if self.wink.battery_level() is not None:
return self.wink.battery_level() * 100
@property
def _manufacturer_device_model(self):
"""Return the manufacturer device model."""
return self.wink.manufacturer_device_model()
@property
def _manufacturer_device_id(self):
"""Return the manufacturer device id."""
return self.wink.manufacturer_device_id()
@property
def _device_manufacturer(self):
"""Return the device manufacturer."""
return self.wink.device_manufacturer()
@property
def _model_name(self):
"""Return the model name."""
return self.wink.model_name()
@property
def _tamper(self):
"""Return the devices tamper status."""
if hasattr(self.wink, "tamper_detected"):
return self.wink.tamper_detected()
return None
class WinkSirenDevice(WinkDevice):
"""Representation of a Wink siren device."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["switch"].append(self)
@property
def state(self):
"""Return sirens state."""
if self.wink.state():
return STATE_ON
return STATE_OFF
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:bell-ring"
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = super().extra_state_attributes
auto_shutoff = self.wink.auto_shutoff()
if auto_shutoff is not None:
attributes["auto_shutoff"] = auto_shutoff
siren_volume = self.wink.siren_volume()
if siren_volume is not None:
attributes["siren_volume"] = siren_volume
chime_volume = self.wink.chime_volume()
if chime_volume is not None:
attributes["chime_volume"] = chime_volume
strobe_enabled = self.wink.strobe_enabled()
if strobe_enabled is not None:
attributes["siren_strobe_enabled"] = strobe_enabled
chime_strobe_enabled = self.wink.chime_strobe_enabled()
if chime_strobe_enabled is not None:
attributes["chime_strobe_enabled"] = chime_strobe_enabled
siren_sound = self.wink.siren_sound()
if siren_sound is not None:
attributes["siren_sound"] = siren_sound
chime_mode = self.wink.chime_mode()
if chime_mode is not None:
attributes["chime_mode"] = chime_mode
return attributes
class WinkNimbusDialDevice(WinkDevice):
"""Representation of the Quirky Nimbus device."""
def __init__(self, nimbus, dial, hass):
"""Initialize the Nimbus dial."""
super().__init__(dial, hass)
self.parent = nimbus
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["sensor"].append(self)
@property
def state(self):
"""Return dials current value."""
return self.wink.state()
@property
def name(self):
"""Return the name of the device."""
return f"{self.parent.name()} dial {self.wink.index() + 1}"
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
attributes = super().extra_state_attributes
dial_attributes = self.dial_attributes()
return {**attributes, **dial_attributes}
def dial_attributes(self):
"""Return the dial only attributes."""
return {
"labels": self.wink.labels(),
"position": self.wink.position(),
"rotation": self.wink.rotation(),
"max_value": self.wink.max_value(),
"min_value": self.wink.min_value(),
"num_ticks": self.wink.ticks(),
"scale_type": self.wink.scale(),
"max_position": self.wink.max_position(),
"min_position": self.wink.min_position(),
}
def set_configuration(self, **kwargs):
"""
Set the dial config.
Anything not sent will default to current setting.
"""
attributes = {**self.dial_attributes(), **kwargs}
min_value = attributes["min_value"]
max_value = attributes["max_value"]
rotation = attributes["rotation"]
ticks = attributes["num_ticks"]
scale = attributes["scale_type"]
min_position = attributes["min_position"]
max_position = attributes["max_position"]
self.wink.set_configuration(
min_value,
max_value,
rotation,
scale=scale,
ticks=ticks,
min_position=min_position,
max_position=max_position,
)
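# Illustrative sketch only (not from the original integration or its docs): a
# service call routed to nimbus_service_handle/SERVICE_SET_DIAL_CONFIG above
# might carry data shaped like the dict below. The entity id and values are
# hypothetical; any key left out falls back to the dial's current setting via
# WinkNimbusDialDevice.set_configuration().
#
#     {
#         "entity_id": "sensor.nimbus_dial_1",
#         "min_value": 0,
#         "max_value": 100,
#         "rotation": "cw",
#         "scale_type": "linear",
#         "num_ticks": 12,
#     }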
|
mit
|
darach/cassandra
|
pylib/cqlshlib/pylexotron.py
|
38
|
18820
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from .saferscanner import SaferScanner
class LexingError(Exception):
@classmethod
def from_text(cls, rulestr, unmatched, msg='Lexing error'):
bad_char = len(rulestr) - len(unmatched)
linenum = rulestr[:bad_char].count('\n') + 1
charnum = len(rulestr[:bad_char].rsplit('\n', 1)[-1]) + 1
snippet_start = max(0, min(len(rulestr), bad_char - 10))
snippet_end = max(0, min(len(rulestr), bad_char + 10))
msg += " (Error at: '...%s...')" % (rulestr[snippet_start:snippet_end],)
raise cls(linenum, charnum, msg)
def __init__(self, linenum, charnum, msg='Lexing error'):
self.linenum = linenum
self.charnum = charnum
self.msg = msg
self.args = (linenum, charnum, msg)
def __str__(self):
return '%s at line %d, char %d' % (self.msg, self.linenum, self.charnum)
class Hint:
def __init__(self, text):
self.text = text
def __hash__(self):
return hash((id(self.__class__), self.text))
def __eq__(self, other):
return isinstance(other, self.__class__) and other.text == self.text
def __repr__(self):
return '%s(%r)' % (self.__class__, self.text)
def is_hint(x):
return isinstance(x, Hint)
class ParseContext:
"""
These are meant to be immutable, although it would be something of a
pain to enforce that in python.
"""
def __init__(self, ruleset, bindings, matched, remainder, productionname):
self.ruleset = ruleset
self.bindings = bindings
self.matched = matched
self.remainder = remainder
self.productionname = productionname
def get_production_by_name(self, name):
return self.ruleset[name]
def get_completer(self, symname):
return self.ruleset[(self.productionname, symname)]
def get_binding(self, name, default=None):
return self.bindings.get(name, default)
def with_binding(self, name, val):
newbinds = self.bindings.copy()
newbinds[name] = val
return self.__class__(self.ruleset, newbinds, self.matched,
self.remainder, self.productionname)
def with_match(self, num):
return self.__class__(self.ruleset, self.bindings,
self.matched + self.remainder[:num],
self.remainder[num:], self.productionname)
def with_production_named(self, newname):
return self.__class__(self.ruleset, self.bindings, self.matched,
self.remainder, newname)
def extract_orig(self, tokens=None):
if tokens is None:
tokens = self.matched
if not tokens:
return ''
orig = self.bindings.get('*SRC*', None)
if orig is None:
# pretty much just guess
return ' '.join([t[1] for t in tokens])
# low end of span for first token, to high end of span for last token
orig_text = orig[tokens[0][2][0]:tokens[-1][2][1]]
# Convert all unicode tokens to ascii, where possible. This
# helps avoid problems with performing unicode-incompatible
# operations on tokens (like .lower()). See CASSANDRA-9083
# for one example of this.
try:
orig_text = orig_text.encode('ascii')
except UnicodeEncodeError:
pass
return orig_text
def __repr__(self):
return '<%s matched=%r remainder=%r prodname=%r bindings=%r>' \
% (self.__class__.__name__, self.matched, self.remainder, self.productionname, self.bindings)
class matcher:
def __init__(self, arg):
self.arg = arg
def match(self, ctxt, completions):
raise NotImplementedError
def match_with_results(self, ctxt, completions):
matched_before = len(ctxt.matched)
newctxts = self.match(ctxt, completions)
return [(newctxt, newctxt.matched[matched_before:]) for newctxt in newctxts]
@staticmethod
def try_registered_completion(ctxt, symname, completions):
debugging = ctxt.get_binding('*DEBUG*', False)
if ctxt.remainder or completions is None:
return False
try:
completer = ctxt.get_completer(symname)
except KeyError:
return False
if debugging:
print "Trying completer %r with %r" % (completer, ctxt)
try:
new_compls = completer(ctxt)
except Exception:
if debugging:
import traceback
traceback.print_exc()
return False
if debugging:
print "got %r" % (new_compls,)
completions.update(new_compls)
return True
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.arg)
class choice(matcher):
def match(self, ctxt, completions):
foundctxts = []
for a in self.arg:
subctxts = a.match(ctxt, completions)
foundctxts.extend(subctxts)
return foundctxts
class one_or_none(matcher):
def match(self, ctxt, completions):
return [ctxt] + list(self.arg.match(ctxt, completions))
class repeat(matcher):
def match(self, ctxt, completions):
found = [ctxt]
ctxts = [ctxt]
while True:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(self.arg.match(c, completions))
if not new_ctxts:
return found
found.extend(new_ctxts)
ctxts = new_ctxts
class rule_reference(matcher):
def match(self, ctxt, completions):
prevname = ctxt.productionname
try:
rule = ctxt.get_production_by_name(self.arg)
except KeyError:
raise ValueError("Can't look up production rule named %r" % (self.arg,))
output = rule.match(ctxt.with_production_named(self.arg), completions)
return [c.with_production_named(prevname) for c in output]
class rule_series(matcher):
def match(self, ctxt, completions):
ctxts = [ctxt]
for patpiece in self.arg:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(patpiece.match(c, completions))
if not new_ctxts:
return ()
ctxts = new_ctxts
return ctxts
class named_symbol(matcher):
def __init__(self, name, arg):
matcher.__init__(self, arg)
self.name = name
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
results = self.arg.match_with_results(ctxt, pass_in_compls)
return [c.with_binding(self.name, ctxt.extract_orig(matchtoks)) for (c, matchtoks) in results]
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
class named_collector(named_symbol):
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
output = []
for ctxt, matchtoks in self.arg.match_with_results(ctxt, pass_in_compls):
oldval = ctxt.get_binding(self.name, ())
output.append(ctxt.with_binding(self.name, oldval + (ctxt.extract_orig(matchtoks),)))
return output
class terminal_matcher(matcher):
def pattern(self):
raise NotImplementedError
class regex_rule(terminal_matcher):
def __init__(self, pat):
terminal_matcher.__init__(self, pat)
self.regex = pat
self.re = re.compile(pat + '$', re.I | re.S)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.re.match(ctxt.remainder[0][1]):
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(Hint('<%s>' % ctxt.productionname))
return []
def pattern(self):
return self.regex
class text_match(terminal_matcher):
alpha_re = re.compile(r'[a-zA-Z]')
def __init__(self, text):
try:
terminal_matcher.__init__(self, eval(text))
except SyntaxError:
print "bad syntax %r" % (text,)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg.lower() == ctxt.remainder[0][1].lower():
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
# can't use (?i) here- Scanner component regex flags won't be applied
def ignorecaseify(matchobj):
c = matchobj.group(0)
return '[%s%s]' % (c.upper(), c.lower())
return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
class case_match(text_match):
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg == ctxt.remainder[0][1]:
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
return re.escape(self.arg)
class word_match(text_match):
def pattern(self):
return r'\b' + text_match.pattern(self) + r'\b'
class case_word_match(case_match):
def pattern(self):
return r'\b' + case_match.pattern(self) + r'\b'
class terminal_type_matcher(matcher):
def __init__(self, tokentype, submatcher):
matcher.__init__(self, tokentype)
self.tokentype = tokentype
self.submatcher = submatcher
def match(self, ctxt, completions):
if ctxt.remainder:
if ctxt.remainder[0][0] == self.tokentype:
return [ctxt.with_match(1)]
elif completions is not None:
self.submatcher.match(ctxt, completions)
return []
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.tokentype, self.submatcher)
class ParsingRuleSet:
RuleSpecScanner = SaferScanner([
(r'::=', lambda s,t: t),
(r'\[[a-z0-9_]+\]=', lambda s,t: ('named_collector', t[1:-2])),
(r'[a-z0-9_]+=', lambda s,t: ('named_symbol', t[:-1])),
(r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s,t: ('regex', t[1:-1].replace(r'\/', '/'))),
(r'"([^"]|\\.)*"', lambda s,t: ('litstring', t)),
(r'<[^>]*>', lambda s,t: ('reference', t[1:-1])),
(r'\bJUNK\b', lambda s,t: ('junk', t)),
(r'[@()|?*;]', lambda s,t: t),
(r'\s+', None),
(r'#[^\n]*', None),
], re.I | re.S)
def __init__(self):
self.ruleset = {}
self.scanner = None
self.terminals = []
@classmethod
def from_rule_defs(cls, rule_defs):
prs = cls()
prs.ruleset, prs.terminals = cls.parse_rules(rule_defs)
return prs
@classmethod
def parse_rules(cls, rulestr):
tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
if unmatched:
raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
rules = {}
terminals = []
tokeniter = iter(tokens)
for t in tokeniter:
if isinstance(t, tuple) and t[0] in ('reference', 'junk'):
assign = tokeniter.next()
if assign != '::=':
raise ValueError('Unexpected token %r; expected "::="' % (assign,))
name = t[1]
production = cls.read_rule_tokens_until(';', tokeniter)
if isinstance(production, terminal_matcher):
terminals.append((name, production))
production = terminal_type_matcher(name, production)
rules[name] = production
else:
raise ValueError('Unexpected token %r; expected name' % (t,))
return rules, terminals
@staticmethod
def mkrule(pieces):
if isinstance(pieces, (tuple, list)):
if len(pieces) == 1:
return pieces[0]
return rule_series(pieces)
return pieces
@classmethod
def read_rule_tokens_until(cls, endtoks, tokeniter):
if isinstance(endtoks, basestring):
endtoks = (endtoks,)
counttarget = None
if isinstance(endtoks, int):
counttarget = endtoks
endtoks = ()
countsofar = 0
myrules = []
mybranches = [myrules]
for t in tokeniter:
countsofar += 1
if t in endtoks:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
if isinstance(t, tuple):
if t[0] == 'reference':
t = rule_reference(t[1])
elif t[0] == 'litstring':
if t[1][1].isalnum() or t[1][1] == '_':
t = word_match(t[1])
else:
t = text_match(t[1])
elif t[0] == 'regex':
t = regex_rule(t[1])
elif t[0] == 'named_collector':
t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t[0] == 'named_symbol':
t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t == '(':
t = cls.read_rule_tokens_until(')', tokeniter)
elif t == '?':
t = one_or_none(myrules.pop(-1))
elif t == '*':
t = repeat(myrules.pop(-1))
elif t == '@':
x = tokeniter.next()
if not isinstance(x, tuple) or x[0] != 'litstring':
raise ValueError("Unexpected token %r following '@'" % (x,))
t = case_match(x[1])
elif t == '|':
myrules = []
mybranches.append(myrules)
continue
else:
raise ValueError('Unparseable rule token %r after %r' % (t, myrules[-1]))
myrules.append(t)
if countsofar == counttarget:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
raise ValueError('Unexpected end of rule tokens')
def append_rules(self, rulestr):
rules, terminals = self.parse_rules(rulestr)
self.ruleset.update(rules)
self.terminals.extend(terminals)
if terminals:
self.scanner = None # recreate it if/when necessary
def register_completer(self, func, rulename, symname):
self.ruleset[(rulename, symname)] = func
def make_lexer(self):
def make_handler(name):
if name == 'JUNK':
return None
return lambda s, t: (name, t, s.match.span())
regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
return SaferScanner(regexes, re.I | re.S).scan
def lex(self, text):
if self.scanner is None:
self.scanner = self.make_lexer()
tokens, unmatched = self.scanner(text)
if unmatched:
raise LexingError.from_text(text, unmatched, 'text could not be lexed')
return tokens
def parse(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
return pattern.match(ctxt, None)
def whole_match(self, startsymbol, tokens, srcstr=None):
bindings = {}
if srcstr is not None:
bindings['*SRC*'] = srcstr
for c in self.parse(startsymbol, tokens, init_bindings=bindings):
if not c.remainder:
return c
def lex_and_parse(self, text, startsymbol='Start'):
return self.parse(startsymbol, self.lex(text), init_bindings={'*SRC*': text})
def lex_and_whole_match(self, text, startsymbol='Start'):
tokens = self.lex(text)
return self.whole_match(startsymbol, tokens, srcstr=text)
def complete(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
if init_bindings.get('*DEBUG*', False):
completions = Debugotron(stream=sys.stderr)
else:
completions = set()
pattern.match(ctxt, completions)
return completions
import sys, traceback
class Debugotron(set):
depth = 10
def __init__(self, initializer=(), stream=sys.stdout):
set.__init__(self, initializer)
self.stream = stream
def add(self, item):
self._note_addition(item)
set.add(self, item)
def _note_addition(self, foo):
self.stream.write("\nitem %r added by:\n" % (foo,))
frame = sys._getframe().f_back.f_back
for i in range(self.depth):
name = frame.f_code.co_name
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if 'self' in frame.f_locals:
clsobj = frame.f_locals['self']
line = '%s.%s() (%s:%d)' % (clsobj, name, filename, lineno)
else:
line = '%s (%s:%d)' % (name, filename, lineno)
self.stream.write(' - %s\n' % (line,))
if i == 0 and 'ctxt' in frame.f_locals:
self.stream.write(' - %s\n' % (frame.f_locals['ctxt'],))
frame = frame.f_back
def update(self, items):
if items:
self._note_addition(items)
set.update(self, items)
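# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original cqlsh module): a tiny,
# hypothetical grammar showing how the rule syntax recognized by
# RuleSpecScanner fits together. The rule names, binding names and sample
# input below are invented purely for demonstration.
if __name__ == '__main__':
    demo_rules = r'''
        JUNK         ::= /\s+/ ;
        <Start>      ::= "select" col=<identifier> "from" tbl=<identifier> ;
        <identifier> ::= /[a-z][a-z0-9_]*/ ;
    '''
    demo_parser = ParsingRuleSet.from_rule_defs(demo_rules)
    demo_match = demo_parser.lex_and_whole_match('select name from users')
    if demo_match is not None:
        # Expected output: name users
        print demo_match.get_binding('col'), demo_match.get_binding('tbl')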
|
apache-2.0
|
TalShafir/ansible
|
lib/ansible/module_utils/ldap.py
|
78
|
2327
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Peter Sagerson <psagers@ignorare.net>
# Copyright: (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
# Copyright: (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
from ansible.module_utils._text import to_native
try:
import ldap
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
def gen_specs(**specs):
specs.update({
'bind_dn': dict(),
'bind_pw': dict(default='', no_log=True),
'dn': dict(required=True),
'server_uri': dict(default='ldapi:///'),
'start_tls': dict(default=False, type='bool'),
'validate_certs': dict(default=True, type='bool'),
})
return specs
class LdapGeneric(object):
def __init__(self, module):
# Shortcuts
self.module = module
self.bind_dn = self.module.params['bind_dn']
self.bind_pw = self.module.params['bind_pw']
self.dn = self.module.params['dn']
self.server_uri = self.module.params['server_uri']
self.start_tls = self.module.params['start_tls']
self.verify_cert = self.module.params['validate_certs']
# Establish connection
self.connection = self._connect_to_ldap()
def fail(self, msg, exn):
self.module.fail_json(
msg=msg,
details=to_native(exn),
exception=traceback.format_exc()
)
def _connect_to_ldap(self):
if not self.verify_cert:
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
connection = ldap.initialize(self.server_uri)
if self.start_tls:
try:
connection.start_tls_s()
except ldap.LDAPError as e:
self.fail("Cannot start TLS.", e)
try:
if self.bind_dn is not None:
connection.simple_bind_s(self.bind_dn, self.bind_pw)
else:
connection.sasl_interactive_bind_s('', ldap.sasl.external())
except ldap.LDAPError as e:
self.fail("Cannot bind to the server.", e)
return connection
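# Illustrative sketch (an assumption about the calling convention, not part of
# this utility module): an ldap_* module would typically merge its own options
# into gen_specs() and hand the resulting AnsibleModule to LdapGeneric, e.g.
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     module = AnsibleModule(argument_spec=gen_specs(state=dict(default='present')))
#     ldap_helper = LdapGeneric(module)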
|
gpl-3.0
|
rosudrag/Freemium-winner
|
VirtualEnvironment/Lib/site-packages/flask/testsuite/reqctx.py
|
1
|
5967
|
# -*- coding: utf-8 -*-
"""
    flask.testsuite.reqctx
~~~~~~~~~~~~~~~~~~~~~~
Tests the request context.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
try:
from greenlet import greenlet
except ImportError:
greenlet = None
from flask.testsuite import FlaskTestCase
class RequestContextTestCase(FlaskTestCase):
def test_teardown_on_pop(self):
buffer = []
app = flask.Flask(__name__)
@app.teardown_request
def end_of_request(exception):
buffer.append(exception)
ctx = app.test_request_context()
ctx.push()
self.assert_equal(buffer, [])
ctx.pop()
self.assert_equal(buffer, [None])
def test_proper_test_request_context(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return None
@app.route('/', subdomain='foo')
def sub():
return None
with app.test_request_context('/'):
self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/')
with app.test_request_context('/'):
self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/')
try:
with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}):
pass
except Exception as e:
self.assert_true(isinstance(e, ValueError))
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:5000') does not match the " + \
"server name from the WSGI environment ('localhost')")
try:
app.config.update(SERVER_NAME='localhost')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
try:
app.config.update(SERVER_NAME='localhost:80')
with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}):
pass
except ValueError as e:
raise ValueError(
"No ValueError exception should have been raised \"%s\"" % e
)
def test_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
@app.route('/meh')
def meh():
return flask.request.url
with app.test_request_context('/?name=World'):
self.assert_equal(index(), 'Hello World!')
with app.test_request_context('/meh'):
self.assert_equal(meh(), 'http://localhost/meh')
self.assert_true(flask._request_ctx_stack.top is None)
def test_context_test(self):
app = flask.Flask(__name__)
self.assert_false(flask.request)
self.assert_false(flask.has_request_context())
ctx = app.test_request_context()
ctx.push()
try:
self.assert_true(flask.request)
self.assert_true(flask.has_request_context())
finally:
ctx.pop()
def test_manual_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return 'Hello %s!' % flask.request.args['name']
ctx = app.test_request_context('/?name=World')
ctx.push()
self.assert_equal(index(), 'Hello World!')
ctx.pop()
try:
index()
except RuntimeError:
pass
else:
self.assert_true(0, 'expected runtime error')
def test_greenlet_context_copying(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
def g():
self.assert_false(flask.request)
self.assert_false(flask.current_app)
with reqctx:
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
self.assert_false(flask.request)
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
def test_greenlet_context_copying_api(self):
app = flask.Flask(__name__)
greenlets = []
@app.route('/')
def index():
reqctx = flask._request_ctx_stack.top.copy()
@flask.copy_current_request_context
def g():
self.assert_true(flask.request)
self.assert_equal(flask.current_app, app)
self.assert_equal(flask.request.path, '/')
self.assert_equal(flask.request.args['foo'], 'bar')
return 42
greenlets.append(greenlet(g))
return 'Hello World!'
rv = app.test_client().get('/?foo=bar')
self.assert_equal(rv.data, b'Hello World!')
result = greenlets[0].run()
self.assert_equal(result, 42)
# Disable test if we don't have greenlets available
if greenlet is None:
test_greenlet_context_copying = None
test_greenlet_context_copying_api = None
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(RequestContextTestCase))
return suite
|
mit
|
Mvrcu/catch-the-money
|
final_project.py
|
1
|
11237
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Main.py
import os
import sys
import random
import pygame as pg
CAPTION = "Catch the falling funds with your wallet!"
SCREEN_SIZE = (850, 600)
TRANSPARENT = (0, 0, 0, 0)
# This global constant serves as a very useful convenience for me.
DIRECT_DICT = {pg.K_LEFT : (-2, 0),
pg.K_RIGHT : ( 2, 0),
pg.K_UP : ( 0,-2),
pg.K_DOWN : ( 0, 2)}
def load_images():
"""Loads all images in 'img' folder for use on call."""
def load_image(img_file):
"""
This function gathers all relevant images in the image folder of the
game.
"""
directory = os.path.dirname(os.path.abspath(__file__))
file_name = os.path.join(directory, 'img', img_file)
return pg.image.load(file_name).convert_alpha()
return {'money': load_image('money.png'),
'wallet': load_image('wallet.png'),
'bonus_card': load_image('bonus_card.png'),
'saw': load_image('saw.png')}
class Money(pg.sprite.Sprite):
def __init__(self, screen_rect, *groups):
"""
The pos argument is a tuple for the center of the player (x,y);
speed is given in pixels/frame.
"""
super(Money, self).__init__(*groups)
self.image = IMAGES["money"]
self.rect = self.image.get_rect()
self.reset(screen_rect)
def reset(self, screen_rect):
self.speed = random.randrange(3, 4)
self.rect.y = random.randrange(-300, -self.rect.h)
self.rect.x = random.randrange(0, screen_rect.w - self.rect.w)
def update(self, screen_rect, *args):
self.rect.y += self.speed
if self.rect.top > screen_rect.h:
self.reset(screen_rect)
class Saw(pg.sprite.Sprite):
def __init__(self, screen_rect, *groups):
"""
The pos argument is a tuple for the center of the player (x,y);
speed is given in pixels/frame.
"""
super(Saw, self).__init__(*groups)
self.image = IMAGES["saw"]
self.rect = self.image.get_rect()
self.reset(screen_rect)
def reset(self, screen_rect):
self.speed = random.randrange(2, 4)
self.rect.y = random.randrange(-300, -self.rect.h)
self.rect.x = random.randrange(0, screen_rect.w - self.rect.w)
def update(self, screen_rect, *args):
self.rect.y += self.speed
if self.rect.top > screen_rect.h:
self.reset(screen_rect)
class Bonus_Card(pg.sprite.Sprite):
def __init__(self, screen_rect, *groups):
"""
The pos argument is a tuple for the center of the bonus card (x,y);
speed is given in pixels/frame.
"""
super(Bonus_Card, self).__init__(*groups)
self.image = IMAGES["bonus_card"]
self.rect = self.image.get_rect()
self.reset(screen_rect)
def reset(self, screen_rect):
self.speed = random.randrange(4, 7)
self.rect.y = random.randrange(-300, -self.rect.h)
self.rect.x = random.randrange(0, screen_rect.w - self.rect.w)
def update(self, screen_rect, *args):
self.rect.y += self.speed
if self.rect.top > screen_rect.h:
self.reset(screen_rect)
class Player(pg.sprite.Sprite):
"""
This class will represent our user controlled character.
"""
def __init__(self, pos, speed, *groups):
"""
The pos argument is a tuple for the center of the player (x,y);
speed is given in pixels/frame.
"""
super(Player, self).__init__(*groups)
self.image = IMAGES["wallet"]
self.rect = self.image.get_rect(center=pos)
self.speed = speed
self.score = 0
def update(self, screen_rect, keys):
"""
Updates our player appropriately every frame.
"""
for key in DIRECT_DICT:
if keys[key]:
self.rect.x += DIRECT_DICT[key][0]*self.speed
self.rect.y += DIRECT_DICT[key][1]*self.speed
self.rect.clamp_ip(screen_rect)
class Application(object):
"""
A class to manage our event, game loop, and overall program flow.
"""
def __init__(self):
"""
Get a reference to the display surface; set up required attributes;
and create a Player instance.
"""
self.screen = pg.display.get_surface()
self.screen_rect = self.screen.get_rect()
self.clock = pg.time.Clock()
self.fps = 60
self.done = False
self.session_number = 0
self.keys = pg.key.get_pressed()
self.allsprites = pg.sprite.Group()
self.card_sprite = pg.sprite.Group()
self.money_sprite = pg.sprite.Group()
self.saw_sprite = pg.sprite.Group()
self.player = Player(self.screen_rect.center, 5, self.allsprites)
self.score_text = None
for _ in range(8):
Money(self.screen_rect, self.allsprites, self.money_sprite)
for _ in range(1):
Bonus_Card(self.screen_rect, self.allsprites, self.card_sprite)
for _ in range(3):
Saw(self.screen_rect, self.allsprites, self.saw_sprite)
        self.joystick = None
        joystick_count = pg.joystick.get_count()
        print("There are", joystick_count, "joysticks.")
        if joystick_count == 0:
            print("No joysticks were found.")
        else:
            # Keep a reference on self so event_loop() can read the axes.
            self.joystick = pg.joystick.Joystick(0)
            self.joystick.init()
def event_loop(self):
"""
One event loop. Never cut your game off from the event loop.
Your OS may decide your program has hung if the event queue is not
accessed for a prolonged period of time.
"""
for event in pg.event.get():
if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:
self.done = True
elif event.type in (pg.KEYUP, pg.KEYDOWN):
self.keys = pg.key.get_pressed()
            elif event.type in (pg.JOYBUTTONDOWN, pg.JOYBUTTONUP) and self.joystick:
                h_axis_pos = self.joystick.get_axis(0)
                v_axis_pos = self.joystick.get_axis(1)
                print(h_axis_pos, v_axis_pos)
                # Nudge the player's wallet by the joystick axis values.
                self.player.rect.x += int(h_axis_pos * 5)
                self.player.rect.y += int(v_axis_pos * 5)
if self.keys[pg.K_SPACE]:
print('User let go of the space bar key')
if not self.done:
self.session_number += 1
self.done = False
print("Not done!")
elif self.keys[pg.K_l]:
print('Lose Game!')
self.done = False
def render(self):
"""
Perform all necessary drawing and update the screen.
"""
self.screen.fill(pg.Color(51, 153, 255))
if not self.done and self.session_number >= 1:
if self.session_number >= 1:
self.allsprites.draw(self.screen)
self.screen.blit(self.score_text, (5, 5))
else:
self.title_screen()
if self.player.score <= -10 and self.session_number == 0:
self.game_over_screen()
print(self.session_number)
pg.display.update()
def update(self):
saw_hits = pg.sprite.spritecollide(self.player, self.saw_sprite, False)
money_hits = pg.sprite.spritecollide(self.player, self.money_sprite, False)
card_hits = pg.sprite.spritecollide(self.player, self.card_sprite, False)
for hit in saw_hits:
hit.reset(self.screen_rect)
self.player.score -= 5
for hit in money_hits:
hit.reset(self.screen_rect)
self.player.score += 1
for hit in card_hits:
hit.reset(self.screen_rect)
self.player.score += 3
self.update_score()
self.allsprites.update(self.screen_rect, self.keys)
    def game_over_screen(self):
        game_over = pg.font.SysFont('serif', 25)
        click_enter = pg.font.SysFont('serif', 15)
        main_text = game_over.render('Game Over', True, pg.Color("black"))
        sub_text = click_enter.render('(Press the space bar to play again)',
                                      True, pg.Color("black"))
        center_x = SCREEN_SIZE[0] // 2 - main_text.get_width() // 2
        center_y = SCREEN_SIZE[1] // 2 - main_text.get_height() // 2
        self.screen.blit(main_text, [center_x, center_y])
        center_x = SCREEN_SIZE[0] // 2 - sub_text.get_width() // 2
        center_y = SCREEN_SIZE[1] // 2 - (sub_text.get_height() // 2 - 20)
        self.screen.blit(sub_text, [center_x, center_y])
def title_screen(self):
# First drawn screen the user is prompted by
new_begin = pg.font.SysFont('serif', 35)
new_begin_sub = pg.font.SysFont('serif', 15)
begin_text = new_begin.render('Press the space bar to play',
True, pg.Color("black"))
begin_text_sub = new_begin_sub.render('(Use arrow keys or controller to interact)',
True, pg.Color("black"))
center_x = SCREEN_SIZE[0] // 2 - begin_text.get_width() // 2
center_y = SCREEN_SIZE[1] // 2 - begin_text.get_height() // 2
center_x_sub = SCREEN_SIZE[0] // 2 - begin_text_sub.get_width() // 2
center_y_sub = SCREEN_SIZE[1] // 2 - begin_text_sub.get_height() // 2 + 35
self.screen.blit(begin_text, [center_x, center_y])
self.screen.blit(begin_text_sub, [center_x_sub, center_y_sub])
def update_score(self):
score_raw = "Score: {}".format(self.player.score)
if self.player.score <= 0:
self.score_text = FONT.render(score_raw, True, pg.Color("red"))
elif self.player.score >= 1 and self.player.score <= 9:
self.score_text = FONT.render(score_raw, True, pg.Color(255,165,0))
elif self.player.score >= 10:
self.score_text = FONT.render(score_raw, True, pg.Color("green"))
else:
self.score_text = FONT.render(score_raw, True, pg.Color("black"))
def main_loop(self):
"""
One game loop. Simple and clean.
"""
while not self.done:
self.event_loop()
self.update()
self.render()
self.clock.tick(self.fps)
def main():
"""
Prepare our environment, create a display, and start the program.
"""
global IMAGES, FONT
os.environ['SDL_VIDEO_CENTERED'] = '1'
pg.init()
pg.joystick.init()
pg.display.set_caption(CAPTION)
pg.display.set_mode(SCREEN_SIZE, pg.NOFRAME)
IMAGES = load_images()
FONT = pg.font.SysFont('Calibri', 25, True, False)
JOYSTICKS = [pg.joystick.Joystick(x) for x in range(pg.joystick.get_count())]
Application().main_loop()
pg.quit()
pg.joystick.quit()
sys.exit()
if __name__ == "__main__":
main()
|
gpl-3.0
|
sensepost/Snoopy
|
snoopy/server/transforms/fetchTweetsByLocation.py
|
4
|
2424
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# glenn@sensepost.com
# Snoopy // 2012
# By using this code you agree to abide by the supplied LICENSE.txt
from Maltego import *
import sys  # used below for sys.stdin / sys.stdout / sys.stderr; was missing
import logging
import requests
import json
import stawk_db
import re
logging.basicConfig(level=logging.DEBUG,filename='/tmp/maltego_logs.txt',format='%(asctime)s %(levelname)s: %(message)s',datefmt='%Y-%m-%d %H:%M:%S')
sys.stderr = sys.stdout
def main():
print "Content-type: xml\n\n";
MaltegoXML_in = sys.stdin.read()
if MaltegoXML_in <> '':
m = MaltegoMsg(MaltegoXML_in)
cursor=stawk_db.dbconnect()
TRX = MaltegoTransform()
try:
logging.debug("Here we go")
for item in m.TransformSettings.keys():
logging.debug("N:"+item+" V:"+m.TransformSettings[item])
# logging.debug(MaltegoXML_in)
radius="5" #miles
lat=m.AdditionalFields['lat']
lng=m.AdditionalFields['long']
            if 'radius' in m.AdditionalFields:
                radius = m.AdditionalFields['radius']
logging.debug("Tweep cords to search - %s,%s (%s miles)" %(lat,lng,radius))
r=requests.get("https://search.twitter.com/search.json?q=geocode:%s,%s,%smi"%(lat,lng,radius))
tw=json.loads(r.text)
logging.debug("Tweep results - %d"%len(tw['results']))
for tweep in tw['results']:
name=tweep['from_user_name'].encode('utf8','xmlcharrefreplace')
username=tweep['from_user'].encode('utf8','xmlcharrefreplace')
uid=tweep['from_user_id_str'].encode('utf8','xmlcharrefreplace')
recent_tweet=tweep['text'].encode('utf8','xmlcharrefreplace')
img=tweep['profile_image_url'].encode('utf8','xmlcharrefreplace')
profile_page="http://twitter.com/%s"%username
largephoto=re.sub('_normal','',img)
NewEnt=TRX.addEntity("maltego.affiliation.Twitter", name)
NewEnt.addAdditionalFields("uid","UID","strict",uid)
NewEnt.addAdditionalFields("affiliation.profile-url","Profile URL","strict",profile_page)
NewEnt.addAdditionalFields("twitter.screen-name","Screen Name","strict",username)
NewEnt.addAdditionalFields("person.fullname","Real Name","strict",name)
NewEnt.addAdditionalFields("photo","Photo","nostrict",largephoto)
NewEnt.addAdditionalFields("tweet","Recent Tweet","nostrict",recent_tweet)
NewEnt.setIconURL(img)
except Exception, e:
logging.debug("Exception:")
logging.debug(e)
TRX.returnOutput()
main()
|
mit
|
bbqlinux/pyparted
|
src/parted/alignment.py
|
2
|
5275
|
#
# alignment.py
# Python bindings for libparted (built on top of the _ped Python module).
#
# Copyright (C) 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
# David Cantrell <dcantrell@redhat.com>
#
import parted
import _ped
from decorators import localeC
class Alignment(object):
"""Alignment()
An Alignment object describes constraints on how sectors and Geometry
objects are aligned. Being aligned means that the sector be located at
a specific sector multiple on a device, or that a geometry must start
and end at sectors at those specific multiples. Most methods on this
object raise ArithmeticError if calculating alignments fails."""
@localeC
def __init__(self, *args, **kwargs):
"""Create a new Alignment object from the sectors offset and
grainSize."""
if kwargs.has_key("PedAlignment"):
self.__alignment = kwargs.get("PedAlignment")
elif kwargs.has_key("offset") and kwargs.has_key("grainSize"):
self.__alignment = _ped.Alignment(kwargs.get("offset"),
kwargs.get("grainSize"))
else:
raise parted.AlignmentException, "no offset+grainSize or PedAlignment specified"
offset = property(lambda s: s.__alignment.offset, lambda s, v: setattr(s.__alignment, "offset", v))
grainSize = property(lambda s: s.__alignment.grain_size, lambda s, v: setattr(s.__alignment, "grain_size", v))
def __eq__(self, other):
return not self.__ne__(other)
def __ne__(self, other):
if hash(self) == hash(other):
return False
if type(self) != type(other):
return True
return self.offset != other.offset or self.grainSize != other.grainSize
def __str__(self):
s = ("parted.Alignment instance --\n"
" offset: %(offset)s grainSize: %(grainSize)s\n"
" PedAlignment: %(ped)r" %
{"offset": self.offset, "grainSize": self.grainSize,
"ped": self.__alignment})
return s
@localeC
def intersect(self, b):
"""Create and return a new Alignment that describes the intersection of
self and alignment b. A sector will satisfy the new alignment iff
it satisfies both of the original alignments. Whether a sector
satisfies a given alignment is determined by is_aligned()."""
return parted.Alignment(PedAlignment=self.__alignment.intersect(b.getPedAlignment()))
@localeC
def alignUp(self, geom, sector):
"""Return the closest sector to the provided sector that lies inside
geom and satisfies the alignment constraint self. This method
prefers, but does not guarantee, that the result is beyond sector.
If no such sector can be found, an ArithmeticError is raised."""
return self.__alignment.align_up(geom.getPedGeometry(), sector)
@localeC
def alignDown(self, geom, sector):
"""Return the closest sector to the provided sector that lies inside
geom and satisfies the alignment constraint self. This method
prefers, but does not guarantee, that the result is below sector.
If no such sector can be found, an ArithmeticError is raised."""
return self.__alignment.align_down(geom.getPedGeometry(), sector)
@localeC
def alignNearest(self, geom, sector):
"""Return the closest sector to the input sector that lies inside
geom and satisfies the alignment constraint self. If no such sector
can be found, an ArithmeticError is raised."""
return self.__alignment.align_nearest(geom.getPedGeometry(), sector)
@localeC
def isAligned(self, geom, sector):
"""Determine whether sector lies inside geom and satisfies the
alignment constraint self."""
if not geom:
raise TypeError, "missing parted.Geometry parameter"
if sector is None:
raise TypeError, "missing sector parameter"
return self.__alignment.is_aligned(geom.getPedGeometry(), sector)
def getPedAlignment(self):
"""Return the _ped.Alignment object contained in this Alignment.
For internal module use only."""
return self.__alignment
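# Illustrative sketch (hypothetical numbers, not taken from the pyparted
# documentation): offset=0 with grainSize=2048 describes alignment to every
# 2048th sector, i.e. 1 MiB boundaries on a 512-byte-sector disk. Sector 4096
# satisfies that alignment, sector 4097 does not, and alignUp()/alignDown()
# would snap 4097 to the nearest such boundary inside a given Geometry.
#
#     one_mib = parted.Alignment(offset=0, grainSize=2048)
#     one_mib.isAligned(some_geometry, 4096)   # True when 4096 lies inside some_geometry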
|
gpl-2.0
|
jboy/nim-pymod
|
tests/00-pymod_basics/002-expect_1/test_expect_1.py
|
1
|
5550
|
import pytest
def test_0_compile_pymod_test_mod(pmgen_py_compile):
pmgen_py_compile(__name__)
def test_cfloatExpect1(pymod_test_mod):
pymod_test_mod.cfloatExpect1(1.0)
def test_cdoubleExpect1(pymod_test_mod):
pymod_test_mod.cdoubleExpect1(1.0)
def test_cshortExpect1(pymod_test_mod):
pymod_test_mod.cshortExpect1(1)
def test_cintExpect1(pymod_test_mod):
pymod_test_mod.cintExpect1(1)
def test_clongExpect1(pymod_test_mod):
pymod_test_mod.clongExpect1(1)
def test_cushortExpect1(pymod_test_mod):
pymod_test_mod.cushortExpect1(1)
def test_cuintExpect1(pymod_test_mod):
pymod_test_mod.cuintExpect1(1)
def test_culongExpect1(pymod_test_mod):
pymod_test_mod.culongExpect1(1)
def test_floatExpect1(pymod_test_mod):
pymod_test_mod.floatExpect1(1.0)
def test_float32Expect1(pymod_test_mod):
pymod_test_mod.float32Expect1(1.0)
def test_float64Expect1(pymod_test_mod):
pymod_test_mod.float64Expect1(1.0)
def test_intExpect1(pymod_test_mod):
pymod_test_mod.intExpect1(1)
# TODO
#def test_int8Expect1(pymod_test_mod):
# pymod_test_mod.int8Expect1(1)
def test_int16Expect1(pymod_test_mod):
pymod_test_mod.int16Expect1(1)
def test_int32Expect1(pymod_test_mod):
pymod_test_mod.int32Expect1(1)
def test_int64Expect1(pymod_test_mod):
pymod_test_mod.int64Expect1(1)
def test_uintExpect1(pymod_test_mod):
pymod_test_mod.uintExpect1(1)
def test_uint8Expect1(pymod_test_mod):
pymod_test_mod.uint8Expect1(1)
def test_uint16Expect1(pymod_test_mod):
pymod_test_mod.uint16Expect1(1)
def test_uint32Expect1(pymod_test_mod):
pymod_test_mod.uint32Expect1(1)
def test_uint64Expect1(pymod_test_mod):
pymod_test_mod.uint64Expect1(1)
# TODO
#def test_boolExpect1(pymod_test_mod):
# pymod_test_mod.boolExpect1(True)
def test_byteExpect1(pymod_test_mod):
pymod_test_mod.byteExpect1(1)
def test_ccharExpect1(pymod_test_mod, python_major_version):
if python_major_version == 2:
pymod_test_mod.ccharExpect1("a")
else: # Python 3 or above: bytes vs strings, yay!
pymod_test_mod.ccharExpect1(b"a")
def test_charExpect1(pymod_test_mod, python_major_version):
if python_major_version == 2:
pymod_test_mod.charExpect1("a")
else: # Python 3 or above: bytes vs strings, yay!
pymod_test_mod.charExpect1(b"a")
def test_stringExpect1(pymod_test_mod):
pymod_test_mod.stringExpect1("abc")
# TODO
#def test_unicodeRuneExpect1(pymod_test_mod, python_major_version):
# if python_major_version == 2:
# pymod_test_mod.unicodeRuneExpect1(u"a")
# else: # Python 3 or above: bytes vs strings, yay!
# pymod_test_mod.unicodeRuneExpect1("a")
# TODO
#def test_seqCharExpect1(pymod_test_mod, python_major_version):
# if python_major_version == 2:
# pymod_test_mod.seqCharExpect1("abc")
# else: # Python 3 or above: bytes vs strings, yay!
# pymod_test_mod.seqCharExpect1(b"abc")
# TODO
#def test_seqRuneExpect1(pymod_test_mod, python_major_version):
# if python_major_version == 2:
# pymod_test_mod.seqRuneExpect1(u"abc")
# else: # Python 3 or above: bytes vs strings, yay!
# pymod_test_mod.seqRuneExpect1("abc")
def test_floatExpect_but_supply_int(pymod_test_mod, python_major_version):
pymod_test_mod.floatExpect1(1)
def test_floatExpect_but_supply_str(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.floatExpect1('a')
if python_major_version == 2:
assert str(excinfo.value) == "a float is required"
else: # Python 3 or above
assert str(excinfo.value) == "a float is required"
def test_intExpect_but_supply_float(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.intExpect1(1.0)
if python_major_version == 2:
assert str(excinfo.value) == "integer argument expected, got float"
else: # Python 3 or above
assert str(excinfo.value) == "integer argument expected, got float"
def test_intExpect_but_supply_str(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.intExpect1('a')
if python_major_version == 2:
assert str(excinfo.value) == "an integer is required"
else: # Python 3 or above
assert str(excinfo.value) == "an integer is required (got type str)"
def test_stringExpect_but_supply_float(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.stringExpect1(1.0)
if python_major_version == 2:
assert str(excinfo.value) == "argument 1 must be string, not float"
else: # Python 3 or above
assert str(excinfo.value) == "argument 1 must be str, not float"
def test_stringExpect_but_supply_int(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.stringExpect1(1)
if python_major_version == 2:
assert str(excinfo.value) == "argument 1 must be string, not int"
else: # Python 3 or above
assert str(excinfo.value) == "argument 1 must be str, not int"
def test_charExpect_but_supply_str(pymod_test_mod, python_major_version):
with pytest.raises(TypeError) as excinfo:
pymod_test_mod.charExpect1("abc")
if python_major_version == 2:
assert str(excinfo.value) == "argument 1 must be char, not str"
else: # Python 3 or above
assert str(excinfo.value) == "argument 1 must be a byte string of length 1, not str"
|
mit
|
enormandeau/Scripts
|
fasta_remove.py
|
1
|
2142
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Extract sequences from a fasta file if their name is not in a 'remove' file.
Remove file contains one sequence name per line.
Usage:
%program <input_file> <remove_file> <output_file>"""
# Importing modules
import gzip
import sys
# Defining classes
class Fasta(object):
"""Fasta object with name and sequence
"""
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
def write_to_file(self, handle):
handle.write(">" + self.name + "\n")
handle.write(self.sequence + "\n")
def __repr__(self):
return self.name + " " + self.sequence[:31]
# Defining functions
def myopen(_file, mode="rt"):
if _file.endswith(".gz"):
return gzip.open(_file, mode=mode)
else:
return open(_file, mode=mode)
def fasta_iterator(input_file):
"""Takes a fasta file input_file and returns a fasta iterator
"""
with myopen(input_file) as f:
sequence = []
name = ""
begun = False
for line in f:
line = line.strip()
if line.startswith(">"):
if begun:
yield Fasta(name, "".join(sequence))
name = line[1:]
sequence = ""
begun = True
else:
sequence += line
if name != "":
yield Fasta(name, "".join(sequence))
# Parsing user input
try:
fasta_file = sys.argv[1] # Input fasta file
remove_file = sys.argv[2] # Input remove file, one gene name per line
result_file = sys.argv[3] # Output fasta file
except:
print(__doc__)
sys.exit(0)
remove = set()
with open(remove_file) as f:
for line in f:
line = line.strip()
if line != "":
remove.add(line)
# Iterate through sequences and write to files
fasta_sequences = fasta_iterator(fasta_file)
with myopen(result_file, "wt") as outf:
for seq in fasta_sequences:
name = seq.name
if name.split(" ")[0] not in remove and len(str(seq.sequence)) > 0:
seq.write_to_file(outf)
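# Example invocation (hypothetical file names), matching the usage line in the
# module docstring:
#     ./fasta_remove.py all_sequences.fasta ids_to_remove.txt kept_sequences.fasta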
|
gpl-3.0
|
ubuntu-core/snapcraft
|
tests/unit/pluginhandler/test_plugin_loader.py
|
1
|
3041
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017-2018,2020 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import pathlib
import sys
from textwrap import dedent
import fixtures
from testtools.matchers import Equals, IsInstance
from snapcraft.plugins.v1 import PluginV1
from snapcraft.plugins.v2 import PluginV2
from snapcraft.plugins._plugin_finder import _PLUGINS
from snapcraft.internal import errors
from tests import unit
class NonLocalTest(unit.TestCase):
def test_unknown_plugin_must_raise_exception(self):
path = copy.copy(sys.path)
raised = self.assertRaises(
errors.PluginError, self.load_part, "fake-part", "not-found"
)
self.assertThat(raised.message, Equals("unknown plugin: 'not-found'"))
# Make sure that nothing was added to sys.path.
self.assertThat(path, Equals(sys.path))
def test_local_plugin(self):
plugin_path = pathlib.Path("snap/plugins/x_local.py")
plugin_path.parent.mkdir(parents=True)
with open(plugin_path, "w") as plugin:
print(
dedent(
"""\
import snapcraft.plugins.v1
class Local(snapcraft.plugins.v1.PluginV1):
pass
"""
),
file=plugin,
)
self.load_part("test-part", plugin_name="x-local")
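        # Note (descriptive comment, not part of the original test): the plugin
        # name "x-local" resolves to the local file snap/plugins/x_local.py
        # written above - dashes in a local plugin name map to underscores in
        # the module filename.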
class InTreePluginsTest(unit.TestCase):
def test_all_known_legacy(self):
# We don't want validation to take place here.
self.useFixture(fixtures.MockPatch("jsonschema.validate"))
for plugin_name in _PLUGINS["legacy"]:
plugin_handler = self.load_part(
"test-part", plugin_name=plugin_name, base="core18"
)
self.expectThat(plugin_handler.plugin, IsInstance(PluginV1))
def test_all_core20(self):
self.useFixture(fixtures.MockPatch("jsonschema.validate"))
for plugin_name in _PLUGINS["core20"]:
plugin_handler = self.load_part(
"test-part", plugin_name=plugin_name, base="core20"
)
self.expectThat(plugin_handler.plugin, IsInstance(PluginV2))
def test_fail_on_schema(self):
# conda requires some part_properties to be set.
self.assertRaises(
errors.PluginError,
self.load_part,
"test-part",
plugin_name="conda",
base="core18",
)
|
gpl-3.0
|
redreamality/semanticizer
|
semanticizer/wpm/namespace.py
|
3
|
5589
|
# Copyright 2012-2013, University of Amsterdam. This program is free software:
# you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class WpmNS:
def __init__(self, db, langcode, version=None):
self.sep = ':'
self.lc = langcode
self.db = db
self.manual_version = version
    def version(self):
if self.manual_version:
return self.manual_version
version = self.db.get(self.db_version())
if not version:
raise Exception("No database version")
return version
def db_version(self):
"""
key
<langcode>:db:version
value
string(cache version)
"""
return self.sep.join( (self.lc, "db", "version") )
def wiki_language_name(self):
"""
key
<langcode>:<version>:wiki:lname
value
string(wiki name)
"""
return self.sep.join( (self.lc, self.version(), "wiki", "lname") )
def wiki_path(self):
"""
key
<langcode>:<version>:wiki:path
value
string(wiki path)
"""
return self.sep.join( (self.lc, self.version(), "wiki", "path") )
def wiki_stats(self, statName):
"""
key
<langcode>:<version>:wiki:stats:<statName>
value
string(stats)
"""
return self.sep.join( (self.lc, self.version(), "wiki", "stats", statName) )
def label(self, name):
"""
key
<langcode>:<version>:label:<name>
value
list( LinkOccCount, LinkDocCount, TextOccCount, TextDocCount, SenseId, SenseId, ..)
"""
return self.sep.join( (self.lc, self.version(), "label", name) )
def label_sense(self, name, senseid):
"""
key
<langcode>:<version>:label:<name>:<senseid>
value
list( sLinkDocCount, sLinkOccCount, FromTitle, FromRedirect)
"""
return self.sep.join( (self.lc, self.version(), "label", name, senseid) )
def normalized(self, name):
"""
key
<langcode>:<version>:norm:<name>
value
set( name, name, ... )
"""
return self.sep.join( (self.lc, self.version(), "norm", name) )
def translation_sense(self, senseid):
"""
key
<langcode>:<version>:trnsl:<senseid>
value
list( langcode, langcode, ... )
"""
return self.sep.join( (self.lc, self.version(), "trnsl", senseid) )
def translation_sense_language(self, senseid, langcode):
"""
key
<langcode>:<version>:trnsl:<senseid>:<langcode>
value
string(name)
"""
return self.sep.join( (self.lc, self.version(), "trnsl", senseid, langcode) )
def page_id(self, name):
"""
key
            <langcode>:<version>:page:id:<name>
value
string(id)
"""
return self.sep.join( (self.lc, self.version(), "page", "id", name) )
def page_title(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:name
value
string(name)
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "name") )
def page_labels(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:labels
value
list( json([title, occurances, fromRedirect, fromTitle isPrimary, proportion]), ...)
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "labels") )
def page_definition(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:definition
value
string(synopsis)
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "definition") )
def page_inlinks(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:inlinks
value
list( pageid, pageid, ... )
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "inlinks") )
def page_outlinks(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:outlinks
value
list( pageid, pageid, ... )
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "outlinks") )
def page_categories(self, pageid):
"""
key
<langcode>:<version>:page:<pageid>:categories
value
list( category, category, ... )
"""
return self.sep.join( (self.lc, self.version(), "page", pageid, "categories") )
def ngramscore(self, n):
"""
key
<langcode>:<version>:<n>grms
value
            zset([words{score}, [...]])
"""
return self.sep.join( (self.lc, self.version(), "%sgrms" % n) )
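# --- Hedged example (not part of the original module) -----------------------
# A minimal sketch of how the helpers above compose namespaced keys; the stub
# database object and the language code are hypothetical.
if __name__ == "__main__":
    class _StubDB(object):
        def get(self, key):
            # Pretend the cache version stored under "<langcode>:db:version" is "v1".
            return "v1"

    ns = WpmNS(_StubDB(), "en")
    print(ns.db_version())        # en:db:version
    print(ns.label("amsterdam"))  # en:v1:label:amsterdam
    print(ns.page_inlinks("42"))  # en:v1:page:42:inlinks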
|
gpl-3.0
|
chand3040/cloud_that
|
lms/envs/test_static_optimized.py
|
18
|
1648
|
"""
Settings used when generating static assets for use in tests.
For example, Bok Choy uses two different settings files:
1. test_static_optimized is used when invoking collectstatic
2. bok_choy is used when running CMS and LMS
Note: it isn't possible to have a single settings file, because Django doesn't
support both generating static assets to a directory and also serving static
files from the same directory.
"""
# Start with the common settings
from .common import * # pylint: disable=wildcard-import, unused-wildcard-import
# Use an in-memory database since this settings file is only used for updating assets
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
}
# Provide a dummy XQUEUE_INTERFACE setting as LMS expects it to exist on start up
XQUEUE_INTERFACE = {
"url": "https://sandbox-xqueue.edx.org",
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
######################### Static file overrides ####################################
# Redirect to the test_root folder within the repo
TEST_ROOT = REPO_ROOT / "test_root" # pylint: disable=no-value-for-parameter
LOG_DIR = (TEST_ROOT / "log").abspath()
# Store the static files under test root so that they don't overwrite existing static assets
STATIC_ROOT = (TEST_ROOT / "staticfiles" / "lms").abspath()
# Disable uglify when tests are running (used by build.js).
# 1. Uglify is by far the slowest part of the build process
# 2. Having full source code makes debugging tests easier for developers
os.environ['REQUIRE_BUILD_PROFILE_OPTIMIZE'] = 'none'
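# Hedged usage sketch (not part of the original settings file): per the module
# docstring these settings are consumed when invoking collectstatic, e.g. along
# the lines of
#   python manage.py lms --settings=test_static_optimized collectstatic --noinput
# (the exact invocation depends on the repository's tooling).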
|
agpl-3.0
|
amith01994/intellij-community
|
python/lib/Lib/site-packages/django/core/cache/backends/memcached.py
|
78
|
6968
|
"Memcached cache backend"
import time
from threading import local
from django.core.cache.backends.base import BaseCache, InvalidCacheBackendError
from django.utils import importlib
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super(BaseMemcachedCache, self).__init__(params)
if isinstance(server, basestring):
self._servers = server.split(';')
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS', None)
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
return self._lib.Client(self._servers)
def _get_memcache_timeout(self, timeout):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
timeout = timeout or self.default_timeout
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/FAQ
# "You can set expire times up to 30 days in the future. After that
# memcached interprets it as a date, and will expire the item after
# said date. This is a simple (but obscure) mechanic."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return timeout
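    # Illustrative note (not in the original source): with the rule above, a
    # timeout of 60 * 60 * 24 * 31 seconds (31 days) exceeds 2592000 and is
    # therefore returned as int(time.time()) + timeout, an absolute expiry
    # timestamp, whereas a 60-second timeout is passed through unchanged.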
def add(self, key, value, timeout=0, version=None):
key = self.make_key(key, version=version)
if isinstance(value, unicode):
value = value.encode('utf-8')
return self._cache.add(key, value, self._get_memcache_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
if val is None:
return default
return val
def set(self, key, value, timeout=0, version=None):
key = self.make_key(key, version=version)
self._cache.set(key, value, self._get_memcache_timeout(timeout))
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._cache.delete(key)
def get_many(self, keys, version=None):
new_keys = map(lambda x: self.make_key(x, version=version), keys)
ret = self._cache.get_multi(new_keys)
if ret:
_ = {}
m = dict(zip(new_keys, keys))
for k, v in ret.items():
_[m[k]] = v
ret = _
return ret
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
try:
val = self._cache.decr(key, delta)
            # python-memcache responds to decr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=0, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_key(key, version=version)
if isinstance(value, unicode):
value = value.encode('utf-8')
safe_data[key] = value
self._cache.set_multi(safe_data, self._get_memcache_timeout(timeout))
def delete_many(self, keys, version=None):
l = lambda x: self.make_key(x, version=version)
self._cache.delete_multi(map(l, keys))
def clear(self):
self._cache.flush_all()
# For backwards compatibility -- the default cache class tries a
# cascading lookup of cmemcache, then memcache.
class CacheClass(BaseMemcachedCache):
def __init__(self, server, params):
try:
import cmemcache as memcache
import warnings
warnings.warn(
"Support for the 'cmemcache' library has been deprecated. Please use python-memcached or pyblimc instead.",
DeprecationWarning
)
except ImportError:
try:
import memcache
except:
raise InvalidCacheBackendError(
"Memcached cache backend requires either the 'memcache' or 'cmemcache' library"
)
super(CacheClass, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super(MemcachedCache, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
self._local = local()
super(PyLibMCCache, self).__init__(server, params,
library=pylibmc,
value_not_found_exception=pylibmc.NotFound)
@property
def _cache(self):
# PylibMC uses cache options as the 'behaviors' attribute.
# It also needs to use threadlocals, because some versions of
# PylibMC don't play well with the GIL.
client = getattr(self._local, 'client', None)
if client:
return client
client = self._lib.Client(self._servers)
if self._options:
client.behaviors = self._options
self._local.client = client
return client
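# --- Hedged usage sketch (not part of the original module) ------------------
# A Django settings module would typically select one of the backends above
# via the CACHES setting; the server address below is hypothetical:
#
#   CACHES = {
#       "default": {
#           "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
#           "LOCATION": "127.0.0.1:11211",
#       },
#   }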
|
apache-2.0
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config/__init__.py
|
1
|
13581
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for OSPFv2 extensions relating
to MPLS for the interface
"""
__slots__ = ("_path_helper", "_extmethods", "__traffic_engineering_metric")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__traffic_engineering_metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"interfaces",
"interface",
"mpls",
"config",
]
def _get_traffic_engineering_metric(self):
"""
Getter method for traffic_engineering_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config/traffic_engineering_metric (uint32)
YANG Description: A link metric that should only be considered for traffic
engineering purposes.
"""
return self.__traffic_engineering_metric
def _set_traffic_engineering_metric(self, v, load=False):
"""
Setter method for traffic_engineering_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config/traffic_engineering_metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_engineering_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_engineering_metric() directly.
YANG Description: A link metric that should only be considered for traffic
engineering purposes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """traffic_engineering_metric must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="traffic-engineering-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__traffic_engineering_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_traffic_engineering_metric(self):
self.__traffic_engineering_metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
traffic_engineering_metric = __builtin__.property(
_get_traffic_engineering_metric, _set_traffic_engineering_metric
)
_pyangbind_elements = OrderedDict(
[("traffic_engineering_metric", traffic_engineering_metric)]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for OSPFv2 extensions relating
to MPLS for the interface
"""
__slots__ = ("_path_helper", "_extmethods", "__traffic_engineering_metric")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__traffic_engineering_metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"interfaces",
"interface",
"mpls",
"config",
]
def _get_traffic_engineering_metric(self):
"""
Getter method for traffic_engineering_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config/traffic_engineering_metric (uint32)
YANG Description: A link metric that should only be considered for traffic
engineering purposes.
"""
return self.__traffic_engineering_metric
def _set_traffic_engineering_metric(self, v, load=False):
"""
Setter method for traffic_engineering_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/interfaces/interface/mpls/config/traffic_engineering_metric (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_traffic_engineering_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_traffic_engineering_metric() directly.
YANG Description: A link metric that should only be considered for traffic
engineering purposes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """traffic_engineering_metric must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="traffic-engineering-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__traffic_engineering_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_traffic_engineering_metric(self):
self.__traffic_engineering_metric = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="traffic-engineering-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
traffic_engineering_metric = __builtin__.property(
_get_traffic_engineering_metric, _set_traffic_engineering_metric
)
_pyangbind_elements = OrderedDict(
[("traffic_engineering_metric", traffic_engineering_metric)]
)
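# --- Hedged example (not part of the generated module) ----------------------
# A minimal sketch of driving the generated class through its property; the
# metric value is arbitrary and the snippet only runs when executed directly.
if __name__ == "__main__":
    example = config()
    example.traffic_engineering_metric = 100
    print(int(example.traffic_engineering_metric))  # expected: 100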
|
apache-2.0
|
siggame/Joueur.py
|
games/stumped/game_object.py
|
1
|
2542
|
# GameObject: An object in the game. The most basic class that all game classes should inherit from automatically.
# DO NOT MODIFY THIS FILE
# Never try to directly create an instance of this class, or modify its member variables.
# Instead, you should only be reading its variables and calling its functions.
from typing import List
from joueur.base_game_object import BaseGameObject
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class GameObject(BaseGameObject):
"""The class representing the GameObject in the Stumped game.
An object in the game. The most basic class that all game classes should inherit from automatically.
"""
def __init__(self):
"""Initializes a GameObject with basic logic as provided by the Creer code generator.
"""
BaseGameObject.__init__(self)
# private attributes to hold the properties so they appear read only
self._game_object_name = ""
self._id = ""
self._logs = []
@property
def game_object_name(self) -> str:
"""str: String representing the top level Class that this game object is an instance of. Used for reflection to create new instances on clients, but exposed for convenience should AIs want this data.
"""
return self._game_object_name
@property
def id(self) -> str:
"""str: A unique id for each instance of a GameObject or a sub class. Used for client and server communication. Should never change value after being set.
"""
return self._id
@property
def logs(self) -> List[str]:
"""list[str]: Any strings logged will be stored here. Intended for debugging.
"""
return self._logs
def log(self, message: str) -> None:
"""Adds a message to this GameObject's logs. Intended for your own debugging purposes, as strings stored here are saved in the gamelog.
Args:
message (str): A string to add to this GameObject's log. Intended for debugging.
"""
return self._run_on_server('log', {
'message': message
})
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
# <<-- /Creer-Merge: functions -->>
|
mit
|
kaiweifan/horizon
|
openstack_dashboard/dashboards/project/instances/workflows/create_instance.py
|
5
|
27722
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.conf import settings # noqa
from django.template.defaultfilters import filesizeformat # noqa
from django.utils.text import normalize_newlines # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from django.utils.translation import ungettext_lazy # noqa
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils import functions
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.project.images_and_snapshots import utils
LOG = logging.getLogger(__name__)
class SelectProjectUserAction(workflows.Action):
project_id = forms.ChoiceField(label=_("Project"))
user_id = forms.ChoiceField(label=_("User"))
def __init__(self, request, *args, **kwargs):
super(SelectProjectUserAction, self).__init__(request, *args, **kwargs)
# Set our project choices
projects = [(tenant.id, tenant.name)
for tenant in request.user.authorized_tenants]
self.fields['project_id'].choices = projects
# Set our user options
users = [(request.user.id, request.user.username)]
self.fields['user_id'].choices = users
class Meta:
name = _("Project & User")
# Unusable permission so this is always hidden. However, we
# keep this step in the workflow for validation/verification purposes.
permissions = ("!",)
class SelectProjectUser(workflows.Step):
action_class = SelectProjectUserAction
contributes = ("project_id", "user_id")
class SetInstanceDetailsAction(workflows.Action):
SOURCE_TYPE_CHOICES = (
('', _("--- Select source ---")),
("image_id", _("Boot from image.")),
("instance_snapshot_id", _("Boot from snapshot.")),
("volume_id", _("Boot from volume.")),
("volume_image_id", _("Boot from image "
"(creates a new volume).")),
("volume_snapshot_id", _("Boot from volume snapshot "
"(creates a new volume).")),
)
availability_zone = forms.ChoiceField(label=_("Availability Zone"),
required=False)
name = forms.CharField(max_length=80, label=_("Instance Name"))
flavor = forms.ChoiceField(label=_("Flavor"),
help_text=_("Size of image to launch."))
count = forms.IntegerField(label=_("Instance Count"),
min_value=1,
initial=1,
help_text=_("Number of instances to launch."))
source_type = forms.ChoiceField(label=_("Instance Boot Source"),
required=True,
choices=SOURCE_TYPE_CHOICES,
help_text=_("Choose Your Boot Source "
"Type."))
instance_snapshot_id = forms.ChoiceField(label=_("Instance Snapshot"),
required=False)
volume_id = forms.ChoiceField(label=_("Volume"), required=False)
volume_snapshot_id = forms.ChoiceField(label=_("Volume Snapshot"),
required=False)
image_id = forms.ChoiceField(
label=_("Image Name"),
required=False,
widget=fields.SelectWidget(
data_attrs=('volume_size',),
transform=lambda x: ("%s (%s)" % (x.name,
filesizeformat(x.bytes)))))
volume_size = forms.CharField(label=_("Device size (GB)"),
required=False,
help_text=_("Volume size in gigabytes "
"(integer value)."))
device_name = forms.CharField(label=_("Device Name"),
required=False,
initial="vda",
help_text=_("Volume mount point (e.g. 'vda' "
"mounts at '/dev/vda')."))
delete_on_terminate = forms.BooleanField(label=_("Delete on Terminate"),
initial=False,
required=False,
help_text=_("Delete volume on "
"instance terminate"))
class Meta:
name = _("Details")
help_text_template = ("project/instances/"
"_launch_details_help.html")
def __init__(self, request, context, *args, **kwargs):
self._init_images_cache()
super(SetInstanceDetailsAction, self).__init__(
request, context, *args, **kwargs)
def clean(self):
cleaned_data = super(SetInstanceDetailsAction, self).clean()
count = cleaned_data.get('count', 1)
# Prevent launching more instances than the quota allows
usages = quotas.tenant_quota_usages(self.request)
available_count = usages['instances']['available']
if available_count < count:
error_message = ungettext_lazy('The requested instance '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available. ',
'The requested %(req)i instances '
'cannot be launched as you only '
'have %(avail)i of your quota '
'available.',
count)
params = {'req': count,
'avail': available_count}
raise forms.ValidationError(error_message % params)
# Validate our instance source.
source_type = self.data.get('source_type', None)
if source_type == 'image_id':
if not cleaned_data.get('image_id'):
msg = _("You must select an image.")
self._errors['image_id'] = self.error_class([msg])
elif source_type == 'instance_snapshot_id':
if not cleaned_data['instance_snapshot_id']:
msg = _("You must select a snapshot.")
self._errors['instance_snapshot_id'] = self.error_class([msg])
elif source_type == 'volume_id':
if not cleaned_data.get('volume_id'):
msg = _("You must select a volume.")
self._errors['volume_id'] = self.error_class([msg])
# Prevent launching multiple instances with the same volume.
# TODO(gabriel): is it safe to launch multiple instances with
# a snapshot since it should be cloned to new volumes?
if count > 1:
msg = _('Launching multiple instances is only supported for '
'images and instance snapshots.')
raise forms.ValidationError(msg)
elif source_type == 'volume_image_id':
if not cleaned_data['image_id']:
msg = _("You must select an image.")
self._errors['image_id'] = self.error_class([msg])
if not self.data.get('volume_size', None):
msg = _("You must set volume size")
self._errors['volume_size'] = self.error_class([msg])
if not cleaned_data.get('device_name'):
msg = _("You must set device name")
self._errors['device_name'] = self.error_class([msg])
elif source_type == 'volume_snapshot_id':
if not cleaned_data.get('volume_snapshot_id'):
msg = _("You must select a snapshot.")
self._errors['volume_snapshot_id'] = self.error_class([msg])
if not cleaned_data.get('device_name'):
msg = _("You must set device name")
self._errors['device_name'] = self.error_class([msg])
return cleaned_data
def populate_flavor_choices(self, request, context):
"""By default, returns the available flavors, sorted by RAM
usage (ascending).
Override these behaviours with a CREATE_INSTANCE_FLAVOR_SORT dict
in local_settings.py."""
try:
flavors = api.nova.flavor_list(request)
flavor_sort = getattr(settings, 'CREATE_INSTANCE_FLAVOR_SORT', {})
rev = flavor_sort.get('reverse', False)
key = flavor_sort.get('key', lambda flavor: flavor.ram)
flavor_list = [(flavor.id, "%s" % flavor.name)
for flavor in sorted(flavors, key=key, reverse=rev)]
except Exception:
flavor_list = []
exceptions.handle(request,
_('Unable to retrieve instance flavors.'))
return flavor_list
def populate_availability_zone_choices(self, request, context):
try:
zones = api.nova.availability_zone_list(request)
except Exception:
zones = []
exceptions.handle(request,
_('Unable to retrieve availability zones.'))
zone_list = [(zone.zoneName, zone.zoneName)
for zone in zones if zone.zoneState['available']]
zone_list.sort()
if not zone_list:
zone_list.insert(0, ("", _("No availability zones found.")))
elif len(zone_list) > 1:
zone_list.insert(0, ("", _("Any Availability Zone")))
return zone_list
def get_help_text(self):
extra = {}
try:
extra['usages'] = api.nova.tenant_absolute_limits(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
api.nova.flavor_list(self.request)])
extra['flavors'] = flavors
except Exception:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetInstanceDetailsAction, self).get_help_text(extra)
def _init_images_cache(self):
if not hasattr(self, '_images_cache'):
self._images_cache = {}
def _get_volume_display_name(self, volume):
if hasattr(volume, "volume_id"):
vol_type = "snap"
visible_label = _("Snapshot")
else:
vol_type = "vol"
visible_label = _("Volume")
return (("%s:%s" % (volume.id, vol_type)),
(_("%(name)s - %(size)s GB (%(label)s)") %
{'name': volume.display_name or volume.id,
'size': volume.size,
'label': visible_label}))
def populate_image_id_choices(self, request, context):
choices = []
images = utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
for image in images:
image.bytes = image.size
image.volume_size = functions.bytes_to_gigabytes(image.bytes)
choices.append((image.id, image))
if choices:
choices.insert(0, ("", _("Select Image")))
else:
choices.insert(0, ("", _("No images available")))
return choices
def populate_instance_snapshot_id_choices(self, request, context):
images = utils.get_available_images(request,
context.get('project_id'),
self._images_cache)
choices = [(image.id, image.name)
for image in images
if image.properties.get("image_type", '') == "snapshot"]
if choices:
choices.insert(0, ("", _("Select Instance Snapshot")))
else:
choices.insert(0, ("", _("No snapshots available.")))
return choices
def populate_volume_id_choices(self, request, context):
try:
volumes = [self._get_volume_display_name(v)
for v in cinder.volume_list(self.request)
if v.status == api.cinder.VOLUME_STATE_AVAILABLE]
except Exception:
volumes = []
exceptions.handle(self.request,
_('Unable to retrieve list of volumes.'))
if volumes:
volumes.insert(0, ("", _("Select Volume")))
else:
volumes.insert(0, ("", _("No volumes available.")))
return volumes
def populate_volume_snapshot_id_choices(self, request, context):
try:
snapshots = cinder.volume_snapshot_list(self.request)
snapshots = [self._get_volume_display_name(s) for s in snapshots
if s.status == api.cinder.VOLUME_STATE_AVAILABLE]
except Exception:
snapshots = []
exceptions.handle(self.request,
_('Unable to retrieve list of volume '
'snapshots.'))
if snapshots:
snapshots.insert(0, ("", _("Select Volume Snapshot")))
else:
snapshots.insert(0, ("", _("No volume snapshots available.")))
return snapshots
class SetInstanceDetails(workflows.Step):
action_class = SetInstanceDetailsAction
depends_on = ("project_id", "user_id")
contributes = ("source_type", "source_id",
"availability_zone", "name", "count", "flavor",
"device_name", # Can be None for an image.
"delete_on_terminate")
def prepare_action_context(self, request, context):
if 'source_type' in context and 'source_id' in context:
context[context['source_type']] = context['source_id']
return context
def contribute(self, data, context):
context = super(SetInstanceDetails, self).contribute(data, context)
# Allow setting the source dynamically.
if ("source_type" in context and "source_id" in context
and context["source_type"] not in context):
context[context["source_type"]] = context["source_id"]
# Translate form input to context for source values.
if "source_type" in data:
if data["source_type"] in ["image_id", "volume_image_id"]:
context["source_id"] = data.get("image_id", None)
else:
context["source_id"] = data.get(data["source_type"], None)
if "volume_size" in data:
context["volume_size"] = data["volume_size"]
return context
KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import"
class SetAccessControlsAction(workflows.Action):
keypair = forms.DynamicChoiceField(label=_("Keypair"),
required=False,
help_text=_("Which keypair to use for "
"authentication."),
add_item_link=KEYPAIR_IMPORT_URL)
admin_pass = forms.RegexField(
label=_("Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False),
regex=validators.password_validator(),
error_messages={'invalid': validators.password_validator_msg()})
confirm_admin_pass = forms.CharField(
label=_("Confirm Admin Pass"),
required=False,
widget=forms.PasswordInput(render_value=False))
groups = forms.MultipleChoiceField(label=_("Security Groups"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
class Meta:
name = _("Access & Security")
help_text = _("Control access to your instance via keypairs, "
"security groups, and other mechanisms.")
def populate_keypair_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except Exception:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve keypairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair'].initial = keypair_list[0][0]
keypair_list.insert(0, ("", _("Select a keypair")))
else:
keypair_list = (("", _("No keypairs available.")),)
return keypair_list
def populate_groups_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except Exception:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
def clean(self):
'''Check to make sure password fields match.'''
cleaned_data = super(SetAccessControlsAction, self).clean()
if 'admin_pass' in cleaned_data:
if cleaned_data['admin_pass'] != cleaned_data.get(
'confirm_admin_pass', None):
raise forms.ValidationError(_('Passwords do not match.'))
return cleaned_data
class SetAccessControls(workflows.Step):
action_class = SetAccessControlsAction
depends_on = ("project_id", "user_id")
contributes = ("keypair_id", "security_group_ids",
"admin_pass", "confirm_admin_pass")
def contribute(self, data, context):
if data:
post = self.workflow.request.POST
context['security_group_ids'] = post.getlist("groups")
context['keypair_id'] = data.get("keypair", "")
context['admin_pass'] = data.get("admin_pass", "")
context['confirm_admin_pass'] = data.get("confirm_admin_pass", "")
return context
class CustomizeAction(workflows.Action):
customization_script = forms.CharField(widget=forms.Textarea,
label=_("Customization Script"),
required=False,
help_text=_("A script or set of "
"commands to be "
"executed after the "
"instance has been "
"built (max 16kb)."))
class Meta:
name = _("Post-Creation")
help_text_template = ("project/instances/"
"_launch_customize_help.html")
class PostCreationStep(workflows.Step):
action_class = CustomizeAction
contributes = ("customization_script",)
class SetNetworkAction(workflows.Action):
network = forms.MultipleChoiceField(label=_("Networks"),
required=True,
widget=forms.CheckboxSelectMultiple(),
error_messages={
'required': _(
"At least one network must"
" be specified.")},
help_text=_("Launch instance with"
" these networks"))
if api.neutron.is_port_profiles_supported():
profile = forms.ChoiceField(label=_("Policy Profiles"),
required=False,
help_text=_("Launch instance with "
"this policy profile"))
class Meta:
name = _("Networking")
permissions = ('openstack.services.network',)
help_text = _("Select networks for your instance.")
def populate_network_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
for n in networks:
n.set_id_as_name_if_empty()
network_list = [(network.id, network.name) for network in networks]
except Exception:
network_list = []
exceptions.handle(request,
_('Unable to retrieve networks.'))
return network_list
def populate_profile_choices(self, request, context):
try:
profiles = api.neutron.profile_list(request, 'policy')
profile_list = [(profile.id, profile.name) for profile in profiles]
except Exception:
profile_list = []
exceptions.handle(request, _("Unable to retrieve profiles."))
return profile_list
class SetNetwork(workflows.Step):
action_class = SetNetworkAction
# Disabling the template drag/drop only in the case port profiles
# are used till the issue with the drag/drop affecting the
# profile_id detection is fixed.
if api.neutron.is_port_profiles_supported():
contributes = ("network_id", "profile_id",)
else:
template_name = "project/instances/_update_networks.html"
contributes = ("network_id",)
def contribute(self, data, context):
if data:
networks = self.workflow.request.POST.getlist("network")
# If no networks are explicitly specified, network list
# contains an empty string, so remove it.
networks = [n for n in networks if n != '']
if networks:
context['network_id'] = networks
if api.neutron.is_port_profiles_supported():
context['profile_id'] = data.get('profile', None)
return context
class LaunchInstance(workflows.Workflow):
slug = "launch_instance"
name = _("Launch Instance")
finalize_button_name = _("Launch")
success_message = _('Launched %(count)s named "%(name)s".')
failure_message = _('Unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:instances:index"
default_steps = (SelectProjectUser,
SetInstanceDetails,
SetAccessControls,
SetNetwork,
PostCreationStep)
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s instances") % count,
"name": name}
else:
return message % {"count": _("instance"), "name": name}
@sensitive_variables('context')
def handle(self, request, context):
custom_script = context.get('customization_script', '')
dev_mapping_1 = None
dev_mapping_2 = None
image_id = ''
# Determine volume mapping options
source_type = context.get('source_type', None)
if source_type in ['image_id', 'instance_snapshot_id']:
image_id = context['source_id']
elif source_type in ['volume_id', 'volume_snapshot_id']:
dev_mapping_1 = {context['device_name']: '%s::%s' %
(context['source_id'],
int(bool(context['delete_on_terminate'])))}
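            # e.g. (hypothetical placeholder id) {'vda': '<source-id>::1'} - the
            # value packs the source volume/snapshot id together with the
            # delete-on-terminate flag.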
elif source_type == 'volume_image_id':
dev_mapping_2 = [
{'device_name': str(context['device_name']),
'source_type': 'image',
'destination_type': 'volume',
'delete_on_termination':
int(bool(context['delete_on_terminate'])),
'uuid': context['source_id'],
'boot_index': '0',
'volume_size': context['volume_size']
}
]
netids = context.get('network_id', None)
if netids:
nics = [{"net-id": netid, "v4-fixed-ip": ""}
for netid in netids]
else:
nics = None
avail_zone = context.get('availability_zone', None)
# Create port with Network Name and Port Profile
# for the use with the plugin supporting port profiles.
# neutron port-create <Network name> --n1kv:profile <Port Profile ID>
# for net_id in context['network_id']:
## HACK for now use first network
if api.neutron.is_port_profiles_supported():
net_id = context['network_id'][0]
LOG.debug("Horizon->Create Port with %(netid)s %(profile_id)s",
{'netid': net_id, 'profile_id': context['profile_id']})
try:
port = api.neutron.port_create(request, net_id,
policy_profile_id=
context['profile_id'])
except Exception:
msg = (_('Port not created for profile-id (%s).') %
context['profile_id'])
exceptions.handle(request, msg)
if port and port.id:
nics = [{"port-id": port.id}]
try:
api.nova.server_create(request,
context['name'],
image_id,
context['flavor'],
context['keypair_id'],
normalize_newlines(custom_script),
context['security_group_ids'],
block_device_mapping=dev_mapping_1,
block_device_mapping_v2=dev_mapping_2,
nics=nics,
availability_zone=avail_zone,
instance_count=int(context['count']),
admin_pass=context['admin_pass'])
return True
except Exception:
exceptions.handle(request)
return False
|
apache-2.0
|
michaelBenin/sqlalchemy
|
test/dialect/postgresql/test_query.py
|
1
|
34275
|
# coding: utf-8
from sqlalchemy.testing.assertions import eq_, assert_raises, \
assert_raises_message, is_, AssertsExecutionResults, \
AssertsCompiledSQL, ComparesTables
from sqlalchemy.testing import engines, fixtures
from sqlalchemy import testing
from sqlalchemy import Table, Column, select, MetaData, text, Integer, \
String, Sequence, ForeignKey, join, Numeric, \
PrimaryKeyConstraint, DateTime, tuple_, Float, BigInteger, \
func, literal_column, literal, bindparam, cast, extract, \
SmallInteger, Enum, REAL, update, insert, Index, delete, \
and_, Date, TypeDecorator, Time, Unicode, Interval, or_, Text
from sqlalchemy import exc
from sqlalchemy.dialects import postgresql
import datetime
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'postgresql'
@classmethod
def setup_class(cls):
global metadata
cls.engine = testing.db
metadata = MetaData(testing.db)
def teardown(self):
metadata.drop_all()
metadata.clear()
if self.engine is not testing.db:
self.engine.dispose()
def test_compiled_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
primary_key=True), Column('data', String(30)))
metadata.create_all()
ins = table.insert(inline=True, values={'data': bindparam('x'
)}).compile()
ins.execute({'x': 'five'}, {'x': 'seven'})
assert table.select().execute().fetchall() == [(1, 'five'), (2,
'seven')]
def test_foreignkey_missing_insert(self):
t1 = Table('t1', metadata, Column('id', Integer,
primary_key=True))
t2 = Table('t2', metadata, Column('id', Integer,
ForeignKey('t1.id'), primary_key=True))
metadata.create_all()
# want to ensure that "null value in column "id" violates not-
        # null constraint" is raised (IntegrityError on psycopg2, but
        # ProgrammingError on pg8000), and not "ProgrammingError:
        # (ProgrammingError) relation "t2_id_seq" does not exist".
# the latter corresponds to autoincrement behavior, which is not
# the case here due to the foreign key.
for eng in [engines.testing_engine(options={'implicit_returning'
: False}),
engines.testing_engine(options={'implicit_returning'
: True})]:
assert_raises_message(exc.DBAPIError,
'violates not-null constraint',
eng.execute, t2.insert())
def test_sequence_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
Sequence('my_seq'), primary_key=True),
Column('data', String(30)))
metadata.create_all()
self._assert_data_with_sequence(table, 'my_seq')
@testing.requires.returning
def test_sequence_returning_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
Sequence('my_seq'), primary_key=True),
Column('data', String(30)))
metadata.create_all()
self._assert_data_with_sequence_returning(table, 'my_seq')
def test_opt_sequence_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
Sequence('my_seq', optional=True),
primary_key=True), Column('data', String(30)))
metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_opt_sequence_returning_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
Sequence('my_seq', optional=True),
primary_key=True), Column('data', String(30)))
metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_autoincrement_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
primary_key=True), Column('data', String(30)))
metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_autoincrement_returning_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
primary_key=True), Column('data', String(30)))
metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_noautoincrement_insert(self):
table = Table('testtable', metadata, Column('id', Integer,
primary_key=True, autoincrement=False),
Column('data', String(30)))
metadata.create_all()
self._assert_data_noautoincrement(table)
def _assert_data_autoincrement(self, table):
self.engine = \
engines.testing_engine(options={'implicit_returning'
: False})
metadata.bind = self.engine
def go():
# execute with explicit id
r = table.insert().execute({'id': 30, 'data': 'd1'})
assert r.inserted_primary_key == [30]
# execute with prefetch id
r = table.insert().execute({'data': 'd2'})
assert r.inserted_primary_key == [1]
# executemany with explicit ids
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
# executemany, uses SERIAL
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
# single execute, explicit id, inline
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
# single execute, inline, uses SERIAL
table.insert(inline=True).execute({'data': 'd8'})
# note that the test framework doesn't capture the "preexecute"
        # of a sequence or default. we just see it in the bind params.
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 1, 'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(1, 'd2'),
(31, 'd3'),
(32, 'd4'),
(2, 'd5'),
(3, 'd6'),
(33, 'd7'),
(4, 'd8'),
]
table.delete().execute()
# test the same series of events using a reflected version of
# the table
m2 = MetaData(self.engine)
table = Table(table.name, m2, autoload=True)
def go():
table.insert().execute({'id': 30, 'data': 'd1'})
r = table.insert().execute({'data': 'd2'})
assert r.inserted_primary_key == [5]
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
table.insert(inline=True).execute({'data': 'd8'})
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 5, 'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(5, 'd2'),
(31, 'd3'),
(32, 'd4'),
(6, 'd5'),
(7, 'd6'),
(33, 'd7'),
(8, 'd8'),
]
table.delete().execute()
def _assert_data_autoincrement_returning(self, table):
self.engine = \
engines.testing_engine(options={'implicit_returning': True})
metadata.bind = self.engine
def go():
# execute with explicit id
r = table.insert().execute({'id': 30, 'data': 'd1'})
assert r.inserted_primary_key == [30]
# execute with prefetch id
r = table.insert().execute({'data': 'd2'})
assert r.inserted_primary_key == [1]
# executemany with explicit ids
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
# executemany, uses SERIAL
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
# single execute, explicit id, inline
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
# single execute, inline, uses SERIAL
table.insert(inline=True).execute({'data': 'd8'})
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
('INSERT INTO testtable (data) VALUES (:data) RETURNING '
'testtable.id', {'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(1, 'd2'),
(31, 'd3'),
(32, 'd4'),
(2, 'd5'),
(3, 'd6'),
(33, 'd7'),
(4, 'd8'),
]
table.delete().execute()
# test the same series of events using a reflected version of
# the table
m2 = MetaData(self.engine)
table = Table(table.name, m2, autoload=True)
def go():
table.insert().execute({'id': 30, 'data': 'd1'})
r = table.insert().execute({'data': 'd2'})
assert r.inserted_primary_key == [5]
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
table.insert(inline=True).execute({'data': 'd8'})
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
('INSERT INTO testtable (data) VALUES (:data) RETURNING '
'testtable.id', {'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
('INSERT INTO testtable (data) VALUES (:data)', [{'data'
: 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(5, 'd2'),
(31, 'd3'),
(32, 'd4'),
(6, 'd5'),
(7, 'd6'),
(33, 'd7'),
(8, 'd8'),
]
table.delete().execute()
def _assert_data_with_sequence(self, table, seqname):
self.engine = \
engines.testing_engine(options={'implicit_returning'
: False})
metadata.bind = self.engine
def go():
table.insert().execute({'id': 30, 'data': 'd1'})
table.insert().execute({'data': 'd2'})
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
table.insert(inline=True).execute({'data': 'd8'})
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 1, 'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname, [{'data': 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(1, 'd2'),
(31, 'd3'),
(32, 'd4'),
(2, 'd5'),
(3, 'd6'),
(33, 'd7'),
(4, 'd8'),
]
        # can't test reflection here since the Sequence must be
# explicitly specified
def _assert_data_with_sequence_returning(self, table, seqname):
self.engine = \
engines.testing_engine(options={'implicit_returning': True})
metadata.bind = self.engine
def go():
table.insert().execute({'id': 30, 'data': 'd1'})
table.insert().execute({'data': 'd2'})
table.insert().execute({'id': 31, 'data': 'd3'}, {'id': 32,
'data': 'd4'})
table.insert().execute({'data': 'd5'}, {'data': 'd6'})
table.insert(inline=True).execute({'id': 33, 'data': 'd7'})
table.insert(inline=True).execute({'data': 'd8'})
self.assert_sql(self.engine, go, [], with_sequences=[
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
{'id': 30, 'data': 'd1'}),
("INSERT INTO testtable (id, data) VALUES "
"(nextval('my_seq'), :data) RETURNING testtable.id",
{'data': 'd2'}),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 31, 'data': 'd3'}, {'id': 32, 'data': 'd4'}]),
("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname, [{'data': 'd5'}, {'data': 'd6'}]),
('INSERT INTO testtable (id, data) VALUES (:id, :data)',
[{'id': 33, 'data': 'd7'}]),
("INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname, [{'data': 'd8'}]),
])
assert table.select().execute().fetchall() == [
(30, 'd1'),
(1, 'd2'),
(31, 'd3'),
(32, 'd4'),
(2, 'd5'),
(3, 'd6'),
(33, 'd7'),
(4, 'd8'),
]
        # can't test reflection here since the Sequence must be
# explicitly specified
def _assert_data_noautoincrement(self, table):
self.engine = \
engines.testing_engine(options={'implicit_returning'
: False})
metadata.bind = self.engine
table.insert().execute({'id': 30, 'data': 'd1'})
if self.engine.driver == 'pg8000':
exception_cls = exc.ProgrammingError
elif self.engine.driver == 'pypostgresql':
exception_cls = Exception
else:
exception_cls = exc.IntegrityError
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'})
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'},
{'data': 'd3'})
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'})
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'},
{'data': 'd3'})
table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32,
'data': 'd3'})
table.insert(inline=True).execute({'id': 33, 'data': 'd4'})
assert table.select().execute().fetchall() == [(30, 'd1'), (31,
'd2'), (32, 'd3'), (33, 'd4')]
table.delete().execute()
# test the same series of events using a reflected version of
# the table
m2 = MetaData(self.engine)
table = Table(table.name, m2, autoload=True)
table.insert().execute({'id': 30, 'data': 'd1'})
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'})
assert_raises_message(exception_cls,
'violates not-null constraint',
table.insert().execute, {'data': 'd2'},
{'data': 'd3'})
table.insert().execute({'id': 31, 'data': 'd2'}, {'id': 32,
'data': 'd3'})
table.insert(inline=True).execute({'id': 33, 'data': 'd4'})
assert table.select().execute().fetchall() == [(30, 'd1'), (31,
'd2'), (32, 'd3'), (33, 'd4')]
class ServerSideCursorsTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = 'postgresql+psycopg2'
def _fixture(self, server_side_cursors):
self.engine = engines.testing_engine(
options={'server_side_cursors':server_side_cursors}
)
return self.engine
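    # With psycopg2, a stream_results / server_side_cursors execution uses a
    # named (server-side) cursor, so result.cursor.name is truthy exactly when
    # streaming is in effect; the assertions below rely on that.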
def tearDown(self):
engines.testing_reaper.close_all()
self.engine.dispose()
def test_global_string(self):
engine = self._fixture(True)
result = engine.execute('select 1')
assert result.cursor.name
def test_global_text(self):
engine = self._fixture(True)
result = engine.execute(text('select 1'))
assert result.cursor.name
def test_global_expr(self):
engine = self._fixture(True)
result = engine.execute(select([1]))
assert result.cursor.name
def test_global_off_explicit(self):
engine = self._fixture(False)
result = engine.execute(text('select 1'))
# It should be off globally ...
assert not result.cursor.name
def test_stmt_option(self):
engine = self._fixture(False)
s = select([1]).execution_options(stream_results=True)
result = engine.execute(s)
# ... but enabled for this one.
assert result.cursor.name
def test_conn_option(self):
engine = self._fixture(False)
# and this one
result = \
engine.connect().execution_options(stream_results=True).\
execute('select 1'
)
assert result.cursor.name
def test_stmt_enabled_conn_option_disabled(self):
engine = self._fixture(False)
s = select([1]).execution_options(stream_results=True)
# not this one
result = \
engine.connect().execution_options(stream_results=False).\
execute(s)
assert not result.cursor.name
def test_stmt_option_disabled(self):
engine = self._fixture(True)
s = select([1]).execution_options(stream_results=False)
result = engine.execute(s)
assert not result.cursor.name
def test_aliases_and_ss(self):
engine = self._fixture(False)
s1 = select([1]).execution_options(stream_results=True).alias()
result = engine.execute(s1)
assert result.cursor.name
# s1's options shouldn't affect s2 when s2 is used as a
# from_obj.
s2 = select([1], from_obj=s1)
result = engine.execute(s2)
assert not result.cursor.name
def test_for_update_expr(self):
engine = self._fixture(True)
s1 = select([1], for_update=True)
result = engine.execute(s1)
assert result.cursor.name
def test_for_update_string(self):
engine = self._fixture(True)
result = engine.execute('SELECT 1 FOR UPDATE')
assert result.cursor.name
def test_text_no_ss(self):
engine = self._fixture(False)
s = text('select 42')
result = engine.execute(s)
assert not result.cursor.name
def test_text_ss_option(self):
engine = self._fixture(False)
s = text('select 42').execution_options(stream_results=True)
result = engine.execute(s)
assert result.cursor.name
def test_roundtrip(self):
engine = self._fixture(True)
test_table = Table('test_table', MetaData(engine),
Column('id', Integer, primary_key=True),
Column('data', String(50)))
test_table.create(checkfirst=True)
try:
test_table.insert().execute(data='data1')
nextid = engine.execute(Sequence('test_table_id_seq'))
test_table.insert().execute(id=nextid, data='data2')
eq_(test_table.select().execute().fetchall(), [(1, 'data1'
), (2, 'data2')])
test_table.update().where(test_table.c.id
== 2).values(data=test_table.c.data + ' updated'
).execute()
eq_(test_table.select().execute().fetchall(), [(1, 'data1'
), (2, 'data2 updated')])
test_table.delete().execute()
eq_(test_table.count().scalar(), 0)
finally:
test_table.drop(checkfirst=True)
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'postgresql >= 8.3'
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata, Column('id', Integer,
primary_key=True), Column('description',
String(50)))
matchtable = Table('matchtable', metadata, Column('id',
Integer, primary_key=True), Column('title',
String(200)), Column('category_id', Integer,
ForeignKey('cattable.id')))
metadata.create_all()
cattable.insert().execute([{'id': 1, 'description': 'Python'},
{'id': 2, 'description': 'Ruby'}])
matchtable.insert().execute([{'id': 1, 'title'
: 'Agile Web Development with Rails'
, 'category_id': 2},
{'id': 2,
'title': 'Dive Into Python',
'category_id': 1},
{'id': 3, 'title'
: "Programming Matz's Ruby",
'category_id': 2},
{'id': 4, 'title'
: 'The Definitive Guide to Django',
'category_id': 1},
{'id': 5, 'title'
: 'Python in a Nutshell',
'category_id': 1}])
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on('postgresql+pg8000', 'uses positional')
@testing.fails_on('postgresql+zxjdbc', 'uses qmark')
def test_expression_pyformat(self):
self.assert_compile(matchtable.c.title.match('somstr'),
'matchtable.title @@ to_tsquery(%(title_1)s'
')')
@testing.fails_on('postgresql+psycopg2', 'uses pyformat')
@testing.fails_on('postgresql+pypostgresql', 'uses pyformat')
@testing.fails_on('postgresql+zxjdbc', 'uses qmark')
def test_expression_positional(self):
self.assert_compile(matchtable.c.title.match('somstr'),
'matchtable.title @@ to_tsquery(%s)')
def test_simple_match(self):
results = \
matchtable.select().where(matchtable.c.title.match('python'
)).order_by(matchtable.c.id).execute().fetchall()
eq_([2, 5], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = \
matchtable.select().where(matchtable.c.title.match("Matz's"
)).execute().fetchall()
eq_([3], [r.id for r in results])
def test_simple_derivative_match(self):
results = \
matchtable.select().where(matchtable.c.title.match('nutshells'
)).execute().fetchall()
eq_([5], [r.id for r in results])
def test_or_match(self):
results1 = \
matchtable.select().where(or_(matchtable.c.title.match('nutshells'
), matchtable.c.title.match('rubies'
))).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results1])
results2 = \
matchtable.select().where(
matchtable.c.title.match('nutshells | rubies'
)).order_by(matchtable.c.id).execute().fetchall()
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = \
matchtable.select().where(and_(matchtable.c.title.match('python'
), matchtable.c.title.match('nutshells'
))).execute().fetchall()
eq_([5], [r.id for r in results1])
results2 = \
matchtable.select().where(
matchtable.c.title.match('python & nutshells'
)).execute().fetchall()
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = matchtable.select().where(and_(cattable.c.id
== matchtable.c.category_id,
or_(cattable.c.description.match('Ruby'),
matchtable.c.title.match('nutshells'
)))).order_by(matchtable.c.id).execute().fetchall()
eq_([1, 3, 5], [r.id for r in results])
class TupleTest(fixtures.TestBase):
__only_on__ = 'postgresql'
def test_tuple_containment(self):
for test, exp in [
([('a', 'b')], True),
([('a', 'c')], False),
([('f', 'q'), ('a', 'b')], True),
([('f', 'q'), ('a', 'c')], False)
]:
eq_(
testing.db.execute(
select([
tuple_(
literal_column("'a'"),
literal_column("'b'")
).\
in_([
tuple_(*[
literal_column("'%s'" % letter)
for letter in elem
]) for elem in test
])
])
).scalar(),
exp
)
class ExtractTest(fixtures.TablesTest):
"""The rationale behind this test is that for many years we've had a system
of embedding type casts into the expressions rendered by visit_extract()
    on the postgresql platform. The reason for this cast is not clear.
So here we try to produce a wide range of cases to ensure that these casts
are not needed; see [ticket:2740].
"""
__only_on__ = 'postgresql'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('t', metadata,
Column('id', Integer, primary_key=True),
Column('dtme', DateTime),
Column('dt', Date),
Column('tm', Time),
Column('intv', postgresql.INTERVAL),
Column('dttz', DateTime(timezone=True))
)
@classmethod
def insert_data(cls):
        # TODO: why doesn't setting hours to anything
        # affect the TZ in the DB col?
class TZ(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=4)
conn = testing.db.connect()
# we aren't resetting this at the moment but we don't have
# any other tests that are TZ specific
conn.execute("SET SESSION TIME ZONE 0")
conn.execute(
cls.tables.t.insert(),
{
'dtme': datetime.datetime(2012, 5, 10, 12, 15, 25),
'dt': datetime.date(2012, 5, 10),
'tm': datetime.time(12, 15, 25),
'intv': datetime.timedelta(seconds=570),
'dttz': datetime.datetime(2012, 5, 10, 12, 15, 25, tzinfo=TZ())
},
)
def _test(self, expr, field="all", overrides=None):
t = self.tables.t
if field == "all":
fields = {"year": 2012, "month": 5, "day": 10,
"epoch": 1336652125.0,
"hour": 12, "minute": 15}
elif field == "time":
fields = {"hour": 12, "minute": 15, "second": 25}
elif field == 'date':
fields = {"year": 2012, "month": 5, "day": 10}
elif field == 'all+tz':
fields = {"year": 2012, "month": 5, "day": 10,
"epoch": 1336637725.0,
"hour": 8,
"timezone": 0
}
else:
fields = field
if overrides:
fields.update(overrides)
for field in fields:
result = testing.db.scalar(
select([extract(field, expr)]).select_from(t))
eq_(result, fields[field])
def test_one(self):
t = self.tables.t
self._test(t.c.dtme, "all")
def test_two(self):
t = self.tables.t
self._test(t.c.dtme + t.c.intv,
overrides={"epoch": 1336652695.0, "minute": 24})
def test_three(self):
t = self.tables.t
actual_ts = testing.db.scalar(func.current_timestamp()) - \
datetime.timedelta(days=5)
self._test(func.current_timestamp() - datetime.timedelta(days=5),
{"hour": actual_ts.hour, "year": actual_ts.year,
"month": actual_ts.month}
)
def test_four(self):
t = self.tables.t
self._test(datetime.timedelta(days=5) + t.c.dt,
overrides={"day": 15, "epoch": 1337040000.0, "hour": 0,
"minute": 0}
)
def test_five(self):
t = self.tables.t
self._test(func.coalesce(t.c.dtme, func.current_timestamp()),
overrides={"epoch": 1336652125.0})
def test_six(self):
t = self.tables.t
self._test(t.c.tm + datetime.timedelta(seconds=30), "time",
overrides={"second": 55})
def test_seven(self):
self._test(literal(datetime.timedelta(seconds=10))
- literal(datetime.timedelta(seconds=10)), "all",
overrides={"hour": 0, "minute": 0, "month": 0,
"year": 0, "day": 0, "epoch": 0})
def test_eight(self):
t = self.tables.t
self._test(t.c.tm + datetime.timedelta(seconds=30),
{"hour": 12, "minute": 15, "second": 55})
def test_nine(self):
self._test(text("t.dt + t.tm"))
def test_ten(self):
t = self.tables.t
self._test(t.c.dt + t.c.tm)
def test_eleven(self):
self._test(func.current_timestamp() - func.current_timestamp(),
{"year": 0, "month": 0, "day": 0, "hour": 0}
)
def test_twelve(self):
t = self.tables.t
actual_ts = testing.db.scalar(
func.current_timestamp()).replace(tzinfo=None) - \
datetime.datetime(2012, 5, 10, 12, 15, 25)
self._test(func.current_timestamp() - func.coalesce(t.c.dtme,
func.current_timestamp()),
{"day": actual_ts.days}
)
def test_thirteen(self):
t = self.tables.t
self._test(t.c.dttz, "all+tz")
def test_fourteen(self):
t = self.tables.t
self._test(t.c.tm, "time")
def test_fifteen(self):
t = self.tables.t
self._test(datetime.timedelta(days=5) + t.c.dtme,
overrides={"day": 15, "epoch": 1337084125.0}
)
|
mit
|
ryneeverett/mezzanine
|
mezzanine/utils/cache.py
|
3
|
4108
|
from __future__ import unicode_literals
from hashlib import md5
from inspect import getmro
from time import time
from django.core.cache import cache
from django.utils.lru_cache import lru_cache
from django.utils.cache import _i18n_cache_key_suffix
from django.utils.module_loading import import_string
from mezzanine.conf import settings
from mezzanine.utils.deprecation import get_middleware_setting
from mezzanine.utils.device import device_from_request
from mezzanine.utils.sites import current_site_id
def _hashed_key(key):
"""
Hash keys when talking directly to the cache API, to avoid
keys longer than the backend supports (eg memcache limit is 255)
"""
return md5(key.encode("utf-8")).hexdigest()
def cache_set(key, value, timeout=None, refreshed=False):
"""
Wrapper for ``cache.set``. Stores the cache entry packed with
the desired cache expiry time. When the entry is retrieved from
cache, the packed expiry time is also checked, and if past,
the stale cache entry is stored again with an expiry that has
``CACHE_SET_DELAY_SECONDS`` added to it. In this case the entry
is not returned, so that a cache miss occurs and the entry
should be set by the caller, but all other callers will still get
the stale entry, so no real cache misses ever occur.
"""
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
refresh_time = timeout + time()
real_timeout = timeout + settings.CACHE_SET_DELAY_SECONDS
packed = (value, refresh_time, refreshed)
return cache.set(_hashed_key(key), packed, real_timeout)
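# Usage sketch (hypothetical key and value): the single caller that observes an
# expired entry gets a miss and recomputes, while concurrent callers keep
# receiving the stale value for CACHE_SET_DELAY_SECONDS:
#   cache_set("homepage", html, timeout=60)
#   html = cache_get("homepage")  # None only for the one refreshing caller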
def cache_get(key):
"""
Wrapper for ``cache.get``. The expiry time for the cache entry
    is stored with the entry. If the expiry time has passed, put the
stale entry back into cache, and don't return it to trigger a
fake cache miss.
"""
packed = cache.get(_hashed_key(key))
if packed is None:
return None
value, refresh_time, refreshed = packed
if (time() > refresh_time) and not refreshed:
cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)
return None
return value
@lru_cache(maxsize=None)
def cache_installed():
"""
Returns ``True`` if a cache backend is configured, and the
cache middleware classes or subclasses thereof are present.
This will be evaluated once per run, and then cached.
"""
has_key = bool(getattr(settings, "NEVERCACHE_KEY", ""))
def flatten(seqs):
return (item for seq in seqs for item in seq)
middleware_classes = map(import_string, get_middleware_setting())
middleware_ancestors = set(flatten(map(getmro, middleware_classes)))
mezzanine_cache_middleware_classes = {
import_string("mezzanine.core.middleware.UpdateCacheMiddleware"),
import_string("mezzanine.core.middleware.FetchFromCacheMiddleware"),
}
return (has_key and settings.CACHES and not settings.TESTING and
mezzanine_cache_middleware_classes.issubset(middleware_ancestors))
def cache_key_prefix(request):
"""
Cache key for Mezzanine's cache middleware. Adds the current
device and site ID.
"""
cache_key = "%s.%s.%s." % (
settings.CACHE_MIDDLEWARE_KEY_PREFIX,
current_site_id(),
device_from_request(request) or "default",
)
return _i18n_cache_key_suffix(request, cache_key)
def nevercache_token():
"""
Returns the secret token that delimits content wrapped in
the ``nevercache`` template tag.
"""
return "nevercache." + settings.NEVERCACHE_KEY
def add_cache_bypass(url):
"""
Adds the current time to the querystring of the URL to force a
cache reload. Used for when a form post redirects back to a
page that should display updated content, such as new comments or
ratings.
"""
if not cache_installed():
return url
hash_str = ""
if "#" in url:
url, hash_str = url.split("#", 1)
hash_str = "#" + hash_str
url += "?" if "?" not in url else "&"
return url + "t=" + str(time()).replace(".", "") + hash_str
|
bsd-2-clause
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/poamsa/package.py
|
3
|
2159
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Poamsa(MakefilePackage):
"""POA is Partial Order Alignment, a fast program for multiple sequence
alignment in bioinformatics. Its advantages are speed, scalability,
sensitivity, and the superior ability to handle branching / indels
in the alignment."""
homepage = "https://sourceforge.net/projects/poamsa"
url = "https://downloads.sourceforge.net/project/poamsa/poamsa/2.0/poaV2.tar.gz"
version('2.0', '9e2eb270d4867114406f53dab1311b2b')
def url_for_version(self, version):
url = "https://downloads.sourceforge.net/project/poamsa/poamsa/{0}/poaV{1}.tar.gz"
return url.format(version.dotted, version.up_to(1))
def build(self, spec, prefix):
make('poa')
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.lib)
install('poa', prefix.bin)
install('liblpo.a', prefix.lib)
|
lgpl-2.1
|
UC3Music/songbook-tools
|
song-directory-transpose.py
|
1
|
9369
|
#!/usr/bin/env python
import sys, os
import mmap # Thanks Steven @ http://stackoverflow.com/questions/4940032/search-for-string-in-txt-file-python
import subprocess
import readline
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.parse_and_bind("set match-hidden-files off")
import argparse
import re
from pychord import Chord
from pychord import ChordProgression
globalHalfTones = 0
songHalfTones = 0
applyCapoDropCorrection = True
def query(question, default, skipQuery=False):
if skipQuery:
return default
sys.stdout.write(question + " [" + default + "] ? ")
choice = input()
if choice == '':
return default
return choice
def recursivelyProcessBlockWithParenthesisAndExceptionsTreated( stringToProcess, processed):
global songHalfTones
#print('String to process "' + stringToProcess + '".')
afterSplit = re.split(" |_|!|\.\.\.|\.\.|: |\*|high|open|bass|riff|palm mute|notes|madd11/|m7add11/|7sus2|8|m7b5|madd13|add13", stringToProcess, 1) # 3rd parameter is maxsplit # Also works with single space, do this to catch faulty txt.
#print('* Split by delimiters "' + str(afterSplit) + '".')
#print('songHalfTones:',songHalfTones)
if len(afterSplit[0]) != 0:
chord = Chord(afterSplit[0])
#print('* Extracted "' + chord.chord + '" chord.')
chord.transpose( songHalfTones, "C#" )
#print('* Transposed to "' + chord.chord + '" chord.')
processed += chord.chord
#print('* Processed after chord "' + processed + '".')
#else:
#print('* No chord to extract.')
if len(afterSplit) == 1:
return processed
delimiterWas = ''
if len(afterSplit[1]) == 0:
delimiterWas = stringToProcess[len(afterSplit[0]):]
else:
delimiterWas = stringToProcess[len(afterSplit[0]):-len(afterSplit[1])]
#print('* Delimiter was "' + delimiterWas + '".')
processed += delimiterWas
#print('* Processed after delimiter "' + processed + '".')
#print('* Still must process "' + afterSplit[1] + '".')
return recursivelyProcessBlockWithParenthesisAndExceptionsTreated( afterSplit[1], processed )
def processBlockWithParenthesis(matchobj):
global songHalfTones
# Print for debugging purposes: what is being treated
print("--- " + matchobj.group(0))
# Treat exceptions that are simply skipped and return
if matchobj.group(0).find("(bpm") != -1:
return matchobj.group(0)
if matchobj.group(0).find("(tempo") != -1:
return matchobj.group(0)
if matchobj.group(0).find("(key") != -1:
return matchobj.group(0)
if matchobj.group(0).find("(all") != -1:
return matchobj.group(0)
# Treat exception that affects songHalfTones and returns: capo
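    # e.g. "(capo 2)" means the written chords assume a capo on fret 2, so we add
    # 2 semitones here to generate the equivalent chords for no capo (the "drop"
    # case below subtracts instead).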
if matchobj.group(0).find("capo") != -1:
if applyCapoDropCorrection:
m = matchobj.group(0)
got = re.findall('\d+', m)
if len(got) != 1:
print('*** ERROR (len(got) != 1)')
quit()
print('*** capo:',int(got[0]))
songHalfTones += int(got[0])
print('*** new songHalfTones:',songHalfTones)
# Print for debugging purposes: info on modification and original source
betweenParenthesis = matchobj.group(0).replace("(","").replace(")","")
print("+++ (chords for no capo; generated from " + betweenParenthesis + ")")
return "(chords for no capo; generated from " + betweenParenthesis + ")"
else:
return matchobj.group(0)
# Treat exception that affects songHalfTones and returns: drop
if matchobj.group(0).find("drop") != -1:
if applyCapoDropCorrection:
m = matchobj.group(0)
got = re.findall('\d+', m)
if len(got) != 1:
print('*** ERROR (len(got) != 1)')
quit()
print('*** drop:',int(got[0]))
songHalfTones -= int(got[0])
print('*** new songHalfTones:',songHalfTones)
# Print for debugging purposes: info on modification and original source
betweenParenthesis = matchobj.group(0).replace("(","").replace(")","")
print("+++ (chords for no drop; generated from " + betweenParenthesis + ")")
return "(chords for no drop; generated from " + betweenParenthesis + ")"
else:
return matchobj.group(0)
# Get betweenParenthesis and call actual process:
betweenParenthesis = matchobj.group(0).replace("(","").replace(")","")
final = recursivelyProcessBlockWithParenthesisAndExceptionsTreated( betweenParenthesis, "" )
# Print for debugging purposes: final after processing betweenParenthesis
print("+++ " + "(" + final + ")")
return "(" + final + ")"
class MyArgumentDefaultsHelpFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
return text.splitlines()
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: "%(default)s")'
return help
if __name__ == '__main__':
print("-----------------------------------")
print("Welcome to song-directory-transpose")
print("-----------------------------------")
parser = argparse.ArgumentParser(formatter_class = MyArgumentDefaultsHelpFormatter)
parser.add_argument('--input',
help='path of the default song input directory',
default='examples/')
parser.add_argument('--output',
help='path of the default song output directory',
default='out/')
parser.add_argument('--transpose',
help='half tones of transposition',
default='0')
parser.add_argument('--capoDropCorrection',
help='if automatic capo/drop correction should be applied',
default='yes')
parser.add_argument('--yes',
help='accept all, skip all queries',
nargs='?',
default='NULL') # required, see below
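    # Example invocation (hypothetical paths):
    #   ./song-directory-transpose.py --input examples/ --output out/ --transpose -5 --yes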
args = parser.parse_args()
skipQueries = False
    if args.yes != 'NULL': # if exists and no contents, replaces 'NULL' by None
print("Detected --yes parameter: will skip queries")
skipQueries = True
# Query the path of the song input directory
inputDirectory = query("Please specify the path of the song input directory", args.input, skipQueries)
print("Will use song input directory: " + inputDirectory)
# Query the path of the song output directory
outputDirectory = query("Please specify the path of the song output directory", args.output, skipQueries)
if os.path.isdir(outputDirectory):
yesNo = query('Path "' + outputDirectory + '" already exists, are you sure (confirm with "y" or "yes" without quotes)', 'yes', skipQueries)
if yesNo != "yes" and yesNo != "y":
print("Ok, bye!")
quit()
else:
print("Will use (existing) song output directory: " + outputDirectory)
else:
os.makedirs(outputDirectory)
print("Will use (newly created) song output directory: " + outputDirectory)
# Query transposition
globalHalfTones = int( query("Please specify half tones of transposition (e.g. 7 or -5 for soprano ukelele and guitalele)", args.transpose, skipQueries) )
print("Will use half tones of transposition: " + str(globalHalfTones))
# Query capoDropCorrection
while True:
yesNo = query('Apply capo/drop correction (answer with "y"/"yes" or "n"/"no" without quotes)?', args.capoDropCorrection, skipQueries)
if yesNo == "yes" or yesNo == "y":
print("Will apply capo/drop correction")
applyCapoDropCorrection = True
break
elif yesNo == "no" or yesNo == "n":
print("Will not apply capo/drop correction")
applyCapoDropCorrection = False
break
print("----------------------")
for dirname, dirnames, filenames in os.walk(inputDirectory):
for filename in sorted(filenames):
songHalfTones = globalHalfTones
#debug
print(filename)
print('*** songHalfTones:',songHalfTones)
name, extension = os.path.splitext(filename)
songIn = open( os.path.join(dirname, filename), encoding="utf8" )
songOut = open(os.path.join(outputDirectory, filename), "w", encoding="utf8")
contents = ""
if globalHalfTones != 0:
contents += "(all chords have been pre-transposed " + str(globalHalfTones) + " semitones)" + os.linesep + os.linesep
print("+++ (all chords have been pre-transposed " + str(globalHalfTones) + " semitones)")
contents += songIn.read()
contents = re.sub("\([^)]*\)", processBlockWithParenthesis, contents) # line that really does it
songOut.write(contents)
songOut.close()
songIn.close()
|
unlicense
|
tjduigna/exatomic
|
exatomic/qe/pw/classes.py
|
10
|
2496
|
# -*- coding: utf-8 -*-
#'''
#Classes for pw.x
#=======================================
#'''
#import pprint
#from exatomic import Universe
#from exatomic.frame import _min_frame_from_atom
#from exqe.classes import classes
#
#
#class PWInput:
# '''
# '''
# _order = ['control', 'system', 'electrons', 'ions', 'cell', 'atomic_species',
# 'atomic_positions', 'k_points', 'cell_parameters', 'occupations',
# 'constraints', 'atomic_forces']
#
# def to_universe(self):
# '''
# '''
# atom = self.atomic_positions
# atom['frame'] = 0
# atom['label'] = range(len(atom))
# atom.index.names = ['atom']
# frame = _min_frame_from_atom(atom)
# return Universe(frame=frame, atom=atom)
#
# def __str__(self):
# '''
# '''
# blocks = []
# for block_name in self._order:
# block = self[block_name]
# if block is not None:
# blocks.append(str(block))
# return '\n'.join(blocks)
#
# def __getitem__(self, key):
# return self.__dict__[key]
#
# def __setitem__(self, key, value):
# setattr(self, key, value)
#
# def __init__(self, control=None, system=None, electrons=None, ions=None,
# cell=None, atomic_species=None, atomic_positions=None,
# k_points=None, cell_parameters=None, occupations=None,
# constraints=None, atomic_forces=None):
# '''
# '''
# self.control = control
# self.system = system
# self.electrons = electrons
# self.ions = ions
# self.cell = cell
# self.atomic_species = atomic_species
# self.atomic_positions = atomic_positions
# self.k_points = k_points
# self.cell_parameters = cell_parameters
# self.occupations = occupations
# self.constraints = constraints
# self.atomic_forces = atomic_forces
#
# def __repr__(self):
# obj = str(self).split('\n')
# pprint.pprint(obj, indent=0, width=128)
# return 'PWInput(len: {0})'.format(len(obj))
#
#
#class PWOutput:
# '''
# '''
# def to_universe(self):
# uni = Universe(atom=self.atom, frame=self.frame)
# return uni
#
# def __init__(self, timings=None, atom=None, frame=None, scf=None,
# orbital=None):
# self.timings = timings
# self.atom = atom
# self.frame = frame
# self.scf = scf
# self.orbital = orbital
#
|
apache-2.0
|
edmundgentle/schoolscript
|
SchoolScript/bin/Debug/pythonlib/Lib/lib2to3/fixes/fix_isinstance.py
|
5
|
1660
|
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed. This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.
eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
-> isinstance(x, int)
"""
from .. import fixer_base
from ..fixer_util import token
class FixIsinstance(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power<
'isinstance'
trailer< '(' arglist< any ',' atom< '('
args=testlist_gexp< any+ >
')' > > ')' >
>
"""
run_order = 6
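    # e.g. "isinstance(x, (int, int))" is rewritten to "isinstance(x, int)":
    # duplicate names and their trailing commas are dropped in transform() below.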
def transform(self, node, results):
names_inserted = set()
testlist = results["args"]
args = testlist.children
new_args = []
iterator = enumerate(args)
for idx, arg in iterator:
if arg.type == token.NAME and arg.value in names_inserted:
if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
next(iterator)
continue
else:
new_args.append(arg)
if arg.type == token.NAME:
names_inserted.add(arg.value)
if new_args and new_args[-1].type == token.COMMA:
del new_args[-1]
if len(new_args) == 1:
atom = testlist.parent
new_args[0].prefix = atom.prefix
atom.replace(new_args[0])
else:
args[:] = new_args
node.changed()
|
gpl-2.0
|
hkchenhongyi/django
|
django/contrib/gis/sitemaps/kml.py
|
398
|
2544
|
from django.apps import apps
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.sitemaps import Sitemap
from django.core import urlresolvers
from django.db import models
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
        Go through the given sources and return a list of 3-tuples, each
        giving the application label, module name, and field name of a
        GeometryField encountered in the sources.
        If no sources are provided, all installed models are examined.
"""
kml_sources = []
if sources is None:
sources = apps.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.model_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
def get_urls(self, page=1, site=None, protocol=None):
"""
        This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site, protocol=protocol)
for url in urls:
url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={'label': obj[0],
'model': obj[1],
'field_name': obj[2],
}
)
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
|
bsd-3-clause
|
ben-hopps/nupic
|
tests/unit/nupic/regions/record_sensor_region_test.py
|
35
|
3069
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for the RecordSensor region."""
import numpy
import unittest2 as unittest
from nupic.engine import Network
from nupic.regions.RecordSensor import RecordSensor
class RecordSensorRegionTest(unittest.TestCase):
"""RecordSensor region unit tests."""
def testVaryingNumberOfCategories(self):
# Setup network with sensor; max number of categories = 2
net = Network()
sensorRegion = net.addRegion(
"sensor", "py.RecordSensor", "{'numCategories': 2}")
sensor = sensorRegion.getSelf()
# Test for # of output categories = max
data = {"_timestamp": None, "_category": [0, 1], "label": "0 1",
"_sequenceId": 0, "y": 2.624902024, "x": 0.0,
"_timestampRecordIdx": None, "_reset": 0}
sensorOutput = numpy.array([0, 0], dtype="int32")
sensor.populateCategoriesOut(data["_category"], sensorOutput)
self.assertSequenceEqual([0, 1], sensorOutput.tolist(),
"Sensor failed to populate the array with record of two categories.")
# Test for # of output categories > max
data["_category"] = [1, 2, 3]
sensorOutput = numpy.array([0, 0], dtype="int32")
sensor.populateCategoriesOut(data["_category"], sensorOutput)
self.assertSequenceEqual([1, 2], sensorOutput.tolist(),
"Sensor failed to populate the array w/ record of three categories.")
# Test for # of output categories < max
data["_category"] = [3]
sensorOutput = numpy.array([0, 0], dtype="int32")
sensor.populateCategoriesOut(data["_category"], sensorOutput)
self.assertSequenceEqual([3, -1], sensorOutput.tolist(),
"Sensor failed to populate the array w/ record of one category.")
# Test for no output categories
data["_category"] = [None]
sensorOutput = numpy.array([0, 0], dtype="int32")
sensor.populateCategoriesOut(data["_category"], sensorOutput)
self.assertSequenceEqual([-1, -1], sensorOutput.tolist(),
"Sensor failed to populate the array w/ record of zero categories.")
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
JshWright/home-assistant
|
homeassistant/components/automation/event.py
|
2
|
1901
|
"""
Offer event listening automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/docs/automation/trigger/#event-trigger
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback, CoreState
from homeassistant.const import CONF_PLATFORM, EVENT_HOMEASSISTANT_START
from homeassistant.helpers import config_validation as cv
CONF_EVENT_TYPE = 'event_type'
CONF_EVENT_DATA = 'event_data'
_LOGGER = logging.getLogger(__name__)
TRIGGER_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'event',
vol.Required(CONF_EVENT_TYPE): cv.string,
vol.Optional(CONF_EVENT_DATA): dict,
})
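# A trigger configuration matching this schema looks roughly like
# (hypothetical event type and data):
#   trigger:
#     platform: event
#     event_type: my_custom_event
#     event_data:
#       mood: happy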
@asyncio.coroutine
def async_trigger(hass, config, action):
"""Listen for events based on configuration."""
event_type = config.get(CONF_EVENT_TYPE)
event_data = config.get(CONF_EVENT_DATA)
if (event_type == EVENT_HOMEASSISTANT_START and
hass.state == CoreState.starting):
_LOGGER.warning('Deprecation: Automations should not listen to event '
"'homeassistant_start'. Use platform 'homeassistant' "
'instead. Feature will be removed in 0.45')
hass.async_run_job(action, {
'trigger': {
'platform': 'event',
'event': None,
},
})
return lambda: None
@callback
def handle_event(event):
"""Listen for events and calls the action when data matches."""
if not event_data or all(val == event.data.get(key) for key, val
in event_data.items()):
hass.async_run_job(action, {
'trigger': {
'platform': 'event',
'event': event,
},
})
return hass.bus.async_listen(event_type, handle_event)
|
apache-2.0
|
roadhead/satchmo
|
satchmo/discount/templatetags/satchmo_discounts.py
|
2
|
6729
|
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
from django import template
from django.conf import settings
from django.core import urlresolvers
from django.template import Context, Template
from django.utils.translation import get_language, ugettext_lazy as _
from satchmo.configuration import config_value
from satchmo.discount.utils import calc_by_percentage, find_best_auto_discount
from satchmo.shop.templatetags import get_filter_args
from satchmo.tax.templatetags import satchmo_tax
import logging
log = logging.getLogger('discount')
register = template.Library()
def sale_price(product):
"""Returns the sale price, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_sale_price(product)
else:
return untaxed_sale_price(product)
register.filter('sale_price', sale_price)
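# Template usage sketch (assuming this tag library is loaded):
#   {{ product|sale_price }} or {{ product|discount_price:sale }}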
def untaxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied."""
discount = find_best_auto_discount(product)
price = product.unit_price
    if discount and discount.valid_for_product(product):
        price = calc_by_percentage(price, discount.percentage)
    return price
register.filter('untaxed_sale_price', untaxed_sale_price)
def taxed_sale_price(product):
"""Returns the product unit price with the best auto discount applied and taxes included."""
taxer = satchmo_tax._get_taxprocessor()
price = sale_price(product)
price = price + taxer.by_price(product.taxClass, price)
return price
register.filter('taxed_sale_price', taxed_sale_price)
def discount_cart_total(cart, discount):
"""Returns the discounted total for this cart, with tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_cart_total(cart, discount)
else:
return untaxed_discount_cart_total(cart, discount)
register.filter('discount_cart_total', discount_cart_total)
def untaxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart"""
if discount:
total = Decimal('0.00')
for item in cart:
total += discount_line_total(item, discount)
else:
total = cart.total
return total
register.filter('untaxed_discount_cart_total', untaxed_discount_cart_total)
def taxed_discount_cart_total(cart, discount):
"""Returns the discounted total for this cart with taxes included"""
if discount:
total = Decimal('0.00')
for item in cart:
total += taxed_discount_line_total(item, discount)
else:
total = discount_cart_total(cart, discount)
return total
register.filter('taxed_discount_cart_total', taxed_discount_cart_total)
def discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_line_total(cartitem, discount)
else:
return untaxed_discount_line_total(cartitem, discount)
register.filter('discount_line_total', discount_line_total)
def untaxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item"""
price = cartitem.line_total
if discount and discount.valid_for_product(cartitem.product):
price = calc_by_percentage(price, discount.percentage)
return price
register.filter('untaxed_discount_line_total', untaxed_discount_line_total)
def taxed_discount_line_total(cartitem, discount):
"""Returns the discounted line total for this cart item with taxes included."""
price = discount_line_total(cartitem, discount)
taxer = satchmo_tax._get_taxprocessor()
price = price + taxer.by_price(cartitem.product.taxClass, price)
return price
register.filter('taxed_discount_line_total', taxed_discount_line_total)
def discount_price(product, discount):
"""Returns the product price with the discount applied, including tax if that is the default.
Ex: product|discount_price:sale
"""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_price(product, discount)
else:
return untaxed_discount_price(product, discount)
register.filter('discount_price', discount_price)
def untaxed_discount_price(product, discount):
"""Returns the product price with the discount applied.
Ex: product|discount_price:sale
"""
up = product.unit_price
if discount and discount.valid_for_product(product):
pcnt = calc_by_percentage(up, discount.percentage)
return pcnt
else:
return up
register.filter('untaxed_discount_price', untaxed_discount_price)
def taxed_discount_price(product, discount):
"""Returns the product price with the discount applied, and taxes included.
Ex: product|discount_price:sale
"""
price = discount_price(product, discount)
taxer = satchmo_tax._get_taxprocessor()
price = price + taxer.by_price(product.taxClass, price)
    return price
register.filter('taxed_discount_price', taxed_discount_price)
def discount_ratio(discount):
"""Returns the discount as a ratio, making sure that the percent is under 1"""
pcnt = discount.percentage
if pcnt > 1:
pcnt = pcnt/100
return 1-pcnt
register.filter('discount_ratio', discount_ratio)
def discount_saved(product, discount):
"""Returns the amount saved by the discount, including tax if that is the default."""
if config_value('TAX', 'DEFAULT_VIEW_TAX'):
return taxed_discount_saved(product, discount)
else:
return untaxed_discount_saved(product, discount)
register.filter('discount_saved', discount_saved)
def untaxed_discount_saved(product, discount):
"""Returns the amount saved by the discount"""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = calc_by_percentage(price, discount.percentage)
saved = price-discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('untaxed_discount_saved', untaxed_discount_saved)
def taxed_discount_saved(product, discount):
"""Returns the amount saved by the discount, after applying taxes."""
if discount and discount.valid_for_product(product):
price = product.unit_price
discounted = taxed_discount_price(product, discount)
saved = price-discounted
cents = Decimal("0.01")
return saved.quantize(cents)
else:
return Decimal('0.00')
register.filter('taxed_discount_saved', taxed_discount_saved)
|
bsd-3-clause
|
AnhellO/DAS_Sistemas
|
Ago-Dic-2020/aguirre-juarez-david/practica-3/abstract_factory.py
|
1
|
1613
|
import os
class ClearConsole:
def clear(self):
pass
class PrintColor:
def set_color(self, color):
pass
def print(self, str):
pass
class WindowsClearConsole(ClearConsole):
def clear(self):
return os.system("cls")
class LinuxClearConsole(ClearConsole):
def clear(self):
return os.system("clear")
class WindowsPrintColor(PrintColor):
def __init__(self):
self.__color_com = "color 0f"
def set_color(self, color):
color = color.capitalize()
if color == "Red":
self.__color_com = "color 04"
elif color == "Lima":
self.__color_com = "color 0a"
elif color == "Cyan":
self.__color_com = "color 0b"
else:
self.__color_com = "color 0f"
def print(self, text):
return os.system(f"{self.__color_com} && echo {text}")
class LinuxPrintColor(PrintColor):
def __init__(self):
        self.__color_com = "\\033[39m"
def set_color(self, color):
color = color.capitalize()
if color == "Red":
self.__color_com = "\\033[31m"
elif color == "Lima":
self.__color_com = "\\033[32m"
elif color == "Cyan":
self.__color_com = "\\033[36m"
else:
self.__color_com = "\\033[39m"
def print(self, text):
return os.system(f"printf '{self.__color_com}{text}'")
class AbstractFactory:
def get_clear_console(self):
pass
def get_print_color(self):
pass
class LinuxFactory(AbstractFactory):
def get_clear_console(self):
return LinuxClearConsole()
def get_print_color(self):
return LinuxPrintColor()
class WindowsFactory(AbstractFactory):
def get_clear_console(self):
return WindowsClearConsole()
def get_print_color(self):
return WindowsPrintColor()
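# Example client code (hypothetical, not part of the factories above):
#   factory = WindowsFactory() if os.name == "nt" else LinuxFactory()
#   factory.get_clear_console().clear()
#   printer = factory.get_print_color()
#   printer.set_color("Cyan")
#   printer.print("Hello")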
|
mit
|
ESOedX/edx-platform
|
common/djangoapps/track/shim.py
|
2
|
3557
|
"""Map new event context values to old top-level field values. Ensures events can be parsed by legacy parsers."""
from __future__ import absolute_import
import json
from .transformers import EventTransformerRegistry
CONTEXT_FIELDS_TO_INCLUDE = [
'username',
'session',
'ip',
'agent',
'host',
'referer',
'accept_language'
]
class LegacyFieldMappingProcessor(object):
"""Ensures all required fields are included in emitted events"""
def __call__(self, event):
context = event.get('context', {})
if 'context' in event:
for field in CONTEXT_FIELDS_TO_INCLUDE:
self.move_from_context(field, event)
remove_shim_context(event)
if 'data' in event:
if context.get('event_source', '') == 'browser' and isinstance(event['data'], dict):
event['event'] = json.dumps(event['data'])
else:
event['event'] = event['data']
del event['data']
else:
event['event'] = {}
if 'timestamp' in context:
event['time'] = context['timestamp']
del context['timestamp']
elif 'timestamp' in event:
event['time'] = event['timestamp']
if 'timestamp' in event:
del event['timestamp']
self.move_from_context('event_type', event, event.get('name', ''))
self.move_from_context('event_source', event, 'server')
self.move_from_context('page', event, None)
def move_from_context(self, field, event, default_value=''):
"""Move a field from the context to the top level of the event."""
context = event.get('context', {})
if field in context:
event[field] = context[field]
del context[field]
else:
event[field] = default_value
def remove_shim_context(event):
"""
Remove obsolete fields from event context.
"""
if 'context' in event:
context = event['context']
# These fields are present elsewhere in the event at this point
context_fields_to_remove = set(CONTEXT_FIELDS_TO_INCLUDE)
# This field is only used for Segment web analytics and does not concern researchers
context_fields_to_remove.add('client_id')
for field in context_fields_to_remove:
if field in context:
del context[field]
class GoogleAnalyticsProcessor(object):
"""Adds course_id as label, and sets nonInteraction property"""
# documentation of fields here: https://segment.com/docs/integrations/google-analytics/
# this should *only* be used on events destined for segment.com and eventually google analytics
def __call__(self, event):
context = event.get('context', {})
course_id = context.get('course_id')
copied_event = event.copy()
if course_id is not None:
copied_event['label'] = course_id
copied_event['nonInteraction'] = 1
return copied_event
class PrefixedEventProcessor(object):
"""
Process any events whose name or prefix (ending with a '.') is registered
as an EventTransformer.
"""
def __call__(self, event):
"""
If the event is registered with the EventTransformerRegistry, transform
it. Otherwise do nothing to it, and continue processing.
"""
try:
event = EventTransformerRegistry.create_transformer(event)
except KeyError:
return
event.transform()
return event
|
agpl-3.0
|
kant/inasafe
|
safe/metadata/test/test_metadata.py
|
2
|
2066
|
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Exception Classes.**
Custom exception classes for the IS application.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe.metadata.utils import insert_xml_element
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '12/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from xml.etree import ElementTree
from safe.metadata import BaseMetadata
from safe.metadata import ImpactLayerMetadata
from unittest import TestCase
class TestMetadata(TestCase):
def test_no_BaseMeta_instantiation(self):
"""check that we can't instantiate abstract class BaseMetadata with
abstract methods"""
with self.assertRaises(TypeError):
BaseMetadata('random_layer_id')
def test_metadata(self):
"""Check we can't instantiate with unsupported xml types"""
metadata = ImpactLayerMetadata('random_layer_id')
path = 'gmd:MD_Metadata/gmd:dateStamp/gco:RandomString'
# using unsupported xml types
test_value = 'Random string'
with self.assertRaises(KeyError):
metadata.set('ISO19115_TEST', test_value, path)
def test_insert_xml_element(self):
"""Check we can't insert custom nested elements"""
root = ElementTree.Element('root')
b = ElementTree.SubElement(root, 'b')
ElementTree.SubElement(b, 'c')
new_element_path = 'd/e/f'
expected_xml = '<root><b><c /></b><d><e><f>TESTtext</f></e></d></root>'
element = insert_xml_element(root, new_element_path)
element.text = 'TESTtext'
result_xml = ElementTree.tostring(root)
self.assertEquals(expected_xml, result_xml)
|
gpl-3.0
|
jsoref/django
|
tests/template_backends/test_dummy.py
|
306
|
3603
|
# coding: utf-8
from __future__ import unicode_literals
from django.forms import CharField, Form, Media
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
engine_class = TemplateStrings
backend_name = 'dummy'
options = {}
@classmethod
def setUpClass(cls):
super(TemplateStringsTests, cls).setUpClass()
params = {
'DIRS': [],
'APP_DIRS': True,
'NAME': cls.backend_name,
'OPTIONS': cls.options,
}
cls.engine = cls.engine_class(params)
def test_from_string(self):
template = self.engine.from_string("Hello!\n")
content = template.render()
self.assertEqual(content, "Hello!\n")
def test_get_template(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'world'})
self.assertEqual(content, "Hello world!\n")
def test_get_template_non_existing(self):
with self.assertRaises(TemplateDoesNotExist) as e:
self.engine.get_template('template_backends/non_existing.html')
self.assertEqual(e.exception.backend, self.engine)
def test_get_template_syntax_error(self):
# There's no way to trigger a syntax error with the dummy backend.
# The test still lives here to factor it between other backends.
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('template_backends/syntax_error.html')
def test_html_escaping(self):
template = self.engine.get_template('template_backends/hello.html')
context = {'name': '<script>alert("XSS!");</script>'}
content = template.render(context)
        self.assertIn('&lt;script&gt;', content)
self.assertNotIn('<script>', content)
def test_django_html_escaping(self):
if self.backend_name == 'dummy':
self.skipTest("test doesn't apply to dummy backend")
class TestForm(Form):
test_field = CharField()
media = Media(js=['my-script.js'])
form = TestForm()
template = self.engine.get_template('template_backends/django_escaping.html')
content = template.render({'media': media, 'test_form': form})
expected = '{}\n\n{}\n\n{}'.format(media, form, form['test_field'])
self.assertHTMLEqual(content, expected)
def test_csrf_token(self):
request = HttpRequest()
CsrfViewMiddleware().process_view(request, lambda r: None, (), {})
template = self.engine.get_template('template_backends/csrf.html')
content = template.render(request=request)
expected = (
'<input type="hidden" name="csrfmiddlewaretoken" '
'value="{}" />'.format(get_token(request)))
self.assertHTMLEqual(content, expected)
def test_no_directory_traversal(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template('../forbidden/template_backends/hello.html')
def test_non_ascii_characters(self):
template = self.engine.get_template('template_backends/hello.html')
content = template.render({'name': 'Jérôme'})
self.assertEqual(content, "Hello Jérôme!\n")
|
bsd-3-clause
|
opennode/opennode-knot
|
version.py
|
5
|
2715
|
# -*- coding: utf-8 -*-
# Author: Douglas Creager <dcreager@dcreager.net>
# This file is placed into the public domain.
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import *
#
# setup(
# version=get_git_version(),
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
__all__ = ("get_git_version")
from subprocess import Popen, PIPE
def call_git_describe(abbrev=4):
try:
p = Popen(['git', 'describe', '--abbrev=%d' % abbrev, '--match', '[0-9]*'],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
return line.strip()
except:
return None
def read_release_version():
try:
f = open("RELEASE-VERSION", "r")
try:
version = f.readlines()[0]
return version.strip()
finally:
f.close()
except:
return None
def write_release_version(version):
f = open("RELEASE-VERSION", "w")
f.write("%s\n" % version)
f.close()
def get_git_version(abbrev=4):
# Read in the version that's currently in RELEASE-VERSION.
release_version = read_release_version()
# First try to get the current version using “git describe”.
version = call_git_describe(abbrev)
# If that doesn't work, fall back on the value that's in
# RELEASE-VERSION.
if version is None:
version = release_version
# If we still don't have anything, that's an error.
if version is None:
raise ValueError("Cannot find the version number!")
# If the current version is different from what's in the
# RELEASE-VERSION file, update the file to be current.
if version != release_version:
write_release_version(version)
# Finally, return the current version.
return version
if __name__ == "__main__":
print get_git_version()
|
gpl-3.0
|
myles/couchdb-python
|
couchdb/tools/dump.py
|
1
|
2449
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from base64 import b64decode
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from optparse import OptionParser
import simplejson as json
import sys
from couchdb import __version__ as VERSION
from couchdb.client import Database
def dump_db(dburl, username=None, password=None, boundary=None):
envelope = MIMEMultipart('mixed', boundary)
db = Database(dburl)
if username is not None and password is not None:
db.resource.http.add_credentials(username, password)
for docid in db:
doc = db.get(docid, attachments=True)
print>>sys.stderr, 'Dumping document %r' % doc.id
attachments = doc.pop('_attachments', {})
part = MIMEBase('application', 'json')
part.set_payload(json.dumps(doc, sort_keys=True, indent=2))
if attachments:
inner = MIMEMultipart('mixed')
inner.attach(part)
for name, info in attachments.items():
content_type = info.get('content_type')
if content_type is None: # CouchDB < 0.8
content_type = info.get('content-type')
maintype, subtype = content_type.split('/', 1)
subpart = MIMEBase(maintype, subtype)
subpart['Content-ID'] = name
subpart.set_payload(b64decode(info['data']))
inner.attach(subpart)
part = inner
part['Content-ID'] = doc.id
part['ETag'] = doc.rev
envelope.attach(part)
return envelope.as_string()
def main():
parser = OptionParser(usage='%prog [options] dburl', version=VERSION)
parser.add_option('-u', '--username', action='store', dest='username',
help='the username to use for authentication')
parser.add_option('-p', '--password', action='store', dest='password',
help='the password to use for authentication')
parser.set_defaults()
options, args = parser.parse_args()
if len(args) != 1:
return parser.error('incorrect number of arguments')
print dump_db(args[0], username=options.username,
password=options.password)
if __name__ == '__main__':
main()
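# Example invocation (a sketch, not part of the original tool; the database
# URL, credentials, and output file below are placeholders):
#
#   python -m couchdb.tools.dump http://localhost:5984/mydb > mydb.dump
#   python -m couchdb.tools.dump -u admin -p secret http://localhost:5984/mydb > mydb.dump
#
# The output is the MIME multipart envelope built by dump_db() above, with one
# application/json part per document and nested parts for attachments.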
|
bsd-3-clause
|
gram526/VTK
|
ThirdParty/Twisted/twisted/names/secondary.py
|
39
|
5723
|
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
__all__ = ['SecondaryAuthority', 'SecondaryAuthorityService']
from twisted.internet import task, defer
from twisted.names import dns
from twisted.names import common
from twisted.names import client
from twisted.names import resolve
from twisted.names.authority import FileAuthority
from twisted.python import log, failure
from twisted.application import service
class SecondaryAuthorityService(service.Service):
calls = None
_port = 53
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@param domains: A sequence of domain names for which to perform
zone transfers.
"""
self.primary = primary
self.domains = [SecondaryAuthority(primary, d) for d in domains]
@classmethod
def fromServerAddressAndDomains(cls, serverAddress, domains):
"""
Construct a new L{SecondaryAuthorityService} from a tuple giving a
server address and a sequence of C{str} giving the names of the domains
for which this is an authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domains: A sequence of C{str} giving the domains to transfer.
@return: A new instance of L{SecondaryAuthorityService}.
"""
service = cls(None, [])
service.primary = serverAddress[0]
service._port = serverAddress[1]
service.domains = [
SecondaryAuthority.fromServerAddressAndDomain(serverAddress, d)
for d in domains]
return service
def getAuthority(self):
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
class SecondaryAuthority(common.ResolverBase):
"""
An Authority that keeps itself updated by performing zone transfers.
@ivar primary: The IP address of the server from which zone transfers will
be attempted.
@type primary: C{str}
@ivar _port: The port number of the server from which zone transfers will be
attempted.
@type _port: C{int}
@ivar _reactor: The reactor to use to perform the zone transfers, or C{None}
to use the global reactor.
"""
transferring = False
soa = records = None
_port = 53
_reactor = None
def __init__(self, primaryIP, domain):
common.ResolverBase.__init__(self)
self.primary = primaryIP
self.domain = domain
@classmethod
def fromServerAddressAndDomain(cls, serverAddress, domain):
"""
Construct a new L{SecondaryAuthority} from a tuple giving a server
address and a C{str} giving the name of a domain for which this is an
authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domain: A C{str} giving the domain to transfer.
@return: A new instance of L{SecondaryAuthority}.
"""
secondary = cls(None, None)
secondary.primary = serverAddress[0]
secondary._port = serverAddress[1]
secondary.domain = domain
return secondary
def transfer(self):
if self.transferring:
return
self.transferring = True
reactor = self._reactor
if reactor is None:
from twisted.internet import reactor
resolver = client.Resolver(
servers=[(self.primary, self._port)], reactor=reactor)
return resolver.lookupZone(self.domain
).addCallback(self._cbZone
).addErrback(self._ebZone
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority.__dict__['_lookup'](self, name, cls, type, timeout)
#shouldn't we just subclass? :P
lookupZone = FileAuthority.__dict__['lookupZone']
def _cbZone(self, zone):
ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (str(rec.name).lower(), rec.payload)
else:
r.setdefault(str(rec.name).lower(), []).append(rec.payload)
def _ebZone(self, failure):
log.msg("Updating %s from %s failed during zone transfer" % (self.domain, self.primary))
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
self.transferring = False
log.msg("Transferring %s from %s failed after zone transfer" % (self.domain, self.primary))
log.err(failure)
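# Example usage (a sketch, not part of the original module; the primary server
# address and domain names below are placeholders):
#
#   from twisted.application import service
#   from twisted.names.secondary import SecondaryAuthorityService
#
#   application = service.Application('secondary-dns')
#   svc = SecondaryAuthorityService.fromServerAddressAndDomains(
#       ('10.0.0.1', 53), ['example.com', 'example.org'])
#   svc.setServiceParent(application)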
|
bsd-3-clause
|
cognitive-catalyst/Lightning
|
python/nlcTreeBuilder.py
|
3
|
4253
|
# Copyright IBM
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#Takes an NLC-style csv with an optional 3rd field. This extra
#field is used to define a classifier name. The csv is then broken
#up into much smaller CSVs and those are used to train a new NLC
#for any tree which needs to be updated.
#Usage: nlcTreeBuilder.py -i csvInputFile -u username -p password
#TODO: add more options for whether to deploy, train, etc.
#TODO: Integrate other Lightning scripts instead of calling nlc independently.
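#Example input CSV (a sketch; the texts, class labels, and tree names below
#are hypothetical, not taken from this repository):
#
#   "where is my card",card_location,default
#   "reset my password",password_reset,default
#   "hours for the downtown branch",branch_hours,branches
#
#Rows sharing the same 3rd field are written to constructedFiles/tmp/<tree>.csv
#and each new or changed file is used to train a separate classifier.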
import csv
import sys
import os
import shutil
import filecmp
import getopt
import nlc
import requests
#set credentials
username = ""
password = ""
defaultTreeName = "default"
inputcsv = ""
url = "https://gateway.watsonplatform.net/natural-language-classifier/api"
options, args = getopt.getopt(sys.argv[1:], 'i:u:p:')
for opts in options:
if '-i' in opts[0]:
inputcsv = opts[1]
if '-u' in opts[0]:
username = opts[1]
if '-p' in opts[0]:
password = opts[1]
if not inputcsv or not username or not password:
print("usage: nlcTreeBuilder.py -i csvInputFile -u username -p password")
exit(2)
if __name__ == "__main__":
if not os.path.exists("constructedFiles/tmp"):
os.makedirs("constructedFiles/tmp")
with open(inputcsv) as csvfile:
fileList = []
treeFile = csv.reader(csvfile, delimiter=',', quotechar='"')
newcsvFiles = []
outputFileList = []
#Read each row in input csv and break into csvs for each tree
for row in treeFile:
if len(row) > 2:
if row[2] == "":
row[2] = defaultTreeName
if row[2] in fileList:
newcsvFiles[fileList.index(row[2])].writerow([row[0], row[1]])
else:
fileList.append(row[2])
outputFileList.append(open("constructedFiles/tmp/"+row[2]+".csv", 'w', newline=''))
print("Found new tree " + row[2])
newcsvFiles.append(csv.writer(outputFileList[-1], delimiter=',', quotechar='"'))
else:
print("Only 2 columns in this csv. No need to for multi-tree support. Exiting")
exit(1)
#Close all files
for f in outputFileList:
f.close()
#Compare newly created to csvs to any preexisting ones.
#If there are changes overwrite old csv and post to train new nlc
nlcInstance = nlc.NaturalLanguageClassifierInstance(username, password, url)
for file in os.listdir("constructedFiles/tmp/"):
#If file doesn't exist copy and post
if not os.path.exists("constructedFiles/"+file):
shutil.copy("constructedFiles/tmp/"+file, "constructedFiles/")
print("Training classifier for " + file)
try:
nlcInstance.train_classifier(file, training_file="constructedFiles/tmp/"+file)
except requests.HTTPError as e:
print(e)
else:
if filecmp.cmp("constructedFiles/tmp/"+file, "constructedFiles/"+file, shallow=False):
print(file + " hasn't changed. Skipping classifier training")
else:
print("Training classifier for " + file)
shutil.copy("constructedFiles/tmp/"+file, "constructedFiles/")
try:
nlcInstance.train_classifier(file, training_file="constructedFiles/tmp/"+file)
except requests.HTTPError as e:
print(e)
|
apache-2.0
|
vadimtk/chrome4sdp
|
third_party/WebKit/Tools/Scripts/webkitpy/common/version_check.py
|
70
|
1733
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
if sys.version < '2.7' or sys.version >= '2.8':
sys.stderr.write("Unsupported Python version: webkitpy requires 2.7.x, and you're running %s.\n" % sys.version.split()[0])
sys.exit(1)
|
bsd-3-clause
|
hsaputra/tensorflow
|
tensorflow/contrib/specs/python/specs.py
|
122
|
4976
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for TensorFlow models specified using specs_ops.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import exec_
from tensorflow.contrib.specs.python import params_ops
from tensorflow.contrib.specs.python import specs_lib
from tensorflow.contrib.specs.python import specs_ops
from tensorflow.python.util import tf_inspect
def eval_params(params, environment=None):
"""Evaluates a parameter specification and returns the environment.
Args:
params: parameter assignments as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by
executing `params`
Raises:
Exception: other exceptions raised during execution of `params`
"""
specs_lib.check_keywords(params)
bindings = {}
if environment:
bindings.update(environment)
exec_(params, vars(params_ops), bindings) # pylint: disable=exec-used
return bindings
def eval_spec(spec, environment=None):
"""Evaluates a spec and returns the environment.
This function allows you to use a spec to obtain multiple bindings
in an environment. That is useful if you use the spec language to
specify multiple components of a larger network, for example:
"left = Cr(64, [5,5]); right = Fc(64)". Usually, you will want to use
`create_net` or `create_net_fun` below.
Args:
spec: specification as a string
environment: a dictionary of input bindings
Returns:
Environment with additional bindings created by spec.
Raises:
Exception: other exceptions raised during execution of `spec`
"""
specs_lib.check_keywords(spec)
bindings = {}
if environment:
bindings.update(environment)
exec_(spec, vars(specs_ops), bindings) # pylint: disable=exec-used
return bindings
def create_net_fun(spec, environment=None):
"""Evaluates a spec and returns the binding of `net`.
Specs are written in a DSL based on function composition. A spec
like `net = Cr(64, [3, 3])` assigns an object that represents a
single argument function capable of creating a network to
the variable `net`.
Args:
spec: specification as a string, ending with a `net = ...` statement
environment: a dictionary of input bindings
Returns:
A callable that instantiates the `net` binding.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
bindings = eval_spec(spec, environment)
net = bindings.get("net", None)
if net is None:
raise ValueError("spec failed to create 'net': %s" % (spec,))
return net.funcall
def create_net(spec, inputs, environment=None):
"""Evaluates a spec and creates a network instance given the inputs.
Args:
spec: specification as a string, ending with a `net = ...` statement
inputs: input that `net` is applied to
environment: a dictionary of input bindings
Returns:
A callable that instantiates the `net` binding.
Raises:
ValueError: spec failed to create a `net`
Exception: other exceptions raised during execution of `spec`
"""
return create_net_fun(spec, environment)(inputs)
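# Example (a sketch, not part of the module; assumes `inputs` is a batched
# image tensor and uses the spec syntax shown in the docstrings above):
#
#   outputs = create_net("net = Cr(64, [3, 3])", inputs)
#
#   net_fn = create_net_fun("net = Cr(64, [3, 3])")
#   outputs2 = net_fn(inputs)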
class LocalImport(object):
"""A class that allows us to temporarily import something.
Attributes:
frame: the frame in which the context manager was invoked
names: a dictionary containing the new bindings
old: variable bindings that have been shadowed by the import
"""
def __init__(self, names):
"""Create a context manager that binds the names in values.
Args:
names: A dictionary or module containing the bindings.
"""
if not isinstance(names, dict):
names = vars(names)
self.names = names
def __enter__(self):
self.frame = tf_inspect.currentframe()
bindings = self.frame.f_back.f_globals
self.old = {k: bindings.get(k, None) for k in self.names.keys()}
bindings.update(self.names)
def __exit__(self, some_type, value, traceback):
del some_type, value, traceback
bindings = self.frame.f_back.f_globals
bindings.update(self.old)
for k, v in self.old.items():
if v is None:
del bindings[k]
del self.frame
ops = LocalImport(specs_ops)
|
apache-2.0
|
psdh/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/dispatch.py
|
652
|
14786
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Dispatch WebSocket request.
"""
import logging
import os
import re
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import msgutil
from mod_pywebsocket import mux
from mod_pywebsocket import stream
from mod_pywebsocket import util
_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
_SOURCE_SUFFIX = '_wsh.py'
_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
'web_socket_passive_closing_handshake')
class DispatchException(Exception):
"""Exception in dispatching WebSocket request."""
def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
super(DispatchException, self).__init__(name)
self.status = status
def _default_passive_closing_handshake_handler(request):
"""Default web_socket_passive_closing_handshake handler."""
return common.STATUS_NORMAL_CLOSURE, ''
def _normalize_path(path):
"""Normalize path.
Args:
path: the path to normalize.
Path is converted to the absolute path.
The input path can use either '\\' or '/' as the separator.
The normalized path always uses '/' regardless of the platform.
"""
path = path.replace('\\', os.path.sep)
path = os.path.realpath(path)
path = path.replace('\\', '/')
return path
def _create_path_to_resource_converter(base_dir):
"""Returns a function that converts the path of a WebSocket handler source
file to a resource string by removing the path to the base directory from
its head, removing _SOURCE_SUFFIX from its tail, and replacing path
separators in it with '/'.
Args:
base_dir: the path to the base directory.
"""
base_dir = _normalize_path(base_dir)
base_len = len(base_dir)
suffix_len = len(_SOURCE_SUFFIX)
def converter(path):
if not path.endswith(_SOURCE_SUFFIX):
return None
# _normalize_path must not be used because resolving symlink breaks
# following path check.
path = path.replace('\\', '/')
if not path.startswith(base_dir):
return None
return path[base_len:-suffix_len]
return converter
def _enumerate_handler_file_paths(directory):
"""Returns a generator that enumerates WebSocket Handler source file names
in the given directory.
"""
for root, unused_dirs, files in os.walk(directory):
for base in files:
path = os.path.join(root, base)
if _SOURCE_PATH_PATTERN.search(path):
yield path
class _HandlerSuite(object):
"""A handler suite holder class."""
def __init__(self, do_extra_handshake, transfer_data,
passive_closing_handshake):
self.do_extra_handshake = do_extra_handshake
self.transfer_data = transfer_data
self.passive_closing_handshake = passive_closing_handshake
def _source_handler_file(handler_definition):
"""Source a handler definition string.
Args:
handler_definition: a string containing Python statements that define
handler functions.
"""
global_dic = {}
try:
exec handler_definition in global_dic
except Exception:
raise DispatchException('Error in sourcing handler:' +
util.get_stack_trace())
passive_closing_handshake_handler = None
try:
passive_closing_handshake_handler = _extract_handler(
global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
except Exception:
passive_closing_handshake_handler = (
_default_passive_closing_handshake_handler)
return _HandlerSuite(
_extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
_extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
passive_closing_handshake_handler)
def _extract_handler(dic, name):
"""Extracts a callable with the specified name from the given dictionary
dic.
"""
if name not in dic:
raise DispatchException('%s is not defined.' % name)
handler = dic[name]
if not callable(handler):
raise DispatchException('%s is not callable.' % name)
return handler
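# Example handler definition file (a sketch; a real deployment would place this
# in a file named e.g. echo_wsh.py under the handler root directory):
#
#   from mod_pywebsocket import msgutil
#
#   def web_socket_do_extra_handshake(request):
#       pass  # accept every connection
#
#   def web_socket_transfer_data(request):
#       while True:
#           message = msgutil.receive_message(request)
#           msgutil.send_message(request, message)
#
# A closed connection surfaces as ConnectionTerminatedException, which
# Dispatcher.transfer_data() below catches. web_socket_passive_closing_handshake
# is optional; when omitted, the default handler defined above is used.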
class Dispatcher(object):
"""Dispatches WebSocket requests.
This class maintains a map from resource name to handlers.
"""
def __init__(
self, root_dir, scan_dir=None,
allow_handlers_outside_root_dir=True):
"""Construct an instance.
Args:
root_dir: The directory where handler definition files are
placed.
scan_dir: The directory where handler definition files are
searched. scan_dir must be a directory under root_dir,
including root_dir itself. If scan_dir is None,
root_dir is used as scan_dir. scan_dir can be useful
in saving scan time when root_dir contains many
subdirectories.
allow_handlers_outside_root_dir: Scans handler files even if their
canonical path is not under root_dir.
"""
self._logger = util.get_class_logger(self)
self._handler_suite_map = {}
self._source_warnings = []
if scan_dir is None:
scan_dir = root_dir
if not os.path.realpath(scan_dir).startswith(
os.path.realpath(root_dir)):
raise DispatchException('scan_dir:%s must be a directory under '
'root_dir:%s.' % (scan_dir, root_dir))
self._source_handler_files_in_dir(
root_dir, scan_dir, allow_handlers_outside_root_dir)
def add_resource_path_alias(self,
alias_resource_path, existing_resource_path):
"""Add resource path alias.
Once added, request to alias_resource_path would be handled by
handler registered for existing_resource_path.
Args:
alias_resource_path: alias resource path
existing_resource_path: existing resource path
"""
try:
handler_suite = self._handler_suite_map[existing_resource_path]
self._handler_suite_map[alias_resource_path] = handler_suite
except KeyError:
raise DispatchException('No handler for: %r' %
existing_resource_path)
def source_warnings(self):
"""Return warnings in sourcing handlers."""
return self._source_warnings
def do_extra_handshake(self, request):
"""Do extra checking in WebSocket handshake.
Select a handler based on request.uri and call its
web_socket_do_extra_handshake function.
Args:
request: mod_python request.
Raises:
DispatchException: when handler was not found
AbortedByUserException: when user handler abort connection
HandshakeException: when opening handshake failed
"""
handler_suite = self.get_handler_suite(request.ws_resource)
if handler_suite is None:
raise DispatchException('No handler for: %r' % request.ws_resource)
do_extra_handshake_ = handler_suite.do_extra_handshake
try:
do_extra_handshake_(request)
except handshake.AbortedByUserException, e:
# Re-raise to tell the caller of this function to finish this
# connection without sending any error.
self._logger.debug('%s', util.get_stack_trace())
raise
except Exception, e:
util.prepend_message_to_exception(
'%s raised exception for %s: ' % (
_DO_EXTRA_HANDSHAKE_HANDLER_NAME,
request.ws_resource),
e)
raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
def transfer_data(self, request):
"""Let a handler transfer_data with a WebSocket client.
Select a handler based on request.ws_resource and call its
web_socket_transfer_data function.
Args:
request: mod_python request.
Raises:
DispatchException: when handler was not found
AbortedByUserException: when user handler abort connection
"""
# TODO(tyoshino): Terminate underlying TCP connection if possible.
try:
if mux.use_mux(request):
mux.start(request, self)
else:
handler_suite = self.get_handler_suite(request.ws_resource)
if handler_suite is None:
raise DispatchException('No handler for: %r' %
request.ws_resource)
transfer_data_ = handler_suite.transfer_data
transfer_data_(request)
if not request.server_terminated:
request.ws_stream.close_connection()
# Catch non-critical exceptions the handler didn't handle.
except handshake.AbortedByUserException, e:
self._logger.debug('%s', util.get_stack_trace())
raise
except msgutil.BadOperationException, e:
self._logger.debug('%s', e)
request.ws_stream.close_connection(
common.STATUS_INTERNAL_ENDPOINT_ERROR)
except msgutil.InvalidFrameException, e:
# InvalidFrameException must be caught before
# ConnectionTerminatedException that catches InvalidFrameException.
self._logger.debug('%s', e)
request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
except msgutil.UnsupportedFrameException, e:
self._logger.debug('%s', e)
request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
except stream.InvalidUTF8Exception, e:
self._logger.debug('%s', e)
request.ws_stream.close_connection(
common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
except msgutil.ConnectionTerminatedException, e:
self._logger.debug('%s', e)
except Exception, e:
# Any other exceptions are forwarded to the caller of this
# function.
util.prepend_message_to_exception(
'%s raised exception for %s: ' % (
_TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
e)
raise
def passive_closing_handshake(self, request):
"""Prepare code and reason for responding client initiated closing
handshake.
"""
handler_suite = self.get_handler_suite(request.ws_resource)
if handler_suite is None:
return _default_passive_closing_handshake_handler(request)
return handler_suite.passive_closing_handshake(request)
def get_handler_suite(self, resource):
"""Retrieves two handlers (one for extra handshake processing, and one
for data transfer) for the given request as a HandlerSuite object.
"""
fragment = None
if '#' in resource:
resource, fragment = resource.split('#', 1)
if '?' in resource:
resource = resource.split('?', 1)[0]
handler_suite = self._handler_suite_map.get(resource)
if handler_suite and fragment:
raise DispatchException('Fragment identifiers MUST NOT be used on '
'WebSocket URIs',
common.HTTP_STATUS_BAD_REQUEST)
return handler_suite
def _source_handler_files_in_dir(
self, root_dir, scan_dir, allow_handlers_outside_root_dir):
"""Source all the handler source files in the scan_dir directory.
The resource path is determined relative to root_dir.
"""
# We build a map from resource to handler code assuming that there's
# only one path from root_dir to scan_dir and it can be obtained by
# comparing realpath of them.
# Here we cannot use abspath. See
# https://bugs.webkit.org/show_bug.cgi?id=31603
convert = _create_path_to_resource_converter(root_dir)
scan_realpath = os.path.realpath(scan_dir)
root_realpath = os.path.realpath(root_dir)
for path in _enumerate_handler_file_paths(scan_realpath):
if (not allow_handlers_outside_root_dir and
(not os.path.realpath(path).startswith(root_realpath))):
self._logger.debug(
'Canonical path of %s is not under root directory' %
path)
continue
try:
handler_suite = _source_handler_file(open(path).read())
except DispatchException, e:
self._source_warnings.append('%s: %s' % (path, e))
continue
resource = convert(path)
if resource is None:
self._logger.debug(
'Path to resource conversion on %s failed' % path)
else:
self._handler_suite_map[convert(path)] = handler_suite
# vi:sts=4 sw=4 et
|
mpl-2.0
|
YunYinORG/find
|
config.exmaple.py
|
2
|
1526
|
#!/usr/bin/env python
# coding=utf-8
# Database settings
DB_TYPE = 'mysql'
try:
import sae.const
IS_SAE = True
DB_NAME = sae.const.MYSQL_DB # database name
DB_USER = sae.const.MYSQL_USER # user name
DB_PWD = sae.const.MYSQL_PASS # password
DB_HOST = sae.const.MYSQL_HOST # master host (read/write)
DB_PORT = int(sae.const.MYSQL_PORT) # port; the constant is a str, convert to int as the framework requires
# sae.const.MYSQL_HOST_S
except Exception:
IS_SAE = False
DB_HOST = 'localhost'
DB_USER = 'root'
DB_NAME = 'yunyinfind'
DB_PORT = 3306
DB_PWD = ''
# YunYin API
YUNYIN_KEY = "yunyincard"
YUNYIN_API = "http://api.yunyin.org/"
# Weibo API
WEIBO_COOKIE="weibo.cn cookie string"
WEIBO_KEY=2681865267
# WEIBO_SECRET='f92c633f332d26009bc71c6bb269683e'
# WEIBO_CALLBACK='http://2.newfuturepy.sinaapp.com/weibo/callback'
# WEIBO_ACCOUNT='xxxxxx'
# WEIBO_PWD='xxxxxx'
# Mail
MAIL_SMTP = 'smtp.exmail.qq.com'
MAIL_PORT = 465
MAIL_USER = 'test@mail.yunyin.org'
MAIL_PWD = ''
# SMS
SMS_ACCOUNT = ''
SMS_APPID = ''
SMS_TOKEN = ''
SMS_NOTIFY = ''
SMS_LOGIN = ''
SMS_BIND = ''
# Cookie encryption key
COOKIE_KEY = 'qhsdfsffffffffff75V4d7F-sdfsf.wN'
# School ID regular expressions
REGX_SHOOL = (
'^(\d{7}|\d{10})$', # generic pattern
'^(([1][0-5]\d{5})|([1|2]1201[0-5]\d{4}))$', # Nankai University
'^[1-4]01[0-5]\d{6}$', # Tianjin University
'^((0[1-5][01][0-9])|(1[139][05][1-4]))1[2-5]\d{4}$' # Tianjin vocational college of commerce
)
# Quick-view base URL
VIEW_BASE="http://find.yunyin.org/record/v/"
|
apache-2.0
|
jtyr/ansible-modules-extras
|
monitoring/airbrake_deployment.py
|
57
|
3911
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: "Bruce Pennypacker (@bpennypacker)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
required: false
repo:
description:
- URL of the project repository
required: false
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- airbrake_deployment: token=AAAAAA
environment='staging'
user='ansible'
revision=4.2
'''
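# An additional example (a sketch; the Errbit host and token below are
# placeholders, illustrating the url and validate_certs options documented above):
#
# - airbrake_deployment:
#     token: AAAAAA
#     environment: production
#     revision: '4.2'
#     url: 'https://errbit.example.com/deploys.txt'
#     validate_certs: no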
import urllib
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True, no_log=True),
environment=dict(required=True),
user=dict(required=False),
repo=dict(required=False),
revision=dict(required=False),
url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
# build list of params
params = {}
if module.params["environment"]:
params["deploy[rails_env]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[scm_repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[scm_revision]"] = module.params["revision"]
params["api_key"] = module.params["token"]
url = module.params.get('url')
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# Send the data to airbrake
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
gpl-3.0
|
UniversalMasterEgg8679/ansible
|
test/units/parsing/vault/test_vault.py
|
34
|
16056
|
# -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import binascii
import io
import os
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible import errors
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.vault import VaultLib
from ansible.parsing import vault
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultIsEncrypted(unittest.TestCase):
def test_bytes_not_encrypted(self):
b_data = b"foobar"
self.assertFalse(vault.is_encrypted(b_data))
def test_bytes_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(vault.is_encrypted(b_data))
def test_text_not_encrypted(self):
b_data = to_text(b"foobar")
self.assertFalse(vault.is_encrypted(b_data))
def test_text_encrypted(self):
b_data = to_text(b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible"))
self.assertTrue(vault.is_encrypted(b_data))
def test_invalid_text_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s"% u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
self.assertFalse(vault.is_encrypted(data))
def test_invalid_bytes_not_ascii(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s"% u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data, encoding='utf-8')
self.assertFalse(vault.is_encrypted(b_data))
class TestVaultIsEncryptedFile(unittest.TestCase):
def test_binary_file_handle_not_encrypted(self):
b_data = b"foobar"
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_not_encrypted(self):
data = u"foobar"
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_encrypted(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_encrypted(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % to_text(hexlify(b"ansible"))
data_fo = io.StringIO(data)
self.assertTrue(vault.is_encrypted_file(data_fo))
def test_binary_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
b_data = to_bytes(data)
b_data_fo = io.BytesIO(b_data)
self.assertFalse(vault.is_encrypted_file(b_data_fo))
def test_text_file_handle_invalid(self):
data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % u"ァ ア ィ イ ゥ ウ ェ エ ォ オ カ ガ キ ギ ク グ ケ "
data_fo = io.StringIO(data)
self.assertFalse(vault.is_encrypted_file(data_fo))
def test_file_already_read_from_finds_header(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(42) # Arbitrary number
self.assertTrue(vault.is_encrypted_file(b_data_fo))
def test_file_already_read_from_saves_file_pos(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
b_data_fo.read(69) # Arbitrary number
vault.is_encrypted_file(b_data_fo)
self.assertEqual(b_data_fo.tell(), 69)
def test_file_with_offset(self):
b_data = b"JUNK$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4))
def test_file_with_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, count=vault_length))
def test_file_with_offset_and_count(self):
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible\ntesting\nfile pos")
vault_length = len(b_data)
b_data = b'JUNK' + b_data + u'ァ ア'.encode('utf-8')
b_data_fo = io.BytesIO(b_data)
self.assertTrue(vault.is_encrypted_file(b_data_fo, start_pos=4, count=vault_length))
class TestVaultCipherAes256(unittest.TestCase):
def setUp(self):
self.vault_cipher = vault.VaultAES256()
def test(self):
self.assertIsInstance(self.vault_cipher, vault.VaultAES256)
# TODO: tag these as slow tests
def test_create_key(self):
b_password = b'hunter42'
b_salt = os.urandom(32)
b_key = self.vault_cipher._create_key(b_password, b_salt, keylength=32, ivlength=16)
self.assertIsInstance(b_key, six.binary_type)
def test_create_key_known(self):
b_password = b'hunter42'
# A fixed salt
b_salt = b'q' * 32 # q is the most random letter.
b_key = self.vault_cipher._create_key(b_password, b_salt, keylength=32, ivlength=16)
self.assertIsInstance(b_key, six.binary_type)
# verify we get the same answer
# we could potentially run a few iterations of this and time it to see if it's roughly constant time
# and or that it exceeds some minimal time, but that would likely cause unreliable fails, esp in CI
b_key_2 = self.vault_cipher._create_key(b_password, b_salt, keylength=32, ivlength=16)
self.assertIsInstance(b_key, six.binary_type)
self.assertEqual(b_key, b_key_2)
def test_is_equal_is_equal(self):
self.assertTrue(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwxyz'))
def test_is_equal_unequal_length(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'abcdefghijklmnopqrstuvwx and sometimes y'))
def test_is_equal_not_equal(self):
self.assertFalse(self.vault_cipher._is_equal(b'abcdefghijklmnopqrstuvwxyz', b'AbcdefghijKlmnopQrstuvwxZ'))
def test_is_equal_empty(self):
self.assertTrue(self.vault_cipher._is_equal(b'', b''))
def test_is_equal_non_ascii_equal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
self.assertTrue(self.vault_cipher._is_equal(utf8_data, utf8_data))
def test_is_equal_non_ascii_unequal(self):
utf8_data = to_bytes(u'私はガラスを食べられます。それは私を傷つけません。')
utf8_data2 = to_bytes(u'Pot să mănânc sticlă și ea nu mă rănește.')
# Test for the len optimization path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data2))
# Test for the slower, char by char comparison path
self.assertFalse(self.vault_cipher._is_equal(utf8_data, utf8_data[:-1] + b'P'))
def test_is_equal_non_bytes(self):
""" Anything not a byte string should raise a TypeError """
self.assertRaises(TypeError, self.vault_cipher._is_equal, u"One fish", b"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"One fish", u"two fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, 1, b"red fish")
self.assertRaises(TypeError, self.vault_cipher._is_equal, b"blue fish", 2)
class TestVaultLib(unittest.TestCase):
def setUp(self):
self.v = VaultLib('test-vault-password')
def test_encrypt(self):
plaintext = u'Some text to encrypt in a café'
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_encrypt_bytes(self):
plaintext = to_bytes(u'Some text to encrypt in a café')
b_vaulttext = self.v.encrypt(plaintext)
self.assertIsInstance(b_vaulttext, six.binary_type)
b_header = b'$ANSIBLE_VAULT;1.1;AES256\n'
self.assertEqual(b_vaulttext[:len(b_header)], b_header)
def test_is_encrypted(self):
self.assertFalse(self.v.is_encrypted(b"foobar"), msg="encryption check on plaintext yielded false positive")
b_data = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
self.assertTrue(self.v.is_encrypted(b_data), msg="encryption check on headered text failed")
def test_format_output(self):
self.v.cipher_name = "TEST"
b_ciphertext = b"ansible"
b_vaulttext = self.v._format_output(b_ciphertext)
b_lines = b_vaulttext.split(b'\n')
self.assertGreater(len(b_lines), 1, msg="failed to properly add header")
b_header = b_lines[0]
self.assertTrue(b_header.endswith(b';TEST'), msg="header does not end with cipher name")
b_header_parts = b_header.split(b';')
self.assertEqual(len(b_header_parts), 3, msg="header has the wrong number of parts")
self.assertEqual(b_header_parts[0], b'$ANSIBLE_VAULT', msg="header does not start with $ANSIBLE_VAULT")
self.assertEqual(b_header_parts[1], self.v.b_version, msg="header version is incorrect")
self.assertEqual(b_header_parts[2], b'TEST', msg="header does not end with cipher name")
def test_split_header(self):
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
b_ciphertext = self.v._split_header(b_vaulttext)
b_lines = b_ciphertext.split(b'\n')
self.assertEqual(b_lines[0], b"ansible", msg="Payload was not properly split from the header")
self.assertEqual(self.v.cipher_name, u'TEST', msg="cipher name was not properly set")
self.assertEqual(self.v.b_version, b"9.9", msg="version was not properly set")
def test_encrypt_decrypt_aes(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
self.v.cipher_name = u'AES'
self.v.b_password = b'ansible'
# AES encryption code has been removed, so this is old output for
# AES-encrypted 'foobar' with password 'ansible'.
b_vaulttext = b'''$ANSIBLE_VAULT;1.1;AES
53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3
fe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e
786a5a15efeb787e1958cbdd480d076c
'''
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
self.v.cipher_name = u'AES256'
plaintext = u"foobar"
b_vaulttext = self.v.encrypt(plaintext)
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertNotEqual(b_vaulttext, b"foobar", msg="encryption failed")
self.assertEqual(b_plaintext, b"foobar", msg="decryption failed")
def test_encrypt_decrypt_aes256_existing_vault(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
self.v.cipher_name = u'AES256'
b_orig_plaintext = b"Setec Astronomy"
vaulttext = u'''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_plaintext = self.v.decrypt(vaulttext)
self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
b_vaulttext = to_bytes(vaulttext, encoding='ascii', errors='strict')
b_plaintext = self.v.decrypt(b_vaulttext)
self.assertEqual(b_plaintext, b_orig_plaintext, msg="decryption failed")
def test_encrypt_decrypt_aes256_bad_hmac(self):
# FIXME This test isn't working quite yet.
raise SkipTest
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
self.v.cipher_name = 'AES256'
# plaintext = "Setec Astronomy"
enc_data = '''$ANSIBLE_VAULT;1.1;AES256
33363965326261303234626463623963633531343539616138316433353830356566396130353436
3562643163366231316662386565383735653432386435610a306664636137376132643732393835
63383038383730306639353234326630666539346233376330303938323639306661313032396437
6233623062366136310a633866373936313238333730653739323461656662303864663666653563
3138'''
b_data = to_bytes(enc_data, errors='strict', encoding='utf-8')
b_data = self.v._split_header(b_data)
foo = binascii.unhexlify(b_data)
lines = foo.splitlines()
# line 0 is salt, line 1 is hmac, line 2+ is ciphertext
b_salt = lines[0]
b_hmac = lines[1]
b_ciphertext_data = b'\n'.join(lines[2:])
b_ciphertext = binascii.unhexlify(b_ciphertext_data)
# b_orig_ciphertext = b_ciphertext[:]
# now muck with the text
# b_munged_ciphertext = b_ciphertext[:10] + b'\x00' + b_ciphertext[11:]
# b_munged_ciphertext = b_ciphertext
# assert b_orig_ciphertext != b_munged_ciphertext
b_ciphertext_data = binascii.hexlify(b_ciphertext)
b_payload = b'\n'.join([b_salt, b_hmac, b_ciphertext_data])
# reformat
b_invalid_ciphertext = self.v._format_output(b_payload)
# assert we throw an error
self.v.decrypt(b_invalid_ciphertext)
def test_encrypt_encrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
self.v.cipher_name = u'AES'
b_vaulttext = b"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
vaulttext = to_text(b_vaulttext, errors='strict')
self.assertRaises(errors.AnsibleError, self.v.encrypt, b_vaulttext)
self.assertRaises(errors.AnsibleError, self.v.encrypt, vaulttext)
def test_decrypt_decrypted(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
plaintext = u"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, plaintext)
b_plaintext = b"ansible"
self.assertRaises(errors.AnsibleError, self.v.decrypt, b_plaintext)
def test_cipher_not_set(self):
# not setting the cipher should default to AES256
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
plaintext = u"ansible"
self.v.encrypt(plaintext)
self.assertEqual(self.v.cipher_name, "AES256")
|
gpl-3.0
|
yangchandle/django_ecommerce
|
env/lib/python3.5/site-packages/pip/req/req_set.py
|
14
|
32320
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
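# Example (a sketch illustrating the insertion-ordered mapping above; the
# stand-in values are plain strings rather than real InstallRequirement objects):
#
#   reqs = Requirements()
#   reqs['flask'] = 'req-for-flask'
#   reqs['jinja2'] = 'req-for-jinja2'
#   reqs.keys()      # ['flask', 'jinja2'] -- insertion order is preserved
#   reqs.values()    # ['req-for-flask', 'req-for-jinja2']
#   'flask' in reqs  # True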
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers():
logger.warning("Ignoring %s: markers %r don't match your "
"environment", install_req.name,
install_req.markers)
return []
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specifier == install_req.req.specifier):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
                # it's handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised later when we
                        # re-try the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement already %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder, self.upgrade, require_hashes)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
                # Then, inside find_requirement, existing_applicable -> False.
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
                    # We always delete unpacked sdists after pip runs.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# when doing 'pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
                # req_to_install.req is only available after unpack for URL
                # packages; repeat check_if_exists to uninstall-on-upgrade
                # (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
more_reqs = []
def add_req(subreq):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
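    # Illustrative note on the ordering above (a sketch, not extra logic):
    # if the user asked for A and B and self._dependencies maps A -> [C],
    # schedule() emits C before A, giving roughly [C, A, B]; constraints and
    # already-satisfied requirements are skipped entirely.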
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
|
mit
|
ramcn/demo3
|
venv/lib/python3.4/site-packages/oauthlib/oauth2/rfc6749/grant_types/refresh_token.py
|
64
|
5240
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from .base import GrantTypeBase
from .. import errors, utils
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class RefreshTokenGrant(GrantTypeBase):
"""`Refresh token grant`_
.. _`Refresh token grant`: http://tools.ietf.org/html/rfc6749#section-6
"""
@property
def issue_new_refresh_tokens(self):
return True
def __init__(self, request_validator=None, issue_new_refresh_tokens=True):
self.request_validator = request_validator or RequestValidator()
def create_token_response(self, request, token_handler):
"""Create a new access token from a refresh_token.
If valid and authorized, the authorization server issues an access
token as described in `Section 5.1`_. If the request failed
verification or is invalid, the authorization server returns an error
response as described in `Section 5.2`_.
The authorization server MAY issue a new refresh token, in which case
the client MUST discard the old refresh token and replace it with the
new refresh token. The authorization server MAY revoke the old
refresh token after issuing a new refresh token to the client. If a
new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request.
.. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
log.debug('Validating refresh token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
return headers, e.json, e.status_code
token = token_handler.create_token(request,
refresh_token=self.issue_new_refresh_tokens)
log.debug('Issuing new token to client id %r (%r), %r.',
request.client_id, request.client, token)
return headers, json.dumps(token), 200
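    # Minimal usage sketch (hedged; assumes a concrete RequestValidator
    # subclass and a BearerToken handler from oauthlib.oauth2 - the names
    # below are illustrative only):
    #   validator = MyRequestValidator()
    #   grant = RefreshTokenGrant(request_validator=validator)
    #   headers, body, status = grant.create_token_response(
    #       request, BearerToken(validator))
    #   # body is a JSON string carrying the new access/refresh token.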
def validate_token_request(self, request):
# REQUIRED. Value MUST be set to "refresh_token".
if request.grant_type != 'refresh_token':
raise errors.UnsupportedGrantTypeError(request=request)
if request.refresh_token is None:
raise errors.InvalidRequestError(
description='Missing refresh token parameter.',
request=request)
# Because refresh tokens are typically long-lasting credentials used to
# request additional access tokens, the refresh token is bound to the
# client to which it was issued. If the client type is confidential or
# the client was issued client credentials (or assigned other
# authentication requirements), the client MUST authenticate with the
# authorization server as described in Section 3.2.1.
# http://tools.ietf.org/html/rfc6749#section-3.2.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
# REQUIRED. The refresh token issued to the client.
log.debug('Validating refresh token %s for client %r.',
request.refresh_token, request.client)
if not self.request_validator.validate_refresh_token(
request.refresh_token, request.client, request):
log.debug('Invalid refresh token, %s, for client %r.',
request.refresh_token, request.client)
raise errors.InvalidGrantError(request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.refresh_token, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all((s in original_scopes for s in request.scopes))
and not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
                log.debug('Refresh token %s lacks requested scopes, %r.',
request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
|
mit
|
mrquim/mrquimrepo
|
plugin.program.indigo/libs/requests/packages/chardet/chardistribution.py
|
2755
|
9226
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
            # we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
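    # Worked example of the ratio above, with made-up counts: 400 frequent
    # characters out of 1000 total and a typical ratio of 0.75 give
    # r = 400 / ((1000 - 400) * 0.75) = 0.888..., which is returned because
    # it is below SURE_YES.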
def got_enough_data(self):
        # It is not necessary to receive all data to draw a conclusion.
        # For charset detection, a certain amount of data is enough.
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
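# Worked example of the formula above: the 2-byte sequence 0xC5 0xA3 maps to
# order = 94 * (0xC5 - 0xC4) + (0xA3 - 0xA1) = 94 + 2 = 96, i.e. the third
# character in the second 94-column row of the EUC-TW table.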
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
|
gpl-2.0
|
cowai/jottalib
|
tests/test_jottacloud.py
|
2
|
3100
|
# -*- encoding: utf-8 -*-
'Tests for jottacloud.py'
#
# This file is part of jottalib.
#
# jottalib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jottalib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jottafs. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015 Håvard Gulldahl <havard@gulldahl.no>
# metadata
__author__ = 'havard@gulldahl.no'
# import standardlib
import os, logging, tempfile, random, hashlib
try:
from io import StringIO # py3
except ImportError:
from cStringIO import StringIO # py2
# import py.test
import pytest # pip install pytest
try:
from xattr import xattr # pip install xattr
HAS_XATTR=True
except ImportError: # no xattr installed, not critical because it is optional
HAS_XATTR=False
# import jotta
from jottalib import JFS, __version__, jottacloud
jfs = JFS.JFS() # get username and password from environment or .netrc
TESTFILEDATA="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla est dolor, convallis fermentum sapien in, fringilla congue ligula. Fusce at justo ac felis vulputate laoreet vel at metus. Aenean justo lacus, porttitor dignissim imperdiet a, elementum cursus ligula. Vivamus eu est viverra, pretium arcu eget, imperdiet eros. Curabitur in bibendum."""
class TestXattr:
@pytest.mark.skipif(HAS_XATTR==False,
reason="requires xattr")
def test_setget(self):
temp = tempfile.NamedTemporaryFile()
temp.write(random.randint(0, 10)*TESTFILEDATA)
temp.flush()
temp.seek(0)
md5 = hashlib.md5(temp.read()).hexdigest()
assert jottacloud.setxattrhash(temp.name, md5) is not False
assert jottacloud.getxattrhash(temp.name) == md5
x = xattr(temp.name)
assert x.get('user.jottalib.md5') == md5
assert x.get('user.jottalib.filesize') == str(os.path.getsize(temp.name)) # xattr always stores strings
def test_get_jottapath(tmpdir):
topdir = tmpdir.mkdir("topdir")
subdir = topdir.mkdir("subdir1").mkdir("subdir2")
jottapath = jottacloud.get_jottapath(str(topdir), str(subdir), "/TEST_ROOT")
assert jottapath == "/TEST_ROOT/topdir/subdir1/subdir2"
# TODO:
# def get_jottapath(localtopdir, dirpath, jottamountpoint):
# def is_file(jottapath, JFS):
# def filelist(jottapath, JFS):
# def compare(localtopdir, jottamountpoint, JFS, followlinks=False, exclude_patterns=None):
# def _decode_filename(f):
# def new(localfile, jottapath, JFS):
# def resume(localfile, jottafile, JFS):
# def replace_if_changed(localfile, jottapath, JFS):
# def delete(jottapath, JFS):
# def mkdir(jottapath, JFS):
# def iter_tree(jottapath, JFS):
|
gpl-3.0
|
nuuuboo/odoo
|
addons/mail/wizard/invite.py
|
268
|
5847
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
class invite_wizard(osv.osv_memory):
""" Wizard to invite partners and make them followers. """
_name = 'mail.wizard.invite'
_description = 'Invite wizard'
def default_get(self, cr, uid, fields, context=None):
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
user_name = self.pool.get('res.users').name_get(cr, uid, [uid], context=context)[0][1]
model = result.get('res_model')
res_id = result.get('res_id')
if 'message' in fields and model and res_id:
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', self.pool[model]._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
document_name = self.pool[model].name_get(cr, uid, [res_id], context=context)[0][1]
            message = _('<div><p>Hello,</p><p>%s invited you to follow %s document: %s.</p></div>') % (user_name, model_name, document_name)
result['message'] = message
elif 'message' in fields:
result['message'] = _('<div><p>Hello,</p><p>%s invited you to follow a new document.</p></div>') % user_name
return result
_columns = {
'res_model': fields.char('Related Document Model',
required=True, select=1,
help='Model of the followed resource'),
'res_id': fields.integer('Related Document ID', select=1,
help='Id of the followed resource'),
'partner_ids': fields.many2many('res.partner', string='Recipients',
help="List of partners that will be added as follower of the current document."),
'message': fields.html('Message'),
'send_mail': fields.boolean('Send Email',
help="If checked, the partners will receive an email warning they have been "
"added in the document's followers."),
}
_defaults = {
'send_mail': True,
}
def add_followers(self, cr, uid, ids, context=None):
for wizard in self.browse(cr, uid, ids, context=context):
model_obj = self.pool[wizard.res_model]
document = model_obj.browse(cr, uid, wizard.res_id, context=context)
# filter partner_ids to get the new followers, to avoid sending email to already following partners
new_follower_ids = [p.id for p in wizard.partner_ids if p not in document.message_follower_ids]
model_obj.message_subscribe(cr, uid, [wizard.res_id], new_follower_ids, context=context)
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', model_obj._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
# send an email if option checked and if a message exists (do not send void emails)
if wizard.send_mail and wizard.message and not wizard.message == '<br>': # when deleting the message, cleditor keeps a <br>
# add signature
# FIXME 8.0: use notification_email_send, send a wall message and let mail handle email notification + message box
signature_company = self.pool.get('mail.notification').get_signature_footer(cr, uid, user_id=uid, res_model=wizard.res_model, res_id=wizard.res_id, context=context)
wizard.message = tools.append_content_to_html(wizard.message, signature_company, plaintext=False, container_tag='div')
# send mail to new followers
# the invite wizard should create a private message not related to any object -> no model, no res_id
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'model': wizard.res_model,
'res_id': wizard.res_id,
'record_name': document.name_get()[0][1],
'email_from': self.pool['mail.message']._get_default_from(cr, uid, context=context),
'reply_to': self.pool['mail.message']._get_default_from(cr, uid, context=context),
'subject': _('Invitation to follow %s: %s') % (model_name, document.name_get()[0][1]),
'body_html': '%s' % wizard.message,
'auto_delete': True,
'message_id': self.pool['mail.message']._get_message_id(cr, uid, {'no_auto_thread': True}, context=context),
'recipient_ids': [(4, id) for id in new_follower_ids]
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
return {'type': 'ir.actions.act_window_close'}
|
agpl-3.0
|
Endika/odoo
|
addons/analytic_user_function/__openerp__.py
|
260
|
2015
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Jobs on Contracts',
'version': '1.0',
'category': 'Sales Management',
'description': """
This module allows you to define what is the default function of a specific user on a given account.
====================================================================================================
This is mostly used when a user encodes his timesheet: the values are retrieved
and the fields are auto-filled. But the possibility to change these values is
still available.
Obviously if no data has been recorded for the current account, the default
value is given as usual by the employee data so that this module is perfectly
compatible with older configurations.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['hr_timesheet_sheet'],
'data': ['analytic_user_function_view.xml', 'security/ir.model.access.csv'],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
windyuuy/opera
|
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/changelog.py
|
122
|
4368
|
# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for ChangeLog files."""
import re
from common import TabChecker
from webkitpy.common.checkout.changelog import parse_bug_id_from_changelog
class ChangeLogChecker(object):
"""Processes text lines for checking style."""
categories = set(['changelog/bugnumber', 'changelog/filechangedescriptionwhitespace'])
def __init__(self, file_path, handle_style_error, should_line_be_checked):
self.file_path = file_path
self.handle_style_error = handle_style_error
self.should_line_be_checked = should_line_be_checked
self._tab_checker = TabChecker(file_path, handle_style_error)
def check_entry(self, first_line_checked, entry_lines):
if not entry_lines:
return
for line in entry_lines:
if parse_bug_id_from_changelog(line):
break
if re.search("Unreviewed", line, re.IGNORECASE):
break
if re.search("build", line, re.IGNORECASE) and re.search("fix", line, re.IGNORECASE):
break
else:
self.handle_style_error(first_line_checked,
"changelog/bugnumber", 5,
"ChangeLog entry has no bug number")
# check file change descriptions for style violations
line_no = first_line_checked - 1
for line in entry_lines:
line_no = line_no + 1
# filter file change descriptions
if not re.match('\s*\*\s', line):
continue
if re.search(':\s*$', line) or re.search(':\s', line):
continue
self.handle_style_error(line_no,
"changelog/filechangedescriptionwhitespace", 5,
"Need whitespace between colon and description")
# check for a lingering "No new tests. (OOPS!)" left over from prepare-changeLog.
line_no = first_line_checked - 1
for line in entry_lines:
line_no = line_no + 1
if re.match('\s*No new tests. \(OOPS!\)$', line):
self.handle_style_error(line_no,
"changelog/nonewtests", 5,
"You should remove the 'No new tests' and either add and list tests, or explain why no new tests were possible.")
def check(self, lines):
self._tab_checker.check(lines)
first_line_checked = 0
entry_lines = []
for line_index, line in enumerate(lines):
if not self.should_line_be_checked(line_index + 1):
# If we transitioned from finding changed lines to
# unchanged lines, then we are done.
if first_line_checked:
break
continue
if not first_line_checked:
first_line_checked = line_index + 1
entry_lines.append(line)
self.check_entry(first_line_checked, entry_lines)
|
bsd-3-clause
|
ashwyn/eden-message_parser
|
modules/pygsm/textsmshandler.py
|
62
|
9906
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import errors, traceback, message
import re, datetime, time
import StringIO
import pytz
from smshandler import SmsHandler
class TextSmsHandler(SmsHandler):
SCTS_FMT = "%y/%m/%d,%H:%M:%S"
CMGL_MATCHER=re.compile(r'^\+CMGL: (\d+),"(.+?)","(.+?)",*?,"(.+?)".*?$')
CMGL_STATUS='"REC UNREAD"'
def __init__(self, modem):
SmsHandler.__init__(self, modem)
def get_mode_cmd(self):
return "AT+CMGF=1"
def send_sms(self, recipient, text):
"""Sends an SMS to _recipient_ containing _text_. Some networks
will automatically chunk long messages into multiple parts,
           and reassemble them upon delivery, but some will silently
drop them. At the moment, pyGSM does nothing to avoid this,
so try to keep _text_ under 160 characters."""
old_mode = None
try:
try:
# cast the text to a string, to check that
# it doesn't contain non-ascii characters
try:
text = str(text)
# uh-oh. unicode ahoy
except UnicodeEncodeError:
# fetch and store the current mode (so we can
# restore it later), and override it with UCS2
csmp = self.modem.query("AT+CSMP?", "+CSMP:")
if csmp is not None:
old_mode = csmp.split(",")
mode = old_mode[:]
mode[3] = "8"
# enable hex mode, and set the encoding
# to UCS2 for the full character set
self.modem.command('AT+CSCS="HEX"')
self.modem.command("AT+CSMP=%s" % ",".join(mode))
text = text.encode("utf-16").encode("hex")
# initiate the sms, and give the device a second
# to raise an error. unfortunately, we can't just
# wait for the "> " prompt, because some modems
# will echo it FOLLOWED BY a CMS error
result = self.modem.command(
'AT+CMGS=\"%s\"' % (recipient),
read_timeout=1)
# if no error is raised within the timeout period,
# and the text-mode prompt WAS received, send the
# sms text, wait until it is accepted or rejected
# (text-mode messages are terminated with ascii char 26
# "SUBSTITUTE" (ctrl+z)), and return True (message sent)
except errors.GsmReadTimeoutError, err:
if err.pending_data[0] == ">":
self.modem.command(text, write_term=chr(26))
return True
# a timeout was raised, but no prompt nor
# error was received. i have no idea what
# is going on, so allow the error to propagate
else:
raise
# for all other errors...
# (likely CMS or CME from device)
except Exception, err:
traceback.print_exc(err)
# whatever went wrong, break out of the
# message prompt. if this is missed, all
# subsequent writes will go into the message!
self.modem.break_out_of_prompt()
# rule of thumb: pyGSM is meant to be embedded,
# so DO NOT EVER allow exceptions to propagate
# (obviously, this sucks. there should be an
# option, at least, but i'm being cautious)
return None
finally:
# if the mode was overridden above, (if this
# message contained unicode), switch it back
if old_mode is not None:
self.modem.command("AT+CSMP=%s" % ",".join(old_mode))
self.modem.command('AT+CSCS="GSM"')
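    # Hedged usage sketch for send_sms (assumes a pyGSM modem object; in
    # practice the handler is driven by the modem rather than used directly):
    #   handler = TextSmsHandler(modem)
    #   ok = handler.send_sms("+4798765432", "Hello from pyGSM")
    #   # ok is True on success, None if the modem reported an error.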
# returns a list of messages
def parse_stored_messages(self, lines):
# loop through all the lines attempting to match CMGL lines (the header)
# and then match NOT CMGL lines (the content)
# need to seed the loop first
messages = []
if len(lines)>0:
m=self.CMGL_MATCHER.match(lines[0])
while len(lines)>0:
if m is None:
# couldn't match OR no text data following match
raise(errors.GsmReadError())
# if here, we have a match AND text
# start by popping the header (which we have stored in the 'm'
# matcher object already)
lines.pop(0)
# now put the captures into independent vars
index, status, sender, timestamp = m.groups()
# now loop through, popping content until we get
# the next CMGL or out of lines
msg_buf=StringIO.StringIO()
while len(lines)>0:
m=self.CMGL_MATCHER.match(lines[0])
if m is not None:
# got another header, get out
break
else:
msg_buf.write(lines.pop(0))
# get msg text
msg_text=msg_buf.getvalue().strip()
# now create message
messages.append(self._incoming_to_msg(timestamp,sender,msg_text))
return messages
# returns a single message
def parse_incoming_message(self, header_line, text):
# since this line IS a CMT string (an incoming
# SMS), parse it and store it to deal with later
m = re.match(r'^\+CMT: "(.+?)",.*?,"(.+?)".*?$', header_line)
sender = ""
timestamp = None
if m is not None:
# extract the meta-info from the CMT line,
# and the message from the FOLLOWING line
sender, timestamp = m.groups()
# multi-part messages begin with ASCII 130 followed
# by "@" (ASCII 64). TODO: more docs on this, i wrote
# this via reverse engineering and lost my notes
if (ord(text[0]) == 130) and (text[1] == "@"):
part_text = text[7:]
# ensure we have a place for the incoming
# message part to live as they are delivered
if sender not in self.multipart:
self.multipart[sender] = []
# append THIS PART
self.multipart[sender].append(part_text)
# abort if this is not the last part
if ord(text[5]) != 173:
return None
# last part, so switch out the received
# part with the whole message, to be processed
# below (the sender and timestamp are the same
# for all parts, so no change needed there)
text = "".join(self.multipart[sender])
del self.multipart[sender]
return self._incoming_to_msg(timestamp, sender, text)
def _incoming_to_msg(self, timestamp, sender, text):
# since neither message notifications nor messages
# fetched from storage give any indication of their
# encoding, we're going to have to guess. if the
# text has a multiple-of-four length and starts
# with a UTF-16 Byte Order Mark, try to decode it
# into a unicode string
try:
if (len(text) % 4 == 0) and (len(text) > 0):
bom = text[:4].lower()
if bom == "fffe"\
or bom == "feff":
# decode the text into a unicode string,
# so developers embedding pyGSM need never
# experience this confusion and pain
text = text.decode("hex").decode("utf-16")
# oh dear. it looked like hex-encoded utf-16,
# but wasn't. who sends a message like that?!
except:
pass
# create and store the IncomingMessage object
time_sent = None
if timestamp is not None:
time_sent = self._parse_incoming_timestamp(timestamp)
return message.IncomingMessage(self, sender, time_sent, text)
def _parse_incoming_timestamp(self, timestamp):
"""Parse a Service Center Time Stamp (SCTS) string into a Python datetime
object, or None if the timestamp couldn't be parsed. The SCTS format does
not seem to be standardized, but looks something like: YY/MM/DD,HH:MM:SS."""
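        # Worked example (illustrative): "11/07/05,14:30:00+08" carries a
        # trailing "+08", i.e. 8 * 15 = 120 minutes east of UTC, so two
        # hours are subtracted below to convert the local time to UTC.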
# timestamps usually have trailing timezones, measured
# in 15-minute intervals (?!), which is not handled by
# python's datetime lib. if _this_ timezone does, chop
# it off, and note the actual offset in minutes
tz_pattern = r"([-+])(\d+)$"
m = re.search(tz_pattern, timestamp)
if m is not None:
timestamp = re.sub(tz_pattern, "", timestamp)
tz_offset = datetime.timedelta(minutes=int(m.group(2)) * 15)
if m.group(1)=='-':
tz_offset = -tz_offset
# we won't be modifying the output, but
# still need an empty timedelta to subtract
else:
tz_offset = datetime.timedelta()
# attempt to parse the (maybe modified) timestamp into
# a time_struct, and convert it into a datetime object
try:
time_struct = time.strptime(timestamp, self.SCTS_FMT)
dt = datetime.datetime(*time_struct[:6])
            dt = dt.replace(tzinfo=pytz.utc)
            # patch the time to represent UTC, since the timezone offset
            # parsed above was stripped from the timestamp string
            dt -= tz_offset
return dt
# if the timestamp couldn't be parsed, we've encountered
# a format the pyGSM doesn't support. this sucks, but isn't
# important enough to explode like RubyGSM does
except ValueError:
traceback.print_exc()
return None
|
mit
|
pulkitpagare/Mobile-Security-Framework-MobSF
|
DynamicAnalyzer/tools/pyWebProxy/socket_wrapper.py
|
37
|
4249
|
#!/usr/bin/env python
'''
owtf is an OWASP+PTES-focused try to unite great tools & facilitate pentesting
Copyright (c) 2013, Abraham Aranguren <name.surname@gmail.com> http://7-a.org
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright owner nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Inbound Proxy Module developed by Bharadwaj Machiraju (blog.tunnelshade.in)
# as a part of Google Summer of Code 2013
'''
from tornado import ioloop
import ssl
from gen_cert import gen_signed_cert
def wrap_socket(socket, domain, ca_crt, ca_key, ca_pass, certs_folder, success=None, failure=None, io=None, **options):
"""Wrap an active socket in an SSL socket."""
# # Default Options
options.setdefault('do_handshake_on_connect', False)
options.setdefault('ssl_version', ssl.PROTOCOL_SSLv23)
options.setdefault('server_side', True)
# The idea is to handle domains with greater than 3 dots using wildcard certs
if domain.count(".") >= 3:
key, cert = gen_signed_cert("*." + ".".join(domain.split(".")[-3:]), ca_crt, ca_key, ca_pass, certs_folder)
else:
key, cert = gen_signed_cert(domain, ca_crt, ca_key, ca_pass, certs_folder)
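    # Example of the wildcard rule above: "a.b.c.example.com" has four dots,
    # so the certificate is generated for "*.c.example.com" (the last three
    # labels), while "www.example.com" gets a certificate for its exact name.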
options.setdefault('certfile', cert)
options.setdefault('keyfile', key)
# # Handlers
def done():
"""Handshake finished successfully."""
io.remove_handler(wrapped.fileno())
success and success(wrapped)
def error():
"""The handshake failed."""
if failure:
return failure(wrapped)
# # By default, just close the socket.
io.remove_handler(wrapped.fileno())
wrapped.close()
def handshake(fd, events):
"""Handler fGetting the same error here... also looking for answers....
TheHippo Dec 19 '12 at 20:29or SSL handshake negotiation.
See Python docs for ssl.do_handshake()."""
if events & io.ERROR:
error()
return
try:
new_state = io.ERROR
wrapped.do_handshake()
return done()
except ssl.SSLError as exc:
if exc.args[0] == ssl.SSL_ERROR_WANT_READ:
new_state |= io.READ
elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE:
new_state |= io.WRITE
else:
raise
if new_state != state[0]:
state[0] = new_state
io.update_handler(fd, new_state)
# # set up handshake state; use a list as a mutable cell.
io = io or ioloop.IOLoop.instance()
state = [io.ERROR]
# # Wrap the socket; swap out handlers.
io.remove_handler(socket.fileno())
wrapped = ssl.SSLSocket(socket, **options)
wrapped.setblocking(0)
io.add_handler(wrapped.fileno(), handshake, state[0])
# # Begin the handshake.
handshake(wrapped.fileno(), 0)
return wrapped
|
gpl-3.0
|
markastern/hbcal
|
tests/tests_hebrew_calendar/test_reltime.py
|
1
|
8611
|
# Copyright 2015 Mark Stern
#
# This file is part of Hbcal.
#
# Hbcal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# Hbcal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hbcal. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import unittest
from hbcal.hebrew_calendar import abs_time
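# Note on the arithmetic behind the expected values below (inferred from the
# tests, not from abs_time itself): RelTime(weeks, days, hours, chalakim)
# appears to store everything in chalakim, with 1 hour = 1080 chalakim and
# 1 day = 24 * 1080 = 25920, so RelTime(100, 4, 12, 123) amounts to
# 100 * 181440 + 4 * 25920 + 12 * 1080 + 123 = 18260763 chalakim
# (and 18260763 / 5 = 3652152.6, matching the true-division tests).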
class TestAddSubtract(unittest.TestCase):
def test_add_rel(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
self.assertEqual(abs_time.RelTime(150, 8, 24, 246).chalakim,
rel_time1.chalakim + rel_time2.chalakim)
def test_add_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 + 6
def test_radd_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
6 + rel_time1
def test_sub_rel(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
self.assertEqual(abs_time.RelTime(50, 0, 0, 0).chalakim,
rel_time1.chalakim - rel_time2.chalakim)
def test_sub_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 - 6
def test_rsub_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
6 - rel_time1
class TestIncrementDecrement(unittest.TestCase):
def test_iadd_rel(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
rel_time1 += rel_time2
self.assertEqual(abs_time.RelTime(150, 8, 24, 246).chalakim,
rel_time1.chalakim)
def test_iadd_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 += 6
def test_isub_rel(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
rel_time1 -= rel_time2
self.assertEqual(abs_time.RelTime(50, 0, 0, 0).chalakim,
rel_time1.chalakim)
def test_isub_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 -= 6
class TestMultiply(unittest.TestCase):
def test_multiply_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = rel_time1 * 2
self.assertEqual(abs_time.RelTime(200, 8, 24, 246).chalakim,
rel_time2.chalakim)
def test_rmultiply_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = 2 * rel_time1
self.assertEqual(abs_time.RelTime(200, 8, 24, 246).chalakim,
rel_time2.chalakim)
def test_imultiply_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = rel_time1
rel_time1 *= 2
self.assertEqual(abs_time.RelTime(200, 8, 24, 246).chalakim,
rel_time2.chalakim)
def test_multiply(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 * rel_time2
def test_imultiply(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 *= rel_time2
def test_floor_divide_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertEqual(abs_time.RelTime(20, 0, 21, 672).chalakim,
(rel_time1 // 5).chalakim)
def test_ifloor_divide_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time1 //= 5
self.assertEqual(abs_time.RelTime(20, 0, 21, 672).chalakim,
rel_time1.chalakim)
def test_floor_divide(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
self.assertEqual(1, rel_time1 // rel_time2)
def test_ifloor_divide(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
rel_time1 //= rel_time2
self.assertEqual(1, rel_time1)
def test_floor_divide_string(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 // "Hell World"
def test_ifloor_divide_string(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 //= "Hell World"
def test_true_divide_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertAlmostEqual(3652152.6, (rel_time1 / 5).chalakim, 10)
def test_itrue_divide_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time1 /= 5
self.assertAlmostEqual(3652152.6, rel_time1.chalakim, 10)
def test_true_divide(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
self.assertAlmostEqual(1.987, rel_time1 / rel_time2, 3)
def test_itrue_divide(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
rel_time1 /= rel_time2
self.assertAlmostEqual(1.987, rel_time1, 3)
def test_itrue_divide_string(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 //= "Hell World"
def test_mod_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertEqual(abs_time.RelTime(0, 0, 0, 3).chalakim,
(rel_time1 % 5).chalakim)
def test_imod_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time1 %= 5
self.assertEqual(abs_time.RelTime(0, 0, 0, 3).chalakim,
rel_time1.chalakim)
def test_mod(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
self.assertEqual(abs_time.RelTime(50, 0, 0, 0).chalakim,
(rel_time1 % rel_time2).chalakim)
def test_imod(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
rel_time1 %= rel_time2
self.assertEqual(abs_time.RelTime(50, 0, 0, 0).chalakim,
rel_time1.chalakim)
def test_mod_string(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 % "Hell World"
def test_imod_string(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
with self.assertRaises(TypeError):
rel_time1 %= "Hell World"
def test_divmod_int(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2, rel_time3 = divmod(rel_time1, 5)
self.assertEqual((abs_time.RelTime(20, 0, 21, 672).chalakim,
abs_time.RelTime(0, 0, 0, 3).chalakim),
(rel_time2.chalakim, rel_time3.chalakim))
def test_divmod(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
rel_time2 = abs_time.RelTime(50, 4, 12, 123)
quotient, rel_time3 = divmod(rel_time1, rel_time2)
self.assertEqual((1, abs_time.RelTime(50, 0, 0, 0).chalakim),
(quotient, rel_time3.chalakim))
class TestProperties(unittest.TestCase):
def test_weeks(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertEqual(100, rel_time1.weeks)
def test_days(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertEqual(704, rel_time1.days)
    def test_days_chalakim(self):
rel_time1 = abs_time.RelTime(100, 4, 12, 123)
self.assertEqual((704, 12 * 1080 + 123), rel_time1.days_chalakim)
if __name__ == '__main__':
unittest.main()
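# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the tests
# above pin down the arithmetic contract of abs_time.RelTime. A minimal class
# satisfying the property and floor-division tests, assuming the constructor
# signature RelTime(weeks, days, hours, chalakim) and the traditional
# conversion of 1 hour = 1080 chalakim, could look like this:
class _RelTimeSketch(object):
    CHALAKIM_PER_HOUR = 1080
    CHALAKIM_PER_DAY = 24 * CHALAKIM_PER_HOUR
    CHALAKIM_PER_WEEK = 7 * CHALAKIM_PER_DAY

    def __init__(self, weeks=0, days=0, hours=0, chalakim=0):
        # Everything is stored as a single chalakim count.
        self.chalakim = (((weeks * 7 + days) * 24 + hours)
                         * self.CHALAKIM_PER_HOUR + chalakim)

    @property
    def weeks(self):
        return self.chalakim // self.CHALAKIM_PER_WEEK

    @property
    def days(self):
        return self.chalakim // self.CHALAKIM_PER_DAY

    @property
    def days_chalakim(self):
        # Whole days plus the chalakim left over within the last day.
        return divmod(self.chalakim, self.CHALAKIM_PER_DAY)

    def __floordiv__(self, other):
        if isinstance(other, _RelTimeSketch):
            return self.chalakim // other.chalakim  # dimensionless ratio
        if isinstance(other, int):
            return _RelTimeSketch(chalakim=self.chalakim // other)
        raise TypeError("unsupported operand type for //")
# For example, _RelTimeSketch(100, 4, 12, 123).days == 704, matching test_days above.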
|
gpl-2.0
|
codesmart-co/bit
|
bit/migrations/versions/4ad33f99723a_.py
|
1
|
2133
|
"""empty message
Revision ID: 4ad33f99723a
Revises: 743a0a1b5bc9
Create Date: 2017-08-17 14:36:49.229000
"""
# revision identifiers, used by Alembic.
revision = '4ad33f99723a'
down_revision = '743a0a1b5bc9'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('bit_facebook_daily_ad_insights_impression_device',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('account_id', sa.String(length=255), nullable=True),
sa.Column('campaign_id', sa.String(length=255), nullable=True),
sa.Column('adset_id', sa.String(length=255), nullable=True),
sa.Column('campaign_name', sa.String(length=255), nullable=True),
sa.Column('spend', sa.Numeric(), nullable=True),
sa.Column('cost_per_unique_click', sa.Numeric(), nullable=True),
sa.Column('unique_clicks', sa.Integer(), nullable=True),
sa.Column('unique_impressions', sa.Integer(), nullable=True),
sa.Column('unique_social_clicks', sa.Integer(), nullable=True),
sa.Column('unique_social_impressions', sa.Integer(), nullable=True),
sa.Column('website_clicks', sa.Integer(), nullable=True),
sa.Column('date_start', sa.DateTime(), nullable=True),
sa.Column('date_stop', sa.DateTime(), nullable=True),
sa.Column('impression_device', sa.String(length=255), nullable=True),
sa.Column('ad_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['ad_id'], ['bit_facebook_ad.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), 'bit_facebook_daily_ad_insights_impression_device', ['impression_device'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_bit_facebook_daily_ad_insights_impression_device_impression_device'), table_name='bit_facebook_daily_ad_insights_impression_device')
op.drop_table('bit_facebook_daily_ad_insights_impression_device')
# ### end Alembic commands ###
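# Editor's note (illustrative, not from the original repository): this revision
# would typically be applied or rolled back with the standard Alembic CLI,
# assuming an alembic.ini configured for the target database:
#
#     alembic upgrade 4ad33f99723a      # runs upgrade() above
#     alembic downgrade 743a0a1b5bc9    # runs downgrade(), back to the prior revision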
|
apache-2.0
|
minghuascode/pyj
|
examples/lightout/lightout.py
|
2
|
2702
|
# Copyright (C) 2009, Radoslav Kirov
#
# Lightout game, by Radoslav Kirov
import pyjd # this is dummy in pyjs.
from pyjamas.ui.FlowPanel import FlowPanel
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.Button import Button
from pyjamas.ui.CheckBox import CheckBox
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Label import Label
from pyjamas.ui.Grid import Grid
from pyjamas import Window
from pyjamas import DOM
from pyjamas.ui.FocusWidget import FocusWidget
game = None
class GridCell(FocusWidget):
def __init__(self,i,j):
self.i = i
self.j = j
self.light = True
element = DOM.createDiv()
#DOM.setInnerHTML(element,'<b>%i%i</b>' % (i,j))
FocusWidget.__init__(self, element)
self.redraw()
self.addClickListener(self)
def redraw(self):
if self.light:
self.setStyleName("on")
else:
self.setStyleName("off")
def toggle(self):
if self.light:
self.light = False
else:
self.light = True
self.redraw()
def onClick(self,sender):
if self.i>0:
self.parent.getWidget(self.i-1,self.j).toggle()
if self.i<self.parent.getRowCount()-1:
self.parent.getWidget(self.i+1,self.j).toggle()
if self.j>0:
self.parent.getWidget(self.i,self.j-1).toggle()
if self.j<self.parent.getColumnCount()-1:
self.parent.getWidget(self.i,self.j+1).toggle()
self.toggle()
self.check_win()
def check_win(self):
for i in range(self.parent.getRowCount()):
for j in range(self.parent.getColumnCount()):
if self.parent.getWidget(i,j).light:
return
Window.alert('You win!!! But can you beat the next level?')
global game
game.next_level()
class Game(SimplePanel):
def __init__(self,level):
self.level = level
SimplePanel.__init__(self)
self.start_game(self.level)
def start_game(self, level=None):
if level is not None:
self.level = level
dim = self.level
grid = Grid(dim,dim)
grid.setStyleName("grid")
for i in range(dim):
for j in range(dim):
gc = GridCell(i,j)
grid.setWidget(i,j,gc)
self.add(grid)
def next_level(self):
self.remove(self.getWidget())
self.level+=1
self.start_game()
if __name__ == '__main__':
pyjd.setup("public/lightout.html")
game = Game(3)
RootPanel('game').add(game)
pyjd.run()
|
apache-2.0
|
zephyrproject-rtos/zephyr
|
scripts/tracing/trace_capture_usb.py
|
6
|
3389
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to capture tracing data with USB backend.
"""
import usb.core
import usb.util
import argparse
import sys
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--vendor_id", required=True,
help="usb device vendor id")
parser.add_argument("-p", "--product_id", required=True,
help="usb device product id")
parser.add_argument("-o", "--output", default='channel0_0',
required=False, help="tracing data output file")
args = parser.parse_args()
def main():
parse_args()
if args.vendor_id.isdecimal():
vendor_id = int(args.vendor_id)
else:
vendor_id = int(args.vendor_id, 16)
if args.product_id.isdecimal():
product_id = int(args.product_id)
else:
product_id = int(args.product_id, 16)
output_file = args.output
try:
usb_device = usb.core.find(idVendor=vendor_id, idProduct=product_id)
except Exception as e:
sys.exit("{}".format(e))
if usb_device is None:
sys.exit("No device found, check vendor_id and product_id")
if usb_device.is_kernel_driver_active(0):
try:
usb_device.detach_kernel_driver(0)
except usb.core.USBError as e:
sys.exit("{}".format(e))
# set the active configuration. With no arguments, the first
# configuration will be the active one
try:
usb_device.set_configuration()
except usb.core.USBError as e:
sys.exit("{}".format(e))
configuration = usb_device[0]
interface = configuration[(0, 0)]
# match the only IN endpoint
read_endpoint = usb.util.find_descriptor(interface, custom_match = \
lambda e: \
usb.util.endpoint_direction( \
e.bEndpointAddress) == \
usb.util.ENDPOINT_IN)
# match the only OUT endpoint
write_endpoint = usb.util.find_descriptor(interface, custom_match = \
lambda e: \
usb.util.endpoint_direction( \
e.bEndpointAddress) == \
usb.util.ENDPOINT_OUT)
usb.util.claim_interface(usb_device, interface)
    # enable device tracing
    write_endpoint.write('enable')
    # read once first to flush any stale data mixed into the useful stream
buff = usb.util.create_buffer(8192)
read_endpoint.read(buff, 10000)
with open(output_file, "wb") as file_desc:
while True:
buff = usb.util.create_buffer(8192)
length = read_endpoint.read(buff, 100000)
for index in range(length):
file_desc.write(chr(buff[index]).encode('latin1'))
usb.util.release_interface(usb_device, interface)
if __name__=="__main__":
try:
main()
except KeyboardInterrupt:
print('Data capture interrupted, data saved into {}'.format(args.output))
sys.exit(0)
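# Editor's note (illustrative, not from the original script): a typical
# invocation looks like the following, where the vendor/product IDs are
# placeholder values and may be given in decimal or hex, matching the
# parsing in main() above:
#
#     python3 trace_capture_usb.py -v 0x2fe3 -p 0x0100 -o channel0_0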
|
apache-2.0
|
yongtang/tensorflow
|
tensorflow/python/kernel_tests/distributions/uniform_test.py
|
47
|
10551
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Uniform distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import uniform as uniform_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class UniformTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, self.evaluate(uniform.low))
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), self.evaluate(cdf))
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"x < y"):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
with_nans = array_ops.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = self.evaluate(math_ops.is_nan(pdf))
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, self.evaluate(pdf))
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testFullyReparameterized(self):
a = constant_op.constant(0.1)
b = constant_op.constant(0.8)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
uniform = uniform_lib.Uniform(a, b)
samples = uniform.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Eager doesn't pass due to a type mismatch in one of the ops.
def testUniformFloat64(self):
uniform = uniform_lib.Uniform(
low=np.float64(0.), high=np.float64(1.))
self.assertAllClose(
[1., 1.],
self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(
[0.5, 0.6],
self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(0.5, self.evaluate(uniform.mean()))
self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
self.assertAllClose(0., self.evaluate(uniform.entropy()))
if __name__ == "__main__":
test.main()
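# Editor's note: an illustrative stand-alone sketch (not part of the original
# test file) of the uniform pdf/cdf identities the tests above exercise, using
# plain numpy; the endpoints and sample points mirror testUniformCDF.
def _uniform_pdf_cdf_sketch():
  a, b = 1.0, 11.0
  x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
  # Density is flat at 1/(b-a) on [a, b] and zero outside.
  pdf = np.where((x < a) | (x > b), 0.0, 1.0 / (b - a))
  # CDF is a linear ramp on [a, b], clamped to [0, 1] outside.
  cdf = np.clip((x - a) / (b - a), 0.0, 1.0)
  return pdf, cdf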
|
apache-2.0
|
tapanagupta/mi-instrument
|
mi/instrument/teledyne/workhorse/particles.py
|
8
|
35830
|
import re
import time
from datetime import datetime
from mi.core.log import get_logger
from mi.core.common import BaseEnum
from mi.core.time_tools import timegm_to_float
from mi.core.instrument.data_particle import CommonDataParticleType, DataParticle, DataParticleKey
NEWLINE = '\r\n'
log = get_logger()
class WorkhorseDataParticleType(BaseEnum):
"""
Stream types of data particles
"""
RAW = CommonDataParticleType.RAW
ADCP_PD0_PARSED_BEAM = 'adcp_velocity_beam'
ADCP_PD0_PARSED_EARTH = 'adcp_velocity_earth'
ADCP_PD0_ENGINEERING = 'adcp_engineering'
ADCP_PD0_CONFIG = 'adcp_config'
ADCP_PD0_ERROR_STATUS = 'adcp_error_status'
ADCP_SYSTEM_CONFIGURATION = 'adcp_system_configuration'
ADCP_COMPASS_CALIBRATION = 'adcp_compass_calibration'
ADCP_ANCILLARY_SYSTEM_DATA = "adcp_ancillary_system_data"
ADCP_TRANSMIT_PATH = "adcp_transmit_path"
class VADCPDataParticleType(WorkhorseDataParticleType):
"""
VADCP Stream types of data particles
"""
VADCP_PD0_BEAM_MASTER = 'vadcp_velocity_beam'
VADCP_PD0_BEAM_SLAVE = 'vadcp_velocity_beam_5'
VADCP_PD0_ENGINEERING_SLAVE = 'adcp_engineering_5'
VADCP_PD0_CONFIG_SLAVE = 'adcp_config_5'
VADCP_PD0_ERROR_STATUS_SLAVE = 'adcp_error_status_5'
VADCP_SYSTEM_CONFIGURATION_SLAVE = "adcp_system_configuration_5"
VADCP_ANCILLARY_SYSTEM_DATA_SLAVE = "adcp_ancillary_system_data_5"
VADCP_TRANSMIT_PATH_SLAVE = "adcp_transmit_path_5"
VADCP_COMPASS_CALIBRATION_SLAVE = 'adcp_compass_calibration_5'
class AdcpPd0ParsedKey(BaseEnum):
"""
ADCP PD0 parsed keys
"""
HEADER_ID = "header_id"
DATA_SOURCE_ID = "data_source_id"
NUM_BYTES = "num_bytes"
NUM_DATA_TYPES = "num_data_types"
OFFSET_DATA_TYPES = "offset_data_types"
FIXED_LEADER_ID = "fixed_leader_id"
FIRMWARE_VERSION = "firmware_version"
FIRMWARE_REVISION = "firmware_revision"
SYSCONFIG_FREQUENCY = "sysconfig_frequency"
SYSCONFIG_BEAM_PATTERN = "sysconfig_beam_pattern"
SYSCONFIG_SENSOR_CONFIG = "sysconfig_sensor_config"
SYSCONFIG_HEAD_ATTACHED = "sysconfig_head_attached"
SYSCONFIG_VERTICAL_ORIENTATION = "sysconfig_vertical_orientation"
SYSCONFIG_BEAM_ANGLE = "sysconfig_beam_angle"
SYSCONFIG_BEAM_CONFIG = "sysconfig_beam_config"
DATA_FLAG = "data_flag"
LAG_LENGTH = "lag_length"
NUM_BEAMS = "num_beams"
NUM_CELLS = "num_cells"
PINGS_PER_ENSEMBLE = "pings_per_ensemble"
DEPTH_CELL_LENGTH = "cell_length"
BLANK_AFTER_TRANSMIT = "blank_after_transmit"
SIGNAL_PROCESSING_MODE = "signal_processing_mode"
LOW_CORR_THRESHOLD = "low_corr_threshold"
NUM_CODE_REPETITIONS = "num_code_repetitions"
PERCENT_GOOD_MIN = "percent_good_min"
ERROR_VEL_THRESHOLD = "error_vel_threshold"
TIME_PER_PING_MINUTES = "time_per_ping_minutes"
TIME_PER_PING_SECONDS = "time_per_ping_seconds"
TIME_PER_PING_HUNDREDTHS = "time_per_ping_hundredths"
COORD_TRANSFORM_TYPE = "coord_transform_type"
COORD_TRANSFORM_TILTS = "coord_transform_tilts"
COORD_TRANSFORM_BEAMS = "coord_transform_beams"
COORD_TRANSFORM_MAPPING = "coord_transform_mapping"
HEADING_ALIGNMENT = "heading_alignment"
HEADING_BIAS = "heading_bias"
SENSOR_SOURCE_SPEED = "sensor_source_speed"
SENSOR_SOURCE_DEPTH = "sensor_source_depth"
SENSOR_SOURCE_HEADING = "sensor_source_heading"
SENSOR_SOURCE_PITCH = "sensor_source_pitch"
SENSOR_SOURCE_ROLL = "sensor_source_roll"
SENSOR_SOURCE_CONDUCTIVITY = "sensor_source_conductivity"
SENSOR_SOURCE_TEMPERATURE = "sensor_source_temperature"
SENSOR_AVAILABLE_SPEED = "sensor_available_speed"
SENSOR_AVAILABLE_DEPTH = "sensor_available_depth"
SENSOR_AVAILABLE_HEADING = "sensor_available_heading"
SENSOR_AVAILABLE_PITCH = "sensor_available_pitch"
SENSOR_AVAILABLE_ROLL = "sensor_available_roll"
SENSOR_AVAILABLE_CONDUCTIVITY = "sensor_available_conductivity"
SENSOR_AVAILABLE_TEMPERATURE = "sensor_available_temperature"
BIN_1_DISTANCE = "bin_1_distance"
TRANSMIT_PULSE_LENGTH = "transmit_pulse_length"
REFERENCE_LAYER_START = "reference_layer_start"
REFERENCE_LAYER_STOP = "reference_layer_stop"
FALSE_TARGET_THRESHOLD = "false_target_threshold"
LOW_LATENCY_TRIGGER = "low_latency_trigger"
TRANSMIT_LAG_DISTANCE = "transmit_lag_distance"
CPU_BOARD_SERIAL_NUMBER = "cpu_board_serial_number"
SYSTEM_BANDWIDTH = "system_bandwidth"
SYSTEM_POWER = "system_power"
SERIAL_NUMBER = "serial_number"
BEAM_ANGLE = "beam_angle"
VARIABLE_LEADER_ID = "variable_leader_id"
ENSEMBLE_NUMBER = "ensemble_number"
REAL_TIME_CLOCK = "real_time_clock"
ENSEMBLE_START_TIME = "ensemble_start_time"
ENSEMBLE_NUMBER_INCREMENT = "ensemble_number_increment"
BIT_RESULT = "bit_result"
SPEED_OF_SOUND = "speed_of_sound"
TRANSDUCER_DEPTH = "transducer_depth"
HEADING = "heading"
PITCH = "pitch"
ROLL = "roll"
SALINITY = "salinity"
TEMPERATURE = "temperature"
MPT_MINUTES = "mpt_minutes"
MPT_SECONDS = "mpt_seconds"
MPT_HUNDREDTHS = "mpt_hundredths"
HEADING_STDEV = "heading_stdev"
PITCH_STDEV = "pitch_stdev"
ROLL_STDEV = "roll_stdev"
ADC_TRANSMIT_CURRENT = "adc_transmit_current"
ADC_TRANSMIT_VOLTAGE = "adc_transmit_voltage"
ADC_AMBIENT_TEMP = "adc_ambient_temp"
ADC_PRESSURE_PLUS = "adc_pressure_plus"
ADC_PRESSURE_MINUS = "adc_pressure_minus"
ADC_ATTITUDE_TEMP = "adc_attitude_temp"
ADC_ATTITUDE = "adc_attitude"
ADC_CONTAMINATION_SENSOR = "adc_contamination_sensor"
ERROR_STATUS_WORD = "error_status_word"
ABSOLUTE_PRESSURE = "pressure"
PRESSURE_VARIANCE = "pressure_variance"
VELOCITY_DATA_ID = "velocity_data_id"
BEAM_1_VELOCITY = "velocity_beam1"
BEAM_2_VELOCITY = "velocity_beam2"
BEAM_3_VELOCITY = "velocity_beam3"
BEAM_4_VELOCITY = "velocity_beam4"
BEAM_5_VELOCITY = "velocity_beam5"
WATER_VELOCITY_EAST = "water_velocity_east"
WATER_VELOCITY_NORTH = "water_velocity_north"
WATER_VELOCITY_UP = "water_velocity_up"
ERROR_VELOCITY = "error_velocity"
CORRELATION_MAGNITUDE_ID = "correlation_magnitude_id"
CORRELATION_MAGNITUDE_BEAM1 = "correlation_magnitude_beam1"
CORRELATION_MAGNITUDE_BEAM2 = "correlation_magnitude_beam2"
CORRELATION_MAGNITUDE_BEAM3 = "correlation_magnitude_beam3"
CORRELATION_MAGNITUDE_BEAM4 = "correlation_magnitude_beam4"
CORRELATION_MAGNITUDE_BEAM5 = "correlation_magnitude_beam5"
ECHO_INTENSITY_ID = "echo_intensity_id"
ECHO_INTENSITY_BEAM1 = "echo_intensity_beam1"
ECHO_INTENSITY_BEAM2 = "echo_intensity_beam2"
ECHO_INTENSITY_BEAM3 = "echo_intensity_beam3"
ECHO_INTENSITY_BEAM4 = "echo_intensity_beam4"
ECHO_INTENSITY_BEAM5 = "echo_intensity_beam5"
PERCENT_GOOD_BEAM1 = "percent_good_beam1"
PERCENT_GOOD_BEAM2 = "percent_good_beam2"
PERCENT_GOOD_BEAM3 = "percent_good_beam3"
PERCENT_GOOD_BEAM4 = "percent_good_beam4"
PERCENT_GOOD_BEAM5 = "percent_good_beam5"
PERCENT_GOOD_ID = "percent_good_id"
PERCENT_GOOD_3BEAM = "percent_good_3beam"
PERCENT_TRANSFORMS_REJECT = "percent_transforms_reject"
PERCENT_BAD_BEAMS = "percent_bad_beams"
PERCENT_GOOD_4BEAM = "percent_good_4beam"
CHECKSUM = "checksum"
class Pd0CoordinateTransformType(BaseEnum):
BEAM = 0
EARTH = 3
class Pd0DataParticle(DataParticle):
ntp_epoch = datetime(1900, 1, 1)
def __init__(self, *args, **kwargs):
super(Pd0DataParticle, self).__init__(*args, **kwargs)
record = self.raw_data
dts = datetime(record.variable_data.rtc_y2k_century * 100 + record.variable_data.rtc_y2k_year,
record.variable_data.rtc_y2k_month,
record.variable_data.rtc_y2k_day,
record.variable_data.rtc_y2k_hour,
record.variable_data.rtc_y2k_minute,
record.variable_data.rtc_y2k_seconds)
rtc_time = (dts - self.ntp_epoch).total_seconds() + record.variable_data.rtc_y2k_hundredths / 100.0
self.set_internal_timestamp(rtc_time)
class Pd0VelocityParticle(Pd0DataParticle):
def _build_scalar_values(self):
record = self.raw_data
ensemble_number = (record.variable_data.ensemble_roll_over << 16) + record.variable_data.ensemble_number
fields = [
# FIXED LEADER
(AdcpPd0ParsedKey.NUM_CELLS, record.fixed_data.number_of_cells),
(AdcpPd0ParsedKey.DEPTH_CELL_LENGTH, record.fixed_data.depth_cell_length),
(AdcpPd0ParsedKey.BIN_1_DISTANCE, record.fixed_data.bin_1_distance),
# VARIABLE LEADER
(AdcpPd0ParsedKey.ENSEMBLE_NUMBER, ensemble_number),
(AdcpPd0ParsedKey.HEADING, record.variable_data.heading),
(AdcpPd0ParsedKey.PITCH, record.variable_data.pitch),
(AdcpPd0ParsedKey.ROLL, record.variable_data.roll),
(AdcpPd0ParsedKey.SALINITY, record.variable_data.salinity),
(AdcpPd0ParsedKey.TEMPERATURE, record.variable_data.temperature),
(AdcpPd0ParsedKey.TRANSDUCER_DEPTH, record.variable_data.depth_of_transducer),
(AdcpPd0ParsedKey.ABSOLUTE_PRESSURE, record.variable_data.pressure),
# SYSCONFIG BITMAP
(AdcpPd0ParsedKey.SYSCONFIG_VERTICAL_ORIENTATION, record.sysconfig.beam_facing)]
return fields
def _build_base_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = self._build_scalar_values()
fields.extend([
# CORRELATION MAGNITUDES
(AdcpPd0ParsedKey.CORRELATION_MAGNITUDE_BEAM1, record.correlation_magnitudes.beam1),
(AdcpPd0ParsedKey.CORRELATION_MAGNITUDE_BEAM2, record.correlation_magnitudes.beam2),
(AdcpPd0ParsedKey.CORRELATION_MAGNITUDE_BEAM3, record.correlation_magnitudes.beam3),
(AdcpPd0ParsedKey.CORRELATION_MAGNITUDE_BEAM4, record.correlation_magnitudes.beam4),
# ECHO INTENSITIES
(AdcpPd0ParsedKey.ECHO_INTENSITY_BEAM1, record.echo_intensity.beam1),
(AdcpPd0ParsedKey.ECHO_INTENSITY_BEAM2, record.echo_intensity.beam2),
(AdcpPd0ParsedKey.ECHO_INTENSITY_BEAM3, record.echo_intensity.beam3),
(AdcpPd0ParsedKey.ECHO_INTENSITY_BEAM4, record.echo_intensity.beam4)])
return fields
class Pd0BeamParticle(Pd0VelocityParticle):
"""
ADCP PD0 data particle
    @throw SampleException if a break occurs
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_PD0_PARSED_BEAM
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = self._build_base_values()
fields.extend([
# BEAM VELOCITIES
(AdcpPd0ParsedKey.BEAM_1_VELOCITY, record.velocities.beam1),
(AdcpPd0ParsedKey.BEAM_2_VELOCITY, record.velocities.beam2),
(AdcpPd0ParsedKey.BEAM_3_VELOCITY, record.velocities.beam3),
(AdcpPd0ParsedKey.BEAM_4_VELOCITY, record.velocities.beam4),
(AdcpPd0ParsedKey.PERCENT_GOOD_BEAM1, record.percent_good.beam1),
(AdcpPd0ParsedKey.PERCENT_GOOD_BEAM2, record.percent_good.beam2),
(AdcpPd0ParsedKey.PERCENT_GOOD_BEAM3, record.percent_good.beam3),
(AdcpPd0ParsedKey.PERCENT_GOOD_BEAM4, record.percent_good.beam4)])
return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value} for key, value in fields]
class Pd0EarthParticle(Pd0VelocityParticle):
"""
ADCP PD0 data particle
    @throw SampleException if a break occurs
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_PD0_PARSED_EARTH
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = self._build_base_values()
fields.extend([
# EARTH VELOCITIES
(AdcpPd0ParsedKey.WATER_VELOCITY_EAST, record.velocities.beam1),
(AdcpPd0ParsedKey.WATER_VELOCITY_NORTH, record.velocities.beam2),
(AdcpPd0ParsedKey.WATER_VELOCITY_UP, record.velocities.beam3),
(AdcpPd0ParsedKey.ERROR_VELOCITY, record.velocities.beam4),
(AdcpPd0ParsedKey.PERCENT_GOOD_3BEAM, record.percent_good.beam1),
(AdcpPd0ParsedKey.PERCENT_TRANSFORMS_REJECT, record.percent_good.beam2),
(AdcpPd0ParsedKey.PERCENT_BAD_BEAMS, record.percent_good.beam3),
(AdcpPd0ParsedKey.PERCENT_GOOD_4BEAM, record.percent_good.beam4)])
return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value} for key, value in fields]
class VadcpBeamSlaveParticle(Pd0VelocityParticle):
_data_particle_type = VADCPDataParticleType.VADCP_PD0_BEAM_SLAVE
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = self._build_scalar_values()
fields.extend([
# BEAM VELOCITIES
(AdcpPd0ParsedKey.CORRELATION_MAGNITUDE_BEAM5, record.correlation_magnitudes.beam1),
(AdcpPd0ParsedKey.ECHO_INTENSITY_BEAM5, record.echo_intensity.beam1),
(AdcpPd0ParsedKey.BEAM_5_VELOCITY, record.velocities.beam1),
(AdcpPd0ParsedKey.PERCENT_GOOD_BEAM5, record.percent_good.beam1)])
return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value} for key, value in fields]
class AdcpPd0EngineeringParticle(Pd0DataParticle):
"""
ADCP PD0 data particle
    @throw SampleException if a break occurs
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_PD0_ENGINEERING
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = [
# FIXED LEADER
(AdcpPd0ParsedKey.TRANSMIT_PULSE_LENGTH, record.fixed_data.transmit_pulse_length),
# VARIABLE LEADER
(AdcpPd0ParsedKey.SPEED_OF_SOUND, record.variable_data.speed_of_sound),
(AdcpPd0ParsedKey.MPT_MINUTES, record.variable_data.mpt_minutes),
(AdcpPd0ParsedKey.MPT_SECONDS, record.variable_data.mpt_seconds),
(AdcpPd0ParsedKey.MPT_HUNDREDTHS, record.variable_data.mpt_hundredths),
(AdcpPd0ParsedKey.HEADING_STDEV, record.variable_data.heading_standard_deviation),
(AdcpPd0ParsedKey.PITCH_STDEV, record.variable_data.pitch_standard_deviation),
(AdcpPd0ParsedKey.ROLL_STDEV, record.variable_data.roll_standard_deviation),
(AdcpPd0ParsedKey.ADC_TRANSMIT_CURRENT, record.variable_data.transmit_current),
(AdcpPd0ParsedKey.ADC_TRANSMIT_VOLTAGE, record.variable_data.transmit_voltage),
(AdcpPd0ParsedKey.ADC_AMBIENT_TEMP, record.variable_data.ambient_temperature),
(AdcpPd0ParsedKey.ADC_PRESSURE_PLUS, record.variable_data.pressure_positive),
(AdcpPd0ParsedKey.ADC_PRESSURE_MINUS, record.variable_data.pressure_negative),
(AdcpPd0ParsedKey.ADC_ATTITUDE_TEMP, record.variable_data.attitude_temperature),
(AdcpPd0ParsedKey.ADC_ATTITUDE, record.variable_data.attitude),
(AdcpPd0ParsedKey.ADC_CONTAMINATION_SENSOR, record.variable_data.contamination_sensor),
(AdcpPd0ParsedKey.PRESSURE_VARIANCE, record.variable_data.pressure_variance),
(AdcpPd0ParsedKey.BIT_RESULT, record.variable_data.bit_result),
(AdcpPd0ParsedKey.ERROR_STATUS_WORD, record.variable_data.error_status_word),
]
return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value} for key, value in fields]
class AdcpPd0ConfigParticle(Pd0DataParticle):
"""
ADCP PD0 data particle
    @throw SampleException if a break occurs
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_PD0_CONFIG
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
record = self.raw_data
fields = [
# FIXED LEADER
(AdcpPd0ParsedKey.FIRMWARE_VERSION, record.fixed_data.cpu_firmware_version),
(AdcpPd0ParsedKey.FIRMWARE_REVISION, record.fixed_data.cpu_firmware_revision),
(AdcpPd0ParsedKey.DATA_FLAG, record.fixed_data.simulation_data_flag),
(AdcpPd0ParsedKey.LAG_LENGTH, record.fixed_data.lag_length),
(AdcpPd0ParsedKey.NUM_BEAMS, record.fixed_data.number_of_beams),
(AdcpPd0ParsedKey.NUM_CELLS, record.fixed_data.number_of_cells),
(AdcpPd0ParsedKey.PINGS_PER_ENSEMBLE, record.fixed_data.pings_per_ensemble),
(AdcpPd0ParsedKey.DEPTH_CELL_LENGTH, record.fixed_data.depth_cell_length),
(AdcpPd0ParsedKey.BLANK_AFTER_TRANSMIT, record.fixed_data.blank_after_transmit),
(AdcpPd0ParsedKey.SIGNAL_PROCESSING_MODE, record.fixed_data.signal_processing_mode),
(AdcpPd0ParsedKey.LOW_CORR_THRESHOLD, record.fixed_data.low_corr_threshold),
(AdcpPd0ParsedKey.NUM_CODE_REPETITIONS, record.fixed_data.num_code_reps),
(AdcpPd0ParsedKey.PERCENT_GOOD_MIN, record.fixed_data.minimum_percentage),
(AdcpPd0ParsedKey.ERROR_VEL_THRESHOLD, record.fixed_data.error_velocity_max),
(AdcpPd0ParsedKey.TIME_PER_PING_MINUTES, record.fixed_data.tpp_minutes),
(AdcpPd0ParsedKey.TIME_PER_PING_SECONDS, record.fixed_data.tpp_seconds),
(AdcpPd0ParsedKey.TIME_PER_PING_HUNDREDTHS, record.fixed_data.tpp_hundredths),
(AdcpPd0ParsedKey.HEADING_ALIGNMENT, record.fixed_data.heading_alignment),
(AdcpPd0ParsedKey.HEADING_BIAS, record.fixed_data.heading_bias),
(AdcpPd0ParsedKey.REFERENCE_LAYER_START, record.fixed_data.starting_depth_cell),
(AdcpPd0ParsedKey.REFERENCE_LAYER_STOP, record.fixed_data.ending_depth_cell),
(AdcpPd0ParsedKey.FALSE_TARGET_THRESHOLD, record.fixed_data.false_target_threshold),
(AdcpPd0ParsedKey.LOW_LATENCY_TRIGGER, record.fixed_data.spare1),
(AdcpPd0ParsedKey.TRANSMIT_LAG_DISTANCE, record.fixed_data.transmit_lag_distance),
(AdcpPd0ParsedKey.CPU_BOARD_SERIAL_NUMBER, str(record.fixed_data.cpu_board_serial_number)),
(AdcpPd0ParsedKey.SYSTEM_BANDWIDTH, record.fixed_data.system_bandwidth),
(AdcpPd0ParsedKey.SYSTEM_POWER, record.fixed_data.system_power),
(AdcpPd0ParsedKey.SERIAL_NUMBER, str(record.fixed_data.serial_number)),
(AdcpPd0ParsedKey.BEAM_ANGLE, record.fixed_data.beam_angle),
# SYSCONFIG BITMAP
(AdcpPd0ParsedKey.SYSCONFIG_FREQUENCY, record.sysconfig.frequency),
(AdcpPd0ParsedKey.SYSCONFIG_BEAM_PATTERN, record.sysconfig.beam_pattern),
(AdcpPd0ParsedKey.SYSCONFIG_SENSOR_CONFIG, record.sysconfig.sensor_config),
(AdcpPd0ParsedKey.SYSCONFIG_HEAD_ATTACHED, record.sysconfig.xdcr_head_attached),
(AdcpPd0ParsedKey.SYSCONFIG_VERTICAL_ORIENTATION, record.sysconfig.beam_facing),
(AdcpPd0ParsedKey.SYSCONFIG_BEAM_ANGLE, record.sysconfig.beam_angle),
(AdcpPd0ParsedKey.SYSCONFIG_BEAM_CONFIG, record.sysconfig.janus_config),
# COORD TRANSFORM BITMAP
(AdcpPd0ParsedKey.COORD_TRANSFORM_TYPE, record.coord_transform.coord_transform),
(AdcpPd0ParsedKey.COORD_TRANSFORM_TILTS, record.coord_transform.tilts_used),
(AdcpPd0ParsedKey.COORD_TRANSFORM_BEAMS, record.coord_transform.three_beam_used),
(AdcpPd0ParsedKey.COORD_TRANSFORM_MAPPING, record.coord_transform.bin_mapping_used),
# SENSOR SOURCE BITMAP
(AdcpPd0ParsedKey.SENSOR_SOURCE_SPEED, record.sensor_source.calculate_ec),
(AdcpPd0ParsedKey.SENSOR_SOURCE_DEPTH, record.sensor_source.depth_used),
(AdcpPd0ParsedKey.SENSOR_SOURCE_HEADING, record.sensor_source.heading_used),
(AdcpPd0ParsedKey.SENSOR_SOURCE_PITCH, record.sensor_source.pitch_used),
(AdcpPd0ParsedKey.SENSOR_SOURCE_ROLL, record.sensor_source.roll_used),
(AdcpPd0ParsedKey.SENSOR_SOURCE_CONDUCTIVITY, record.sensor_source.conductivity_used),
(AdcpPd0ParsedKey.SENSOR_SOURCE_TEMPERATURE, record.sensor_source.temperature_used),
# SENSOR AVAIL BITMAP
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_SPEED, record.sensor_avail.speed_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_DEPTH, record.sensor_avail.depth_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_HEADING, record.sensor_avail.heading_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_PITCH, record.sensor_avail.pitch_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_ROLL, record.sensor_avail.roll_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_CONDUCTIVITY, record.sensor_avail.conductivity_avail),
(AdcpPd0ParsedKey.SENSOR_AVAILABLE_TEMPERATURE, record.sensor_avail.temperature_avail)]
return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value} for key, value in fields]
# ADCP System Configuration keys will be varied in VADCP
class AdcpSystemConfigurationKey(BaseEnum):
# https://confluence.oceanobservatories.org/display/instruments/ADCP+Driver PS0
SERIAL_NUMBER = "serial_number"
TRANSDUCER_FREQUENCY = "transducer_frequency"
CONFIGURATION = "configuration"
MATCH_LAYER = "match_layer"
BEAM_ANGLE = "beam_angle"
BEAM_PATTERN = "beam_pattern"
ORIENTATION = "orientation"
SENSORS = "sensors"
PRESSURE_COEFF_c3 = "pressure_coeff_c3"
PRESSURE_COEFF_c2 = "pressure_coeff_c2"
PRESSURE_COEFF_c1 = "pressure_coeff_c1"
PRESSURE_COEFF_OFFSET = "pressure_coeff_offset"
TEMPERATURE_SENSOR_OFFSET = "temperature_sensor_offset"
CPU_FIRMWARE = "cpu_firmware"
BOOT_CODE_REQUIRED = "boot_code_required"
BOOT_CODE_ACTUAL = "boot_code_actual"
DEMOD_1_VERSION = "demod_1_version"
DEMOD_1_TYPE = "demod_1_type"
DEMOD_2_VERSION = "demod_2_version"
DEMOD_2_TYPE = "demod_2_type"
POWER_TIMING_VERSION = "power_timing_version"
POWER_TIMING_TYPE = "power_timing_type"
BOARD_SERIAL_NUMBERS = "board_serial_numbers"
# ADCP System Configuration keys will be varied in VADCP
# Some of the output lines will not be available in VADCP as it supports only
# 4 beams plus a 5th beam
class AdcpSystemConfigurationDataParticle(DataParticle):
_data_particle_type = WorkhorseDataParticleType.ADCP_SYSTEM_CONFIGURATION
@staticmethod
def regex():
groups = [
r'Instrument S/N: +(\w+)',
r'Frequency: +(\d+) HZ',
r'Configuration: +([\w, ]+)',
r'Match Layer: +(\w+)',
r'Beam Angle: +([\d.]+) DEGREES',
r'Beam Pattern: +([\w]+)',
r'Orientation: +([\w]+)',
r'Sensor\(s\): +([\w ]+)',
r'(Pressure Sens Coefficients:)?',
r'(c3 = ([\+\-\d.E]+))?',
r'(c2 = ([\+\-\d.E]+))?',
r'(c1 = ([\+\-\d.E]+))?',
r'(Offset = ([\+\-\d.E]+))?',
r'Temp Sens Offset: +([\+\-\d.]+) degrees C',
r'CPU Firmware: +([\w.\[\] ]+)',
r'Boot Code Ver: +Required: +([\w.]+) +Actual: +([\w.]+)',
r'DEMOD #1 Ver: +(\w+), Type: +(\w+)',
r'DEMOD #2 Ver: +(\w+), Type: +(\w+)',
r'PWRTIMG Ver: +(\w+), Type: +(\w+)',
r'Board Serial Number Data:',
r'([\w\- ]+)',
r'([\w\- ]+)',
r'([\w\- ]+)',
r'([\w\- ]+)',
r'([\w\- ]+)?',
r'([\w\- ]+)?',
r'>'
]
return r'\s*'.join(groups)
@staticmethod
def regex_compiled():
return re.compile(AdcpSystemConfigurationDataParticle.regex())
def _build_parsed_values(self):
results = []
match = self.regex_compiled().search(self.raw_data)
key = AdcpSystemConfigurationKey
results.append(self._encode_value(key.SERIAL_NUMBER, match.group(1), str))
results.append(self._encode_value(key.TRANSDUCER_FREQUENCY, match.group(2), int))
results.append(self._encode_value(key.CONFIGURATION, match.group(3), str))
results.append(self._encode_value(key.MATCH_LAYER, match.group(4), str))
results.append(self._encode_value(key.BEAM_ANGLE, match.group(5), int))
results.append(self._encode_value(key.BEAM_PATTERN, match.group(6), str))
results.append(self._encode_value(key.ORIENTATION, match.group(7), str))
results.append(self._encode_value(key.SENSORS, match.group(8), str))
if match.group(11):
results.append(self._encode_value(key.PRESSURE_COEFF_c3, match.group(11), float))
if match.group(13):
results.append(self._encode_value(key.PRESSURE_COEFF_c2, match.group(13), float))
if match.group(15):
results.append(self._encode_value(key.PRESSURE_COEFF_c1, match.group(15), float))
if match.group(17):
results.append(self._encode_value(key.PRESSURE_COEFF_OFFSET, match.group(17), float))
results.append(self._encode_value(key.TEMPERATURE_SENSOR_OFFSET, match.group(18), float))
results.append(self._encode_value(key.CPU_FIRMWARE, match.group(19), str))
results.append(self._encode_value(key.BOOT_CODE_REQUIRED, match.group(20), str))
results.append(self._encode_value(key.BOOT_CODE_ACTUAL, match.group(21), str))
results.append(self._encode_value(key.DEMOD_1_VERSION, match.group(22), str))
results.append(self._encode_value(key.DEMOD_1_TYPE, match.group(23), str))
results.append(self._encode_value(key.DEMOD_2_VERSION, match.group(24), str))
results.append(self._encode_value(key.DEMOD_2_TYPE, match.group(25), str))
results.append(self._encode_value(key.POWER_TIMING_VERSION, match.group(26), str))
results.append(self._encode_value(key.POWER_TIMING_TYPE, match.group(27), str))
results.append(self._encode_value(key.BOARD_SERIAL_NUMBERS,
[match.group(28),
match.group(29),
match.group(30),
match.group(31),
match.group(32),
match.group(33)], lambda y: ','.join(x.strip() for x in y if x)))
return results
# AC command
class AdcpCompassCalibrationKey(BaseEnum):
"""
Keys for ADCP Compass Calibration
"""
FLUXGATE_CALIBRATION_TIMESTAMP = "fluxgate_calibration_timestamp"
S_INVERSE_BX = "s_inverse_bx"
S_INVERSE_BY = "s_inverse_by"
S_INVERSE_BZ = "s_inverse_bz"
S_INVERSE_ERR = "s_inverse_err"
COIL_OFFSET = "coil_offset"
ELECTRICAL_NULL = "electrical_null"
TILT_CALIBRATION_TIMESTAMP = "tilt_calibration_timestamp"
CALIBRATION_TEMP = "calibration_temp"
ROLL_UP_DOWN = "roll_up_down"
PITCH_UP_DOWN = "pitch_up_down"
OFFSET_UP_DOWN = "offset_up_down"
TILT_NULL = "tilt_null"
class AdcpCompassCalibrationDataParticle(DataParticle):
"""
ADCP Compass Calibration data particle
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_COMPASS_CALIBRATION
RE01 = re.compile(r' +Calibration date and time: ([/\d: ]+)')
RE04 = re.compile(r' +Bx +. +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) .')
RE05 = re.compile(r' +By +. +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) .')
RE06 = re.compile(r' +Bz +. +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) .')
RE07 = re.compile(r' +Err +. +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) +([\deE\+\-.]+) .')
RE11 = re.compile(r' +. +([\deE\+\-.]+) +.')
RE12 = re.compile(r' +. +([\deE\+\-.]+) +.')
RE13 = re.compile(r' +. +([\deE\+\-.]+) +.')
RE14 = re.compile(r' +. +([\deE\+\-.]+) +.')
RE18 = re.compile(r' +. ([\d.]+) .')
RE21 = re.compile(r' +Calibration date and time: ([/\d: ]+)')
RE22 = re.compile(r' +Average Temperature During Calibration was +([\d.]+) .')
RE27 = re.compile(r' Roll +. +([\deE\+\-.]+) +([\deE\+\-.]+) +. +. +([\deE\+\-.]+) +([\deE\+\-.]+) +.')
RE28 = re.compile(r' Pitch +. +([\deE\+\-.]+) +([\deE\+\-.]+) +. +. +([\deE\+\-.]+) +([\deE\+\-.]+) +.')
RE32 = re.compile(r' Offset . +([\deE\+\-.]+) +([\deE\+\-.]+) +. +. +([\deE\+\-.]+) +([\deE\+\-.]+) +.')
RE36 = re.compile(r' +Null +. (\d+) +.')
def _build_parsed_values(self):
# Initialize
matches = {}
lines = self.raw_data.split(NEWLINE)
match = self.RE01.match(lines[1])
timestamp = match.group(1)
matches[AdcpCompassCalibrationKey.FLUXGATE_CALIBRATION_TIMESTAMP] = timegm_to_float(
time.strptime(timestamp, "%m/%d/%Y %H:%M:%S"))
match = self.RE04.match(lines[4])
matches[AdcpCompassCalibrationKey.S_INVERSE_BX] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE05.match(lines[5])
matches[AdcpCompassCalibrationKey.S_INVERSE_BY] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE06.match(lines[6])
matches[AdcpCompassCalibrationKey.S_INVERSE_BZ] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE07.match(lines[7])
matches[AdcpCompassCalibrationKey.S_INVERSE_ERR] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE11.match(lines[11])
matches[AdcpCompassCalibrationKey.COIL_OFFSET] = [float(match.group(1))]
match = self.RE12.match(lines[12])
matches[AdcpCompassCalibrationKey.COIL_OFFSET].append(float(match.group(1)))
match = self.RE13.match(lines[13])
matches[AdcpCompassCalibrationKey.COIL_OFFSET].append(float(match.group(1)))
match = self.RE14.match(lines[14])
matches[AdcpCompassCalibrationKey.COIL_OFFSET].append(float(match.group(1)))
match = self.RE18.match(lines[18])
matches[AdcpCompassCalibrationKey.ELECTRICAL_NULL] = float(match.group(1))
match = self.RE21.match(lines[21])
timestamp = match.group(1)
matches[AdcpCompassCalibrationKey.TILT_CALIBRATION_TIMESTAMP] = timegm_to_float(
time.strptime(timestamp, "%m/%d/%Y %H:%M:%S"))
match = self.RE22.match(lines[22])
matches[AdcpCompassCalibrationKey.CALIBRATION_TEMP] = float(match.group(1))
match = self.RE27.match(lines[27])
matches[AdcpCompassCalibrationKey.ROLL_UP_DOWN] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE28.match(lines[28])
matches[AdcpCompassCalibrationKey.PITCH_UP_DOWN] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE32.match(lines[32])
matches[AdcpCompassCalibrationKey.OFFSET_UP_DOWN] = [float(match.group(1)), float(match.group(2)),
float(match.group(3)), float(match.group(4))]
match = self.RE36.match(lines[36])
matches[AdcpCompassCalibrationKey.TILT_NULL] = float(match.group(1))
result = []
for key, value in matches.iteritems():
result.append({DataParticleKey.VALUE_ID: key,
DataParticleKey.VALUE: value})
return result
# for keys for PT2 command
class AdcpAncillarySystemDataKey(BaseEnum):
"""
Keys for PT2 command
"""
ADCP_AMBIENT_CURRENT = "adcp_ambient_temp"
ADCP_ATTITUDE_TEMP = "adcp_attitude_temp"
ADCP_INTERNAL_MOISTURE = "adcp_internal_moisture"
# PT2 command data particle
class AdcpAncillarySystemDataParticle(DataParticle):
"""
Data particle for PT2 command
"""
_data_particle_type = WorkhorseDataParticleType.ADCP_ANCILLARY_SYSTEM_DATA
RE01 = re.compile(r'Ambient +Temperature = +([\+\-\d.]+) Degrees C')
RE02 = re.compile(r'Attitude Temperature = +([\+\-\d.]+) Degrees C')
RE03 = re.compile(r'Internal Moisture = +(\w+)')
def _build_parsed_values(self):
# Initialize
matches = {}
for key, regex, formatter in [
(AdcpAncillarySystemDataKey.ADCP_AMBIENT_CURRENT, self.RE01, float),
(AdcpAncillarySystemDataKey.ADCP_ATTITUDE_TEMP, self.RE02, float),
(AdcpAncillarySystemDataKey.ADCP_INTERNAL_MOISTURE, self.RE03, lambda hexval: int(hexval[:-1], 16))
]:
match = regex.search(self.raw_data)
matches[key] = formatter(match.group(1))
result = []
for key, value in matches.iteritems():
result.append({DataParticleKey.VALUE_ID: key,
DataParticleKey.VALUE: value})
return result
# keys for PT4 command
class AdcpTransmitPathKey(BaseEnum):
ADCP_TRANSIT_CURRENT = "adcp_transmit_current"
ADCP_TRANSIT_VOLTAGE = "adcp_transmit_voltage"
ADCP_TRANSIT_IMPEDANCE = "adcp_transmit_impedance"
ADCP_TRANSIT_TEST_RESULT = "adcp_transmit_test_results"
# Data particle for PT4 command
class AdcpTransmitPathParticle(DataParticle):
_data_particle_type = WorkhorseDataParticleType.ADCP_TRANSMIT_PATH
RE01 = re.compile(r'IXMT += +([\+\-\d.]+) Amps')
RE02 = re.compile(r'VXMT += +([\+\-\d.]+) Volts')
RE03 = re.compile(r' +Z += +([\+\-\d.]+) Ohms')
RE04 = re.compile(r'Transmit Test Results = +(.*)\r')
def _build_parsed_values(self):
# Initialize
matches = {}
for key, regex, formatter in [
(AdcpTransmitPathKey.ADCP_TRANSIT_CURRENT, self.RE01, float),
(AdcpTransmitPathKey.ADCP_TRANSIT_VOLTAGE, self.RE02, float),
(AdcpTransmitPathKey.ADCP_TRANSIT_IMPEDANCE, self.RE03, float),
(AdcpTransmitPathKey.ADCP_TRANSIT_TEST_RESULT, self.RE04, str),
]:
match = regex.search(self.raw_data)
matches[key] = formatter(match.group(1))
result = []
for key, value in matches.iteritems():
result.append({DataParticleKey.VALUE_ID: key,
DataParticleKey.VALUE: value})
return result
class VadcpBeamMasterParticle(Pd0BeamParticle):
_data_particle_type = VADCPDataParticleType.VADCP_PD0_BEAM_MASTER
class VadcpEngineeringSlaveParticle(AdcpPd0EngineeringParticle):
_data_particle_type = VADCPDataParticleType.VADCP_PD0_ENGINEERING_SLAVE
class VadcpConfigSlaveParticle(AdcpPd0ConfigParticle):
_data_particle_type = VADCPDataParticleType.VADCP_PD0_CONFIG_SLAVE
class VadcpSystemConfigurationDataParticle(AdcpSystemConfigurationDataParticle):
_data_particle_type = VADCPDataParticleType.VADCP_SYSTEM_CONFIGURATION_SLAVE
class VadcpAncillarySystemDataParticle(AdcpAncillarySystemDataParticle):
_data_particle_type = VADCPDataParticleType.VADCP_ANCILLARY_SYSTEM_DATA_SLAVE
class VadcpTransmitPathParticle(AdcpTransmitPathParticle):
_data_particle_type = VADCPDataParticleType.VADCP_TRANSMIT_PATH_SLAVE
class VadcpCompassCalibrationDataParticle(AdcpCompassCalibrationDataParticle):
_data_particle_type = VADCPDataParticleType.VADCP_COMPASS_CALIBRATION_SLAVE
|
bsd-2-clause
|
tonycao/IntroToHadoopAndMR__Udacity_Course
|
ProblemStatement2/Python/P2Q1_Mapper_Old.py
|
4
|
3397
|
#!/usr/bin/python
# URL that generated this code:
# http://txt2re.com/index-python.php3?s=10.223.157.186%20-%20-%20[15/Jul/2009:15:50:35%20-0700]%20%22GET%20/assets/js/lowpro.js%20HTTP/1.1%22%20200%2010469&4&-56&-139&-57&-140&-58&2&60&-137&29&61&3&-63&-62&20&18&-138&24&64&13
import sys
import re
txt = ['10.190.174.142 - - [03/Dec/2011:13:28:06 -0800] "GET /images/filmpics/0000/2229/GOEMON-NUKI-000163.jpg HTTP/1.1" 200 184976', '10.190.174.142 - - [03/Dec/2011:13:28:08 -0800] "GET /assets/js/javascript_combined.js HTTP/1.1" 200 20404', '10.190.174.142 - - [03/Dec/2011:13:28:09 -0800] "GET /assets/img/home-logo.png HTTP/1.1" 200 3892', '10.190.174.142 - - [03/Dec/2011:13:28:09 -0800] "GET /images/filmmediablock/360/019.jpg HTTP/1.1" 200 74446', '10.190.174.142 - - [03/Dec/2011:13:28:10 -0800] "GET /images/filmmediablock/360/g_still_04.jpg HTTP/1.1" 200 761555', '10.190.174.142 - - [03/Dec/2011:13:28:09 -0800] "GET /images/filmmediablock/360/07082218.jpg HTTP/1.1" 200 154609', '10.190.174.142 - - [03/Dec/2011:13:28:10 -0800] "GET /images/filmpics/0000/2229/GOEMON-NUKI-000163.jpg HTTP/1.1" 200 184976', '10.190.174.142 - - [03/Dec/2011:13:28:11 -0800] "GET /images/filmmediablock/360/GOEMON-NUKI-000163.jpg HTTP/1.1" 200 60117', '10.190.174.142 - - [03/Dec/2011:13:28:10 -0800] "GET /images/filmmediablock/360/Chacha.jpg HTTP/1.1" 200 109379', '10.190.174.142 - - [03/Dec/2011:13:28:11 -0800] "GET /images/filmmediablock/360/GOEMON-NUKI-000159.jpg HTTP/1.1" 200 161657']
#txt='10.223.157.186 - - [15/Jul/2009:15:50:35 -0700] "GET /assets/js/lowpro.js HTTP/1.1" 200 10469'
re1='((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))(?![\\d])' # IPv4 IP Address 1
re2='( )' # White Space 1
re3='(-)' # Any Single Character 1
re4='( )' # White Space 2
re5='(-)' # Any Single Character 2
re6='( )' # White Space 3
re7='(\\[.*?\\])' # Square Braces 1
re8='(\\s+)' # White Space 4
re9='(")' # Any Single Character 3
re10='((?:[a-z][a-z0-9_]*))' # Variable Name 1
re11='(\\s+)' # White Space 5
re12='((?:\\/[\\w\\.\\-]+)+)' # Unix Path 1
re13='( )' # White Space 6
re14='((?:[a-z][a-z0-9_]*))' # Variable Name 2
re15='((?:\\/[\\w\\.\\-]+)+)' # Unix Path 2
re16='(")' # Any Single Character 4
re17='( )' # White Space 7
re18='(\\d+)' # Integer Number 1
re19='(\\s+)' # White Space 8
re20='(\\d+)' # Integer Number 2
rg = re.compile(re1+re2+re3+re4+re5+re6+re7+re8+re9+re10+re11+re12+re13+re14+re15+re16+re17+re18+re19+re20,re.IGNORECASE|re.DOTALL)
for t in sys.stdin:
m = rg.search(t)
if m:
ipaddress1=m.group(1)
ws1=m.group(2)
c1=m.group(3)
ws2=m.group(4)
c2=m.group(5)
ws3=m.group(6)
sbraces1=m.group(7)
ws4=m.group(8)
c3=m.group(9)
var1=m.group(10)
ws5=m.group(11)
unixpath1=m.group(12)
ws6=m.group(13)
var2=m.group(14)
unixpath2=m.group(15)
c4=m.group(16)
ws7=m.group(17)
int1=m.group(18)
ws8=m.group(19)
int2=m.group(20)
#print "("+ipaddress1+")"+"("+ws1+")"+"("+c1+")"+"("+ws2+")"+"("+c2+")"+"("+ws3+")"+"("+sbraces1+")"+"("+ws4+")"+"("+c3+")"+"("+var1+")"+"("+ws5+")"+"("+unixpath1+")"+"("+ws6+")"+"("+var2+")"+"("+unixpath2+")"+"("+c4+")"+"("+ws7+")"+"("+int1+")"+"("+ws8+")"+"("+int2+")"+"\n"
#print unixpath1
print "{0}\t{1}".format(unixpath1, 1)
|
apache-2.0
|
nok/sklearn-porter
|
tests/estimator/regressor/MLPRegressor/MLPRegressorJSTest.py
|
1
|
9499
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import numpy as np
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from tests.estimator.regressor.Regressor import Regressor
from tests.language.JavaScript import JavaScript
class MLPRegressorJSTest(JavaScript, Regressor, TestCase):
N_RANDOM_TESTS = 50
def setUp(self):
super(MLPRegressorJSTest, self).setUp()
np.random.seed(0)
self.estimator = MLPRegressor(activation='relu', hidden_layer_sizes=50,
max_iter=500, learning_rate_init=.1,
random_state=3)
def tearDown(self):
super(MLPRegressorJSTest, self).tearDown()
def test_activation_fn_relu_with_mult_layers_2(self):
self.estimator = MLPRegressor(activation='relu',
hidden_layer_sizes=(50, 30),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_relu_with_mult_layers_3(self):
self.estimator = MLPRegressor(activation='relu',
hidden_layer_sizes=(50, 30, 15),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_identity(self):
self.estimator = MLPRegressor(activation='identity',
hidden_layer_sizes=50, max_iter=500,
learning_rate_init=.1)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_identity_with_mult_layers_2(self):
self.estimator = MLPRegressor(activation='identity',
hidden_layer_sizes=(50, 30), max_iter=500,
learning_rate_init=.1, random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_identity_with_mult_layers_3(self):
self.estimator = MLPRegressor(activation='identity',
hidden_layer_sizes=(50, 30, 15),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_tanh(self):
self.estimator = MLPRegressor(activation='tanh', hidden_layer_sizes=50,
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
match.append(self.pred_in_custom(x, cast=False) -
self.pred_in_py(x, cast=False) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_tanh_with_mult_layers_2(self):
self.estimator = MLPRegressor(activation='tanh',
hidden_layer_sizes=(50, 30),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_tanh_with_mult_layers_3(self):
self.estimator = MLPRegressor(activation='tanh',
hidden_layer_sizes=(50, 30, 15),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
def test_activation_fn_logistic(self):
self.estimator = MLPRegressor(activation='logistic',
hidden_layer_sizes=50, max_iter=500,
learning_rate_init=.1, random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
    def test_activation_fn_logistic_with_mult_layers_2(self):
self.estimator = MLPRegressor(activation='logistic',
hidden_layer_sizes=(50, 30), max_iter=500,
learning_rate_init=.1, random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
    def test_activation_fn_logistic_with_mult_layers_3(self):
self.estimator = MLPRegressor(activation='logistic',
hidden_layer_sizes=(50, 30, 15),
max_iter=500, learning_rate_init=.1,
random_state=3)
self.load_data()
self._port_estimator()
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
match = []
for _ in range(30):
x = np.random.uniform(amin, amax, self.n_features)
            match.append(abs(self.pred_in_custom(x, cast=False) -
                             self.pred_in_py(x, cast=False)) < 0.0001)
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertEqual(match.count(True), len(match))
|
mit
|
aurelieladier/openturns
|
python/test/t_InverseChiSquare_std.py
|
1
|
6233
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
allDistributions = [InverseChiSquare(10.5), InverseChiSquare(15.0)]
for n in range(len(allDistributions)):
distribution = allDistributions[n]
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", oneRealization)
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", oneSample[0], " last=", oneSample[size - 1])
print("mean=", oneSample.computeMean())
print("covariance=", oneSample.computeCovariance())
size = 100
for i in range(2):
if FittingTest.Kolmogorov(distribution.getSample(size), distribution).getBinaryQualityMeasure():
msg = "accepted"
else:
msg = "rejected"
print(
"Kolmogorov test for the generator, sample size=", size, " is ", msg)
size *= 10
# Define a point
point = NumericalPoint(
distribution.getDimension(), 2.0 / distribution.getNu())
print("Point= ", point)
# Show PDF and CDF of point
eps = 1e-5
DDF = distribution.computeDDF(point)
print("ddf =", DDF)
print("ddf (FD)=%.6g" % ((distribution.computePDF(point + NumericalPoint(1, eps)) -
distribution.computePDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
LPDF = distribution.computeLogPDF(point)
print("log pdf= %.12g" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6g" % PDF)
print("pdf (FD)=%.6g" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
CDF = distribution.computeCDF(point)
print("cdf= %.12g" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf= %.12g" % CCDF)
Survival = distribution.computeSurvivalFunction(point)
print("survival= %.12g" % Survival)
CF = distribution.computeCharacteristicFunction(point[0])
print("characteristic function=(%.6g, %.6g)" % (CF.real, CF.imag))
LCF = distribution.computeLogCharacteristicFunction(point[0])
print("log characteristic function=(%.6g, %.6g)" %
(LCF.real, LCF.imag))
PDFgr = distribution.computePDFGradient(point)
print("pdf gradient =", PDFgr)
PDFgrFD = NumericalPoint(1)
PDFgrFD[0] = (InverseChiSquare(distribution.getNu() + eps).computePDF(point) -
InverseChiSquare(distribution.getNu() - eps).computePDF(point)) / (2.0 * eps)
print("pdf gradient (FD)=", PDFgrFD)
CDFgr = distribution.computeCDFGradient(point)
print("cdf gradient =", CDFgr)
CDFgrFD = NumericalPoint(1)
CDFgrFD[0] = (InverseChiSquare(distribution.getNu() + eps).computeCDF(point) -
InverseChiSquare(distribution.getNu() - eps).computeCDF(point)) / (2.0 * eps)
print("cdf gradient (FD)=", CDFgrFD)
quantile = distribution.computeQuantile(0.95)
print("quantile=", quantile)
print("cdf(quantile)= %.2f" % distribution.computeCDF(quantile))
# Get 95% survival function
inverseSurvival = NumericalPoint(distribution.computeInverseSurvivalFunction(0.95))
print("InverseSurvival=", repr(inverseSurvival))
print("Survival(inverseSurvival)=%.6f" % distribution.computeSurvivalFunction(inverseSurvival))
# Confidence regions
interval, threshold = distribution.computeMinimumVolumeIntervalWithMarginalProbability(0.95)
print("Minimum volume interval=", interval)
print("threshold=", NumericalPoint(1, threshold))
levelSet, beta = distribution.computeMinimumVolumeLevelSetWithThreshold(0.95)
print("Minimum volume level set=", levelSet)
print("beta=", NumericalPoint(1, beta))
interval, beta = distribution.computeBilateralConfidenceIntervalWithMarginalProbability(0.95)
print("Bilateral confidence interval=", interval)
print("beta=", NumericalPoint(1, beta))
interval, beta = distribution.computeUnilateralConfidenceIntervalWithMarginalProbability(0.95, False)
print("Unilateral confidence interval (lower tail)=", interval)
print("beta=", NumericalPoint(1, beta))
interval, beta = distribution.computeUnilateralConfidenceIntervalWithMarginalProbability(0.95, True)
print("Unilateral confidence interval (upper tail)=", interval)
print("beta=", NumericalPoint(1, beta))
mean = distribution.getMean()
print("mean=", mean)
covariance = distribution.getCovariance()
print("covariance=", covariance)
correlation = distribution.getCorrelation()
print("correlation=", correlation)
spearman = distribution.getSpearmanCorrelation()
print("spearman=", spearman)
kendall = distribution.getKendallTau()
print("kendall=", kendall)
parameters = distribution.getParametersCollection()
print("parameters=", parameters)
for i in range(6):
print("standard moment n=", i, ", value=",
distribution.getStandardMoment(i))
print("Standard representative=",
distribution.getStandardRepresentative())
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", standardDeviation)
skewness = distribution.getSkewness()
print("skewness=", skewness)
kurtosis = distribution.getKurtosis()
print("kurtosis=", kurtosis)
except:
import sys
print("InverseChiSquare.py", sys.exc_info()[0], sys.exc_info()[1])
|
lgpl-3.0
|
pranner/CMPUT410-Lab6-Django
|
v1/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_list.py
|
75
|
17069
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i + 1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
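            # For example (worked through the branches below): with 50 pages
            # and page_num == 25 (0-based), this yields
            # [0, 1, '.', 22, 23, 24, 25, 26, 27, 28, '.', 48, 49],
            # which paginator_number() renders as 1, 2, ..., 23-29, ..., 49, 50.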
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
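            # For illustration: make_qs_param('desc', 3) -> '-3' and
            # make_qs_param('asc', 1) -> '1'; the lists built below are later
            # joined with '.' into ORDER_VAR values such as '1.-3'.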
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes)) if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
link_or_text = format_html(
'<a href="{0}"{1}>{2}</a>',
url,
                    format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id) if cl.is_popup else '',
result_repr)
yield format_html('<{0}{1}>{2}</{3}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda filters: cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
apache-2.0
|
nelmiux/CarnotKE
|
jyhton/lib-python/2.7/plat-mac/lib-scriptpackages/Finder/Standard_Suite.py
|
73
|
12423
|
"""Suite Standard Suite: Common terms that most applications should support
Level 1, version 1
Generated from /System/Library/CoreServices/Finder.app
AETE/AEUT resource version 0/144, language 0, script 0
"""
import aetools
import MacOS
_code = 'CoRe'
from StdSuites.Standard_Suite import *
class Standard_Suite_Events(Standard_Suite_Events):
def close(self, _object, _attributes={}, **_arguments):
"""close: Close an object
Required argument: the object to close
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'core'
_subcode = 'clos'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_count = {
'each' : 'kocl',
}
def count(self, _object, _attributes={}, **_arguments):
"""count: Return the number of elements of a particular class within an object
Required argument: the object whose elements are to be counted
Keyword argument each: the class of the elements to be counted
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the number of elements
"""
_code = 'core'
_subcode = 'cnte'
aetools.keysubst(_arguments, self._argmap_count)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_data_size = {
'as' : 'rtyp',
}
def data_size(self, _object, _attributes={}, **_arguments):
"""data size: Return the size in bytes of an object
Required argument: the object whose data size is to be returned
Keyword argument as: the data type for which the size is calculated
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: the size of the object in bytes
"""
_code = 'core'
_subcode = 'dsiz'
aetools.keysubst(_arguments, self._argmap_data_size)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def delete(self, _object, _attributes={}, **_arguments):
"""delete: Move an item from its container to the trash
Required argument: the item to delete
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the item that was just deleted
"""
_code = 'core'
_subcode = 'delo'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_duplicate = {
'to' : 'insh',
'replacing' : 'alrp',
'routing_suppressed' : 'rout',
}
def duplicate(self, _object, _attributes={}, **_arguments):
"""duplicate: Duplicate one or more object(s)
Required argument: the object(s) to duplicate
Keyword argument to: the new location for the object(s)
Keyword argument replacing: Specifies whether or not to replace items in the destination that have the same name as items being duplicated
Keyword argument routing_suppressed: Specifies whether or not to autoroute items (default is false). Only applies when copying to the system folder.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the duplicated object(s)
"""
_code = 'core'
_subcode = 'clon'
aetools.keysubst(_arguments, self._argmap_duplicate)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'alrp', _Enum_bool)
aetools.enumsubst(_arguments, 'rout', _Enum_bool)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def exists(self, _object, _attributes={}, **_arguments):
"""exists: Verify if an object exists
Required argument: the object in question
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: true if it exists, false if not
"""
_code = 'core'
_subcode = 'doex'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_make = {
'new' : 'kocl',
'at' : 'insh',
'to' : 'to ',
'with_properties' : 'prdt',
}
def make(self, _no_object=None, _attributes={}, **_arguments):
"""make: Make a new element
Keyword argument new: the class of the new element
Keyword argument at: the location at which to insert the element
Keyword argument to: when creating an alias file, the original item to create an alias to or when creating a file viewer window, the target of the window
Keyword argument with_properties: the initial values for the properties of the element
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the new object(s)
"""
_code = 'core'
_subcode = 'crel'
aetools.keysubst(_arguments, self._argmap_make)
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_move = {
'to' : 'insh',
'replacing' : 'alrp',
'positioned_at' : 'mvpl',
'routing_suppressed' : 'rout',
}
def move(self, _object, _attributes={}, **_arguments):
"""move: Move object(s) to a new location
Required argument: the object(s) to move
Keyword argument to: the new location for the object(s)
Keyword argument replacing: Specifies whether or not to replace items in the destination that have the same name as items being moved
Keyword argument positioned_at: Gives a list (in local window coordinates) of positions for the destination items
Keyword argument routing_suppressed: Specifies whether or not to autoroute items (default is false). Only applies when moving to the system folder.
Keyword argument _attributes: AppleEvent attribute dictionary
Returns: to the object(s) after they have been moved
"""
_code = 'core'
_subcode = 'move'
aetools.keysubst(_arguments, self._argmap_move)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'alrp', _Enum_bool)
aetools.enumsubst(_arguments, 'mvpl', _Enum_list)
aetools.enumsubst(_arguments, 'rout', _Enum_bool)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_open = {
'using' : 'usin',
'with_properties' : 'prdt',
}
def open(self, _object, _attributes={}, **_arguments):
"""open: Open the specified object(s)
Required argument: list of objects to open
Keyword argument using: the application file to open the object with
Keyword argument with_properties: the initial values for the properties, to be included with the open command sent to the application that opens the direct object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'odoc'
aetools.keysubst(_arguments, self._argmap_open)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_print_ = {
'with_properties' : 'prdt',
}
def print_(self, _object, _attributes={}, **_arguments):
"""print: Print the specified object(s)
Required argument: list of objects to print
Keyword argument with_properties: optional properties to be included with the print command sent to the application that prints the direct object
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'pdoc'
aetools.keysubst(_arguments, self._argmap_print_)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def quit(self, _no_object=None, _attributes={}, **_arguments):
"""quit: Quit the Finder
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'quit'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object is not None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def select(self, _object, _attributes={}, **_arguments):
"""select: Select the specified object(s)
Required argument: the object to select
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'misc'
_subcode = 'slct'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_Enum_list = None # XXXX enum list not found!!
_Enum_bool = None # XXXX enum bool not found!!
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
apache-2.0
|
mikeboers/Nitrogen
|
nitrogen/imgsizer.py
|
1
|
7970
|
from __future__ import division
import math
import os
import logging
import Image as image
from cStringIO import StringIO
import datetime
import hashlib
import sys
import base64
import struct
from urlparse import urlparse
from urllib2 import urlopen
from subprocess import call
from .request import Request, Response
from . import status
from . import sign
log = logging.getLogger(__name__)
# TODO:
# - take max_age from config
def encode_int(value):
return base64.urlsafe_b64encode(struct.pack('>I', int(value))).rstrip('=').lstrip('A')
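# For illustration: encode_int(1) packs the value as a big-endian uint32,
# urlsafe-base64 encodes it, then strips '=' padding and leading 'A's,
# giving the short cache-busting token 'Q'.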
class ImgSizer(object):
MODE_FIT = 'fit'
MODE_CROP = 'crop'
MODE_PAD = 'pad'
MODES = (MODE_FIT, MODE_CROP, MODE_PAD)
def __init__(self, path, cache_root, sig_key, max_age):
self.path = [os.path.abspath(x) for x in path]
self.cache_root = cache_root
self.sig_key = sig_key
self.max_age = max_age
def build_url(self, local_path, **kwargs):
for key in 'background mode width height quality format padding'.split():
if key in kwargs:
kwargs[key[0]] = kwargs[key]
del kwargs[key]
# Remote URLs are encoded into the query.
parsed = urlparse(local_path)
if parsed.netloc:
kwargs['u'] = local_path
local_path = '/remote'
# Local ones are not.
else:
abs_path = self.find_img(local_path)
if abs_path:
kwargs['v'] = encode_int(int(os.path.getmtime(abs_path)))
query = sign.sign_query(self.sig_key, kwargs, add_time=False, nonce=False, depends_on=dict(path=local_path))
return local_path + '?' + sign.encode_query(query)
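        # Hypothetical example: build_url('/photos/cat.jpg', width=200) returns
        # something like '/photos/cat.jpg?w=200&v=...&s=...'; the exact query
        # depends on sign.sign_query and the file's mtime.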
def find_img(self, local_path):
local_path = local_path.lstrip('/')
for path_base in self.path:
path = os.path.join(path_base, local_path)
if os.path.exists(path):
return path
def resize(self, img, width=None, height=None, mode=None, background=None):
orig_width, orig_height = img.size
width = min(width, orig_width) if width else None
height = min(height, orig_height) if height else None
if not img.mode.lower().startswith('rgb'):
img = img.convert('RGBA')
if width and height:
fit, crop = sorted([
(width, orig_height * width // orig_width),
(orig_width * height // orig_height, height)
])
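            # Example: resizing an 800x600 source to width=200, height=200
            # gives fit == (200, 150) (fits inside the box) and
            # crop == (266, 200) (covers the box, later center-cropped).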
if mode == self.MODE_FIT or mode == self.MODE_PAD:
img = img.resize(fit, image.ANTIALIAS)
if mode == self.MODE_PAD:
pad_color = {'white': (255, 255, 255)}.get(str(background).lower(), 0)
back = image.new('RGBA', (width, height), pad_color)
back.paste(img, (
(width - fit[0]) // 2,
(height - fit[1]) // 2
))
img = back
elif mode == self.MODE_CROP:
dx = (crop[0] - width) // 2
dy = (crop[1] - height) // 2
img = img.resize(crop, image.ANTIALIAS).crop(
(dx, dy, dx + width, dy + height)
)
else:
img = img.resize((width, height), image.ANTIALIAS)
elif width:
height = orig_height * width // orig_width
img = img.resize((width, height), image.ANTIALIAS)
elif height:
width = orig_width * height // orig_height
img = img.resize((width, height), image.ANTIALIAS)
return img
@Request.application
def __call__(self, request):
path = request.path_info
if not path:
return status.NotFound()
query = dict(request.query.iteritems())
if not sign.verify_query(self.sig_key, query, depends_on=dict(path=path)):
log.warning('signature not accepted')
return status.NotFound()
remote_url = query.get('u')
if remote_url:
# Download the remote file.
path = os.path.join(
self.cache_root,
hashlib.md5(remote_url).hexdigest() + os.path.splitext(remote_url)[1]
)
if not os.path.exists(path):
log.info('downloading %s' % remote_url)
tmp_path = path + '.tmp-' + str(os.getpid())
fh = open(tmp_path, 'wb')
fh.write(urlopen(remote_url).read())
fh.close()
call(['mv', tmp_path, path])
else:
path = self.find_img(path)
if not path:
raise status.NotFound()
raw_mtime = os.path.getmtime(path)
mtime = datetime.datetime.utcfromtimestamp(raw_mtime)
# log.debug('last_modified: %r' % mtime)
# log.debug('if_modified_since: %r' % request.if_modified_since)
if request.if_modified_since and request.if_modified_since >= mtime:
return status.NotModified()
mode = request.query.get('m')
background = request.query.get('b')
width = request.query.get('w')
width = int(width) if width else None
height = request.query.get('h')
height = int(height) if height else None
quality = request.query.get('q')
quality = int(quality) if quality else 75
format = request.query.get('f', '').lower() or os.path.splitext(path)[1][1:] or 'jpeg'
format = {'jpg' : 'jpeg'}.get(format, format)
has_version = 'v' in request.query
cache_key = hashlib.md5(repr((
path, mode, width, height, quality, format, background
))).hexdigest()
cache_path = os.path.join(self.cache_root, cache_key + '.' + format)
cache_mtime = os.path.getmtime(cache_path) if os.path.exists(cache_path) else None
if not cache_mtime or cache_mtime < raw_mtime:
log.info('resizing %r for %s' % (request.path_info, request.query))
img = image.open(path)
img = self.resize(img, width=width, height=height, mode=mode, background=background)
try:
cache_file = open(cache_path, 'wb')
img.save(cache_file, format, quality=quality)
cache_file.close()
except Exception as e:
log.exception('error while saving image to cache')
return Response().send_file(cache_path,
mimetype='image/%s' % format,
cache_max_age=31536000 if has_version else self.max_age,
)
class ImgSizerAppMixin(object):
def setup_config(self):
super(ImgSizerAppMixin, self).setup_config()
self.config.setdefaults(
imgsizer_path=[],
imgsizer_max_age=3600,
imgsizer_cache_dir='/tmp',
imgsizer_url_base='__img',
)
def __init__(self, *args, **kwargs):
super(ImgSizerAppMixin, self).__init__(*args, **kwargs)
self.imgsizer = ImgSizer(
self.config.imgsizer_path,
self.config.imgsizer_cache_dir,
self.config.private_key or os.urandom(32),
self.config.imgsizer_max_age,
)
self.route('/' + self.config.imgsizer_url_base, self.imgsizer)
self.view_globals['auto_img_src'] = self.auto_img_src
def auto_img_src(self, *args, **kwargs):
return '/' + self.config.imgsizer_url_base + self.imgsizer.build_url(*args, **kwargs)
if __name__ == '__main__':
#
    __app__ = ImgSizer(
        path=[],
        cache_root='/tmp',
        sig_key='awesome',
        max_age=3600,
    )
print __app__.build_url('/mock/photos/2459172663_35af8640ff.jpg', width=200)
|
bsd-3-clause
|
nitzmahone/ansible
|
lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py
|
25
|
3756
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Russell Teague <rteague2 () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_vsan_cluster
short_description: Configure VSAN clustering on an ESXi host
description:
- This module can be used to configure VSAN clustering on an ESXi host
version_added: 2.0
author:
- Russell Teague (@mtnbikenc)
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
cluster_uuid:
description:
- Desired cluster UUID
required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Configure VMware VSAN Cluster
hosts: deploy_node
tags:
- vsan
tasks:
- name: Configure VSAN on first host
vmware_vsan_cluster:
hostname: "{{ groups['esxi'][0] }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
delegate_to: localhost
register: vsan_cluster
- name: Configure VSAN on remaining hosts
vmware_vsan_cluster:
hostname: "{{ item }}"
username: "{{ esxi_username }}"
password: "{{ site_password }}"
cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
delegate_to: localhost
with_items: "{{ groups['esxi'][1:] }}"
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec,
wait_for_task)
def create_vsan_cluster(host_system, new_cluster_uuid):
host_config_manager = host_system.configManager
vsan_system = host_config_manager.vsanSystem
vsan_config = vim.vsan.host.ConfigInfo()
vsan_config.enabled = True
if new_cluster_uuid is not None:
vsan_config.clusterInfo = vim.vsan.host.ConfigInfo.ClusterInfo()
vsan_config.clusterInfo.uuid = new_cluster_uuid
vsan_config.storageInfo = vim.vsan.host.ConfigInfo.StorageInfo()
vsan_config.storageInfo.autoClaimStorage = True
task = vsan_system.UpdateVsan_Task(vsan_config)
changed, result = wait_for_task(task)
host_status = vsan_system.QueryHostStatus()
cluster_uuid = host_status.uuid
return changed, result, cluster_uuid
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
new_cluster_uuid = module.params['cluster_uuid']
try:
content = connect_to_api(module, False)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
        host_system = list(host.keys())[0]
changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
sargas/scipy
|
scipy/spatial/distance.py
|
3
|
72559
|
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
   cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
dice -- the Dice dissimilarity (boolean).
euclidean -- the Euclidean distance.
hamming -- the Hamming distance (boolean).
jaccard -- the Jaccard distance (boolean).
kulsinski -- the Kulsinski distance (boolean).
mahalanobis -- the Mahalanobis distance.
matching -- the matching dissimilarity (boolean).
minkowski -- the Minkowski distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity (boolean).
russellrao -- the Russell-Rao dissimilarity (boolean).
seuclidean -- the normalized Euclidean distance.
sokalmichener -- the Sokal-Michener dissimilarity (boolean).
sokalsneath -- the Sokal-Sneath dissimilarity (boolean).
sqeuclidean -- the squared Euclidean distance.
wminkowski -- the weighted Minkowski distance.
yule -- the Yule dissimilarity (boolean).
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.linalg import norm
from scipy.lib.six import callable, string_types
from scipy.lib.six.moves import xrange
from . import _distance_wrap
import collections
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def minkowski(u, v, p):
"""
Computes the Minkowski distance between two 1-D arrays.
The Minkowski distance between 1-D arrays `u` and `v`,
is defined as
.. math::
{||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
Returns
-------
d : double
The Minkowski distance between vectors `u` and `v`.
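    Examples
    --------
    A quick sanity check of the formula above (values worked out by hand):
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951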
"""
u = _validate_vector(u)
v = _validate_vector(v)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(u - v, ord=p)
return dist
def wminkowski(u, v, p, w):
"""
Computes the weighted Minkowski distance between two 1-D arrays.
The weighted Minkowski distance between `u` and `v`, defined as
.. math::
\\left(\\sum{(w_i |u_i - v_i|^p)}\\right)^{1/p}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
p : int
The order of the norm of the difference :math:`{||u-v||}_p`.
w : (N,) array_like
The weight vector.
Returns
-------
wminkowski : double
The weighted Minkowski distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
w = _validate_vector(w)
if p < 1:
raise ValueError("p must be at least 1")
dist = norm(w * (u - v), ord=p)
return dist
def euclidean(u, v):
"""
Computes the Euclidean distance between two 1-D arrays.
The Euclidean distance between 1-D arrays `u` and `v`, is defined as
.. math::
{||u-v||}_2
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
euclidean : double
The Euclidean distance between vectors `u` and `v`.
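    Examples
    --------
    A quick sanity check (the answer is sqrt(2), worked out by hand):
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951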
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = norm(u - v)
return dist
def sqeuclidean(u, v):
"""
Computes the squared Euclidean distance between two 1-D arrays.
The squared Euclidean distance between `u` and `v` is defined as
.. math::
{||u-v||}_2^2.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
sqeuclidean : double
The squared Euclidean distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = ((u - v) ** 2).sum()
return dist
def cosine(u, v):
"""
Computes the Cosine distance between 1-D arrays.
The Cosine distance between `u` and `v`, is defined as
.. math::
1 - \\frac{u \\cdot v}
{||u||_2 ||v||_2}.
where :math:`u \\cdot v` is the dot product of :math:`u` and
:math:`v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cosine : double
The Cosine distance between vectors `u` and `v`.
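    Examples
    --------
    A small illustrative check: orthogonal vectors have cosine distance 1.
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0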
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = 1.0 - np.dot(u, v) / (norm(u) * norm(v))
return dist
def correlation(u, v):
"""
Computes the correlation distance between two 1-D arrays.
The correlation distance between `u` and `v`, is
defined as
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{u}` is the mean of the elements of `u`
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
correlation : double
The correlation distance between 1-D array `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
umu = u.mean()
vmu = v.mean()
um = u - umu
vm = v - vmu
dist = 1.0 - np.dot(um, vm) / (norm(um) * norm(vm))
return dist
def hamming(u, v):
"""
Computes the Hamming distance between two 1-D arrays.
The Hamming distance between 1-D arrays `u` and `v`, is simply the
proportion of disagreeing components in `u` and `v`. If `u` and `v` are
boolean vectors, the Hamming distance is
.. math::
\\frac{c_{01} + c_{10}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
hamming : double
The Hamming distance between vectors `u` and `v`.
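    Examples
    --------
    A small illustrative check: the vectors below differ in 2 of 4 positions.
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 1, 0], [1, 1, 1, 1])
    0.5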
"""
u = _validate_vector(u)
v = _validate_vector(v)
return (u != v).mean()
def jaccard(u, v):
"""
Computes the Jaccard-Needham dissimilarity between two boolean 1-D arrays.
The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT}}
{c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
jaccard : double
The Jaccard distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
dist = (np.double(np.bitwise_and((u != v),
np.bitwise_or(u != 0, v != 0)).sum())
/ np.double(np.bitwise_or(u != 0, v != 0).sum()))
return dist
def kulsinski(u, v):
"""
Computes the Kulsinski dissimilarity between two boolean 1-D arrays.
The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{c_{TF} + c_{FT} - c_{TT} + n}
{c_{FT} + c_{TF} + n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
kulsinski : double
The Kulsinski distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
n = float(len(u))
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
"""
Returns the standardized Euclidean distance between two 1-D arrays.
The standardized Euclidean distance between `u` and `v`.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
V : (N,) array_like
        `V` is a 1-D array of component variances. It is usually computed
        among a larger collection of vectors.
Returns
-------
seuclidean : double
The standardized Euclidean distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
V = _validate_vector(V, dtype=np.float64)
if V.shape[0] != u.shape[0] or u.shape[0] != v.shape[0]:
raise TypeError('V must be a 1-D array of the same dimension '
'as u and v.')
return np.sqrt(((u - v) ** 2 / V).sum())
def cityblock(u, v):
"""
Computes the City Block (Manhattan) distance.
Computes the Manhattan distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\sum_i {\\left| u_i - v_i \\right|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
cityblock : double
The City Block (Manhattan) distance between vectors `u` and `v`.
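    Examples
    --------
    A small illustrative check: |1-4| + |2-5| + |3-6| = 9.
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 2, 3], [4, 5, 6])
    9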
"""
u = _validate_vector(u)
v = _validate_vector(v)
return abs(u - v).sum()
def mahalanobis(u, v, VI):
"""
Computes the Mahalanobis distance between two 1-D arrays.
The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as
.. math::
\\sqrt{ (u-v) V^{-1} (u-v)^T }
where ``V`` is the covariance matrix. Note that the argument `VI`
is the inverse of ``V``.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
VI : ndarray
The inverse of the covariance matrix.
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
VI = np.atleast_2d(VI)
delta = u - v
m = np.dot(np.dot(delta, VI), delta)
return np.sqrt(m)
def chebyshev(u, v):
"""
Computes the Chebyshev distance.
Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
which is defined as
.. math::
\\max_i {|u_i-v_i|}.
Parameters
----------
u : (N,) array_like
Input vector.
v : (N,) array_like
Input vector.
Returns
-------
chebyshev : double
The Chebyshev distance between vectors `u` and `v`.
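    Examples
    --------
    A small illustrative check: the largest coordinate difference is 4.
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([2, 0, 0], [-2, 3, 0])
    4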
"""
u = _validate_vector(u)
v = _validate_vector(v)
return max(abs(u - v))
def braycurtis(u, v):
"""
Computes the Bray-Curtis distance between two 1-D arrays.
Bray-Curtis distance is defined as
.. math::
\\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}
The Bray-Curtis distance is in the range [0, 1] if all coordinates are
positive, and is undefined if the inputs are of length zero.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
braycurtis : double
The Bray-Curtis distance between 1-D arrays `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
return abs(u - v).sum() / abs(u + v).sum()
def canberra(u, v):
"""
Computes the Canberra distance between two 1-D arrays.
The Canberra distance is defined as
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
Parameters
----------
u : (N,) array_like
Input array.
v : (N,) array_like
Input array.
Returns
-------
canberra : double
The Canberra distance between vectors `u` and `v`.
Notes
-----
When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
used in the calculation.
"""
u = _validate_vector(u)
v = _validate_vector(v, dtype=np.float64)
olderr = np.seterr(invalid='ignore')
try:
d = np.nansum(abs(u - v) / (abs(u) + abs(v)))
finally:
np.seterr(**olderr)
return d
def _nbool_correspond_all(u, v):
if u.dtype != v.dtype:
raise TypeError("Arrays being compared must be of the same data type.")
if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
elif u.dtype == np.bool:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
raise TypeError("Arrays being compared have unknown type.")
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v):
if u.dtype == np.int or u.dtype == np.float_ or u.dtype == np.double:
not_u = 1.0 - u
not_v = 1.0 - v
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
else:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
return (nft, ntf)
def yule(u, v):
"""
Computes the Yule dissimilarity between two boolean 1-D arrays.
The Yule dissimilarity is defined as
.. math::
        \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}` (matching the computation below).
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
yule : double
The Yule dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * ntf * nft) / float(ntt * nff + ntf * nft)
def matching(u, v):
"""
Computes the Matching dissimilarity between two boolean 1-D arrays.
The Matching dissimilarity between two boolean 1-D arrays
`u` and `v`, is defined as
.. math::
\\frac{c_{TF} + c_{FT}}{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
matching : double
The Matching dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(nft + ntf) / float(len(u))
def dice(u, v):
"""
Computes the Dice dissimilarity between two boolean 1-D arrays.
The Dice dissimilarity between `u` and `v`, is
.. math::
\\frac{c_{TF} + c_{FT}}
{2c_{TT} + c_{FT} + c_{TF}}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) ndarray, bool
Input 1-D array.
v : (N,) ndarray, bool
Input 1-D array.
Returns
-------
dice : double
The Dice dissimilarity between 1-D arrays `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(ntf + nft) / float(2.0 * ntt + ntf + nft)
def rogerstanimoto(u, v):
"""
Computes the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.
The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
`u` and `v`, is defined as
.. math::
\\frac{R}
{c_{TT} + c_{FF} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
rogerstanimoto : double
The Rogers-Tanimoto dissimilarity between vectors
`u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
(nff, nft, ntf, ntt) = _nbool_correspond_all(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + (2.0 * (ntf + nft)))
def russellrao(u, v):
"""
Computes the Russell-Rao dissimilarity between two boolean 1-D arrays.
The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
`v`, is defined as
.. math::
\\frac{n - c_{TT}}
{n}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
russellrao : double
The Russell-Rao dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
return float(len(u) - ntt) / float(len(u))
def sokalmichener(u, v):
"""
Computes the Sokal-Michener dissimilarity between two boolean 1-D arrays.
The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
is defined as
.. math::
\\frac{R}
{S + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
:math:`S = c_{FF} + c_{TT}`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalmichener : double
The Sokal-Michener dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
nff = (~u & ~v).sum()
else:
ntt = (u * v).sum()
nff = ((1.0 - u) * (1.0 - v)).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v):
"""
Computes the Sokal-Sneath dissimilarity between two boolean 1-D arrays.
The Sokal-Sneath dissimilarity between `u` and `v`,
.. math::
\\frac{R}
{c_{TT} + R}
where :math:`c_{ij}` is the number of occurrences of
:math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
:math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.
Parameters
----------
u : (N,) array_like, bool
Input array.
v : (N,) array_like, bool
Input array.
Returns
-------
sokalsneath : double
The Sokal-Sneath dissimilarity between vectors `u` and `v`.
"""
u = _validate_vector(u)
v = _validate_vector(v)
if u.dtype == np.bool:
ntt = (u & v).sum()
else:
ntt = (u * v).sum()
(nft, ntf) = _nbool_correspond_ft_tf(u, v)
denom = ntt + 2.0 * (ntf + nft)
if denom == 0:
raise ValueError('Sokal-Sneath dissimilarity is not defined for '
'vectors that are entirely false.')
return float(2.0 * (ntf + nft)) / denom
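# A short sketch of both code paths above: with u = [T, F, T, T] and
# v = [T, T, F, T], R = 2*(ntf + nft) = 4 and ntt = 2, giving 4 / (2 + 4);
# two all-False vectors hit the zero-denominator guard instead.
#
#   >>> sokalsneath(np.array([True, False, True, True]),
#   ...             np.array([True, True, False, True]))
#   0.6666666666666666
#   >>> sokalsneath(np.zeros(3, dtype=bool), np.zeros(3, dtype=bool))
#   Traceback (most recent call last):
#       ...
#   ValueError: Sokal-Sneath dissimilarity is not defined for vectors that are entirely false.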
def pdist(X, metric='euclidean', p=2, w=None, V=None, VI=None):
"""
Pairwise distances between observations in n-dimensional space.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
               d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                        {\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Computes the matching distance between each pair of boolean
vectors. (see matching function documentation)
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski')``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : string or function
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'kulsinski', 'mahalanobis', 'matching',
        'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski',
        'yule'.
w : ndarray
The weight vector (for weighted Minkowski).
p : double
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray
The variance vector (for standardized Euclidean).
VI : ndarray
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
Returns a condensed distance matrix Y. For
each :math:`i` and :math:`j` (where :math:`i<j<n`), the
metric ``dist(u=X[i], v=X[j])`` is computed and stored in entry ``ij``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
"""
# 21. Y = pdist(X, 'test_Y')
#
# Computes the distance between all pairs of vectors in X
# using the distance metric Y but with a more succinct,
# verifiable, but less efficient implementation.
X = np.asarray(X, order='c')
# The C code doesn't do striding.
[X] = _copy_arrays_if_base_present([_convert_to_double(X)])
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
dm = np.zeros((m * (m - 1) / 2,), dtype=np.double)
wmink_names = ['wminkowski', 'wmi', 'wm', 'wpnorm']
if w is None and (metric == wminkowski or metric in wmink_names):
raise ValueError('weighted minkowski requires a weight '
'vector `w` to be given.')
if callable(metric):
if metric == minkowski:
def dfun(u, v):
return minkowski(u, v, p)
elif metric == wminkowski:
def dfun(u, v):
return wminkowski(u, v, p, w)
elif metric == seuclidean:
def dfun(u, v):
return seuclidean(u, v, V)
elif metric == mahalanobis:
def dfun(u, v):
return mahalanobis(u, v, V)
else:
dfun = metric
k = 0
for i in xrange(0, m - 1):
for j in xrange(i + 1, m):
dm[k] = dfun(X[i], X[j])
k = k + 1
elif isinstance(metric, string_types):
mstr = metric.lower()
#if X.dtype != np.double and \
# (mstr != 'hamming' and mstr != 'jaccard'):
# TypeError('A double array must be passed.')
if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
_distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm)
elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
_distance_wrap.pdist_euclidean_wrap(_convert_to_double(X), dm)
dm = dm ** 2.0
elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
_distance_wrap.pdist_city_block_wrap(X, dm)
elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
if X.dtype == np.bool:
_distance_wrap.pdist_hamming_bool_wrap(_convert_to_bool(X), dm)
else:
_distance_wrap.pdist_hamming_wrap(_convert_to_double(X), dm)
elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
if X.dtype == np.bool:
_distance_wrap.pdist_jaccard_bool_wrap(_convert_to_bool(X), dm)
else:
_distance_wrap.pdist_jaccard_wrap(_convert_to_double(X), dm)
elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
_distance_wrap.pdist_chebyshev_wrap(_convert_to_double(X), dm)
elif mstr in set(['minkowski', 'mi', 'm']):
_distance_wrap.pdist_minkowski_wrap(_convert_to_double(X), dm, p)
elif mstr in wmink_names:
_distance_wrap.pdist_weighted_minkowski_wrap(_convert_to_double(X),
dm, p, np.asarray(w))
elif mstr in set(['seuclidean', 'se', 's']):
if V is not None:
V = np.asarray(V, order='c')
if type(V) != np.ndarray:
raise TypeError('Variance vector V must be a numpy array')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must '
'be one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the distances '
'are computed.')
# The C code doesn't do striding.
[VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
else:
VV = np.var(X, axis=0, ddof=1)
_distance_wrap.pdist_seuclidean_wrap(_convert_to_double(X), VV, dm)
# Need to test whether vectorized cosine works better.
# Find out: Is there a dot subtraction operator so I can
# subtract matrices in a similar way to multiplying them?
# Need to get rid of as much unnecessary C code as possible.
elif mstr in set(['cosine', 'cos']):
norms = np.sqrt(np.sum(X * X, axis=1))
_distance_wrap.pdist_cosine_wrap(_convert_to_double(X), dm, norms)
elif mstr in set(['old_cosine', 'old_cos']):
norms = np.sqrt(np.sum(X * X, axis=1))
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[xrange(0, m), xrange(0, m)] = 0.0
dm = squareform(dm)
elif mstr in set(['correlation', 'co']):
X2 = X - X.mean(1)[:, np.newaxis]
#X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
norms = np.sqrt(np.sum(X2 * X2, axis=1))
_distance_wrap.pdist_cosine_wrap(_convert_to_double(X2),
_convert_to_double(dm),
_convert_to_double(norms))
elif mstr in set(['mahalanobis', 'mahal', 'mah']):
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
if type(VI) != np.ndarray:
raise TypeError('VI must be a numpy array.')
if VI.dtype != np.double:
raise TypeError('The array must contain 64-bit floats.')
[VI] = _copy_arrays_if_base_present([VI])
else:
V = np.cov(X.T)
VI = _convert_to_double(np.linalg.inv(V).T.copy())
# (u-v)V^(-1)(u-v)^T
_distance_wrap.pdist_mahalanobis_wrap(_convert_to_double(X),
VI, dm)
elif mstr == 'canberra':
_distance_wrap.pdist_canberra_wrap(_convert_to_double(X), dm)
elif mstr == 'braycurtis':
_distance_wrap.pdist_bray_curtis_wrap(_convert_to_double(X), dm)
elif mstr == 'yule':
_distance_wrap.pdist_yule_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'matching':
_distance_wrap.pdist_matching_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'kulsinski':
_distance_wrap.pdist_kulsinski_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'dice':
_distance_wrap.pdist_dice_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'rogerstanimoto':
_distance_wrap.pdist_rogerstanimoto_bool_wrap(_convert_to_bool(X),
dm)
elif mstr == 'russellrao':
_distance_wrap.pdist_russellrao_bool_wrap(_convert_to_bool(X), dm)
elif mstr == 'sokalmichener':
_distance_wrap.pdist_sokalmichener_bool_wrap(_convert_to_bool(X),
dm)
elif mstr == 'sokalsneath':
_distance_wrap.pdist_sokalsneath_bool_wrap(_convert_to_bool(X), dm)
elif metric == 'test_euclidean':
dm = pdist(X, euclidean)
elif metric == 'test_sqeuclidean':
if V is None:
V = np.var(X, axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = pdist(X, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_braycurtis':
dm = pdist(X, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
V = np.cov(X.T)
VI = np.linalg.inv(V)
else:
VI = np.asarray(VI, order='c')
[VI] = _copy_arrays_if_base_present([VI])
# (u-v)V^(-1)(u-v)^T
dm = pdist(X, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = pdist(X, canberra)
elif metric == 'test_cityblock':
dm = pdist(X, cityblock)
elif metric == 'test_minkowski':
dm = pdist(X, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = pdist(X, wminkowski, p=p, w=w)
elif metric == 'test_cosine':
dm = pdist(X, cosine)
elif metric == 'test_correlation':
dm = pdist(X, correlation)
elif metric == 'test_hamming':
dm = pdist(X, hamming)
elif metric == 'test_jaccard':
dm = pdist(X, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = pdist(X, chebyshev)
elif metric == 'test_yule':
dm = pdist(X, yule)
elif metric == 'test_matching':
dm = pdist(X, matching)
elif metric == 'test_dice':
dm = pdist(X, dice)
elif metric == 'test_kulsinski':
dm = pdist(X, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = pdist(X, rogerstanimoto)
elif metric == 'test_russellrao':
dm = pdist(X, russellrao)
elif metric == 'test_sokalsneath':
dm = pdist(X, sokalsneath)
elif metric == 'test_sokalmichener':
dm = pdist(X, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
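# A minimal usage sketch: pdist returns the condensed distance vector in the
# order (0,1), (0,2), ..., (1,2), ... for the rows of X.
#
#   >>> X = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
#   >>> pdist(X, 'euclidean')        # d(0,1)=5, d(0,2)=10, d(1,2)=5
#   array([  5.,  10.,   5.])
#   >>> pdist(X, 'cityblock')
#   array([  7.,  14.,   7.])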
def squareform(X, force="no", checks=True):
"""
Converts a vector-form distance vector to a square-form distance
matrix, and vice-versa.
    Parameters
    ----------
    X : ndarray
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to 'tovector' or 'tomatrix',
        the input will be treated as a distance matrix or distance vector
        respectively.
    checks : bool, optional
        If `checks` is set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored anyway so they do not disrupt the
        squareform transformation.
    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.
Notes
-----
1. v = squareform(X)
Given a square d-by-d symmetric distance matrix X,
``v=squareform(X)`` returns a ``d * (d-1) / 2`` (or
`${n \\choose 2}$`) sized vector v.
v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)] is the distance
between points i and j. If X is non-square or asymmetric, an error
is returned.
2. X = squareform(v)
       Given a d*(d-1)/2 sized v for some integer d>=2 encoding distances
as described, X=squareform(v) returns a d by d distance matrix X. The
X[i, j] and X[j, i] values are set to
       v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)] and all
diagonal elements are zero.
"""
X = _convert_to_double(np.asarray(X, order='c'))
if not np.issubsctype(X, np.double):
raise TypeError('A double array must be passed.')
s = X.shape
if force.lower() == 'tomatrix':
if len(s) != 1:
raise ValueError("Forcing 'tomatrix' but input X is not a "
"distance vector.")
elif force.lower() == 'tovector':
if len(s) != 2:
raise ValueError("Forcing 'tovector' but input X is not a "
"distance matrix.")
# X = squareform(v)
if len(s) == 1:
if X.shape[0] == 0:
return np.zeros((1, 1), dtype=np.double)
# Grab the closest value to the square root of the number
# of elements times 2 to see if the number of elements
# is indeed a binomial coefficient.
d = int(np.ceil(np.sqrt(X.shape[0] * 2)))
# Check that v is of valid dimensions.
if d * (d - 1) / 2 != int(s[0]):
raise ValueError('Incompatible vector size. It must be a binomial '
'coefficient n choose 2 for some integer n >= 2.')
# Allocate memory for the distance matrix.
M = np.zeros((d, d), dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[X] = _copy_arrays_if_base_present([X])
# Fill in the values of the distance matrix.
_distance_wrap.to_squareform_from_vector_wrap(M, X)
# Return the distance matrix.
M = M + M.transpose()
return M
elif len(s) == 2:
if s[0] != s[1]:
raise ValueError('The matrix argument must be square.')
if checks:
is_valid_dm(X, throw=True, name='X')
# One-side of the dimensions is set here.
d = s[0]
if d <= 1:
return np.array([], dtype=np.double)
# Create a vector.
v = np.zeros(((d * (d - 1) / 2),), dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[X] = _copy_arrays_if_base_present([X])
# Convert the vector to squareform.
_distance_wrap.to_vector_from_squareform_wrap(X, v)
return v
else:
raise ValueError(('The first argument must be one or two dimensional '
'array. A %d-dimensional array is not '
'permitted') % len(s))
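# A minimal usage sketch: squareform converts the condensed vector produced by
# pdist into a symmetric zero-diagonal matrix and back again.
#
#   >>> v = pdist(np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]]))
#   >>> D = squareform(v)            # 3 x 3, D[i, j] == D[j, i], zero diagonal
#   >>> np.allclose(squareform(D), v)
#   True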
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Returns True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays containing
doubles. They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
The name of the variable to checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if D.dtype != np.double:
if name:
raise TypeError(('Distance matrix \'%s\' must contain doubles '
'(double).') % name)
else:
raise TypeError('Distance matrix must contain doubles '
'(double).')
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
if not (D - D.T <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
% (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
else:
                    raise ValueError(('Distance matrix diagonal must be'
                                      ' close to zero within tolerance '
                                      '%5.5f.') % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def is_valid_y(y, warning=False, throw=False, name=None):
"""
Returns True if the input array is a valid condensed distance matrix.
Condensed distance matrices must be 1-dimensional
numpy arrays containing doubles. Their length must be a binomial
coefficient :math:`{n \\choose 2}` for some positive integer n.
Parameters
----------
y : ndarray
The condensed distance matrix.
warning : bool, optional
Invokes a warning if the variable passed is not a valid
condensed distance matrix. The warning message explains why
the distance matrix is not valid. `name` is used when
referencing the offending variable.
    throw : bool, optional
Throws an exception if the variable passed is not a valid
condensed distance matrix.
    name : str, optional
Used when referencing the offending variable in the
warning or exception message.
"""
y = np.asarray(y, order='c')
valid = True
try:
if type(y) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a condensed distance '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable is not a numpy array.')
if y.dtype != np.double:
if name:
raise TypeError(('Condensed distance matrix \'%s\' must '
'contain doubles (double).') % name)
else:
raise TypeError('Condensed distance matrix must contain '
'doubles (double).')
if len(y.shape) != 1:
if name:
raise ValueError(('Condensed distance matrix \'%s\' must '
'have shape=1 (i.e. be one-dimensional).')
% name)
else:
raise ValueError('Condensed distance matrix must have shape=1 '
'(i.e. be one-dimensional).')
n = y.shape[0]
d = int(np.ceil(np.sqrt(n * 2)))
if (d * (d - 1) / 2) != n:
if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, '
                                  'i.e. there must be a k such that '
                                  '(k \choose 2)=n!') % name)
else:
raise ValueError('Length n of condensed distance matrix must '
'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \choose 2)=n!')
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
def num_obs_dm(d):
"""
Returns the number of original observations that correspond to a
square, redundant distance matrix.
Parameters
----------
d : ndarray
The target distance matrix.
Returns
-------
num_obs_dm : int
The number of observations in the redundant distance matrix.
"""
d = np.asarray(d, order='c')
is_valid_dm(d, tol=np.inf, throw=True, name='d')
return d.shape[0]
def num_obs_y(Y):
"""
Returns the number of original observations that correspond to a
condensed distance matrix.
Parameters
----------
Y : ndarray
Condensed distance matrix.
Returns
-------
n : int
The number of observations in the condensed distance matrix `Y`.
"""
Y = np.asarray(Y, order='c')
is_valid_y(Y, throw=True, name='Y')
k = Y.shape[0]
if k == 0:
raise ValueError("The number of observations cannot be determined on "
"an empty distance matrix.")
d = int(np.ceil(np.sqrt(k * 2)))
if (d * (d - 1) / 2) != k:
raise ValueError("Invalid condensed distance matrix passed. Must be "
"some k where k=(n choose 2) for some n >= 2.")
return d
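# A quick sketch: a condensed matrix of length k corresponds to n observations
# when k == n*(n-1)/2, which is what the ceil(sqrt(2k)) step above recovers,
# e.g. k = 10 gives n = 5.
#
#   >>> num_obs_y(np.zeros(10))
#   5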
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
"""
Computes distance between each pair of the two collections of inputs.
The following are common calling conventions:
1. ``Y = cdist(XA, XB, 'euclidean')``
Computes the distance between :math:`m` points using
Euclidean distance (2-norm) as the distance metric between the
points. The points are arranged as :math:`m`
:math:`n`-dimensional row vectors in the matrix X.
2. ``Y = cdist(XA, XB, 'minkowski', p)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.
3. ``Y = cdist(XA, XB, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = cdist(XA, XB, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = cdist(XA, XB, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.
7. ``Y = cdist(XA, XB, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = cdist(XA, XB, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = cdist(XA, XB, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree where at least one of them is non-zero.
10. ``Y = cdist(XA, XB, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}.
11. ``Y = cdist(XA, XB, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}.
12. ``Y = cdist(XA, XB, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
              d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                       {\\sum_i {|u_i+v_i|}}
13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`(u-v)(1/V)(u-v)^T` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = cdist(XA, XB, 'yule')``
Computes the Yule distance between the boolean
vectors. (see yule function documentation)
15. ``Y = cdist(XA, XB, 'matching')``
Computes the matching distance between the boolean
vectors. (see matching function documentation)
16. ``Y = cdist(XA, XB, 'dice')``
Computes the Dice distance between the boolean vectors. (see
dice function documentation)
17. ``Y = cdist(XA, XB, 'kulsinski')``
Computes the Kulsinski distance between the boolean
vectors. (see kulsinski function documentation)
18. ``Y = cdist(XA, XB, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between the boolean
vectors. (see rogerstanimoto function documentation)
19. ``Y = cdist(XA, XB, 'russellrao')``
Computes the Russell-Rao distance between the boolean
vectors. (see russellrao function documentation)
20. ``Y = cdist(XA, XB, 'sokalmichener')``
Computes the Sokal-Michener distance between the boolean
vectors. (see sokalmichener function documentation)
21. ``Y = cdist(XA, XB, 'sokalsneath')``
Computes the Sokal-Sneath distance between the vectors. (see
sokalsneath function documentation)
22. ``Y = cdist(XA, XB, 'wminkowski')``
Computes the weighted Minkowski distance between the
vectors. (see sokalsneath function documentation)
23. ``Y = cdist(XA, XB, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = cdist(XA, XB, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = cdist(XA, XB, 'sokalsneath')
Parameters
----------
XA : ndarray
An :math:`m_A` by :math:`n` array of :math:`m_A`
original observations in an :math:`n`-dimensional space.
XB : ndarray
An :math:`m_B` by :math:`n` array of :math:`m_B`
original observations in an :math:`n`-dimensional space.
metric : string or function
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski',
'yule'.
w : ndarray
The weight vector (for weighted Minkowski).
p : double
The p-norm to apply (for Minkowski, weighted and unweighted)
V : ndarray
The variance vector (for standardized Euclidean).
VI : ndarray
The inverse of the covariance matrix (for Mahalanobis).
Returns
-------
Y : ndarray
A :math:`m_A` by :math:`m_B` distance matrix is returned.
For each :math:`i` and :math:`j`, the metric
``dist(u=XA[i], v=XB[j])`` is computed and stored in the
:math:`ij` th entry.
Raises
------
An exception is thrown if ``XA`` and ``XB`` do not have
the same number of columns.
"""
# 21. Y = cdist(XA, XB, 'test_Y')
#
# Computes the distance between all pairs of vectors in X
    # using the distance metric Y but with a more succinct,
# verifiable, but less efficient implementation.
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
#if np.issubsctype(X, np.floating) and not np.issubsctype(X, np.double):
# raise TypeError('Floating point arrays must be 64-bit (got %r).' %
# (X.dtype.type,))
# The C code doesn't do striding.
[XA] = _copy_arrays_if_base_present([_convert_to_double(XA)])
[XB] = _copy_arrays_if_base_present([_convert_to_double(XB)])
s = XA.shape
sB = XB.shape
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
mA = s[0]
mB = sB[0]
n = s[1]
dm = np.zeros((mA, mB), dtype=np.double)
if callable(metric):
if metric == minkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
elif metric == wminkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
elif metric == seuclidean:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
elif metric == mahalanobis:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = mahalanobis(XA[i, :], XB[j, :], V)
else:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i, :], XB[j, :])
elif isinstance(metric, string_types):
mstr = metric.lower()
#if XA.dtype != np.double and \
# (mstr != 'hamming' and mstr != 'jaccard'):
# TypeError('A double array must be passed.')
if mstr in set(['euclidean', 'euclid', 'eu', 'e']):
_distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['sqeuclidean', 'sqe', 'sqeuclid']):
_distance_wrap.cdist_euclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
dm **= 2.0
elif mstr in set(['cityblock', 'cblock', 'cb', 'c']):
_distance_wrap.cdist_city_block_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['hamming', 'hamm', 'ha', 'h']):
if XA.dtype == np.bool:
_distance_wrap.cdist_hamming_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
else:
_distance_wrap.cdist_hamming_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['jaccard', 'jacc', 'ja', 'j']):
if XA.dtype == np.bool:
_distance_wrap.cdist_jaccard_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
else:
_distance_wrap.cdist_jaccard_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']):
_distance_wrap.cdist_chebyshev_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr in set(['minkowski', 'mi', 'm', 'pnorm']):
_distance_wrap.cdist_minkowski_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm, p)
elif mstr in set(['wminkowski', 'wmi', 'wm', 'wpnorm']):
_distance_wrap.cdist_weighted_minkowski_wrap(_convert_to_double(XA),
_convert_to_double(XB),
dm, p,
_convert_to_double(w))
elif mstr in set(['seuclidean', 'se', 's']):
if V is not None:
V = np.asarray(V, order='c')
if type(V) != np.ndarray:
raise TypeError('Variance vector V must be a numpy array')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must be '
'one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the '
'distances are computed.')
# The C code doesn't do striding.
[VV] = _copy_arrays_if_base_present([_convert_to_double(V)])
else:
X = np.vstack([XA, XB])
VV = np.var(X, axis=0, ddof=1)
X = None
del X
_distance_wrap.cdist_seuclidean_wrap(_convert_to_double(XA),
_convert_to_double(XB), VV, dm)
# Need to test whether vectorized cosine works better.
# Find out: Is there a dot subtraction operator so I can
# subtract matrices in a similar way to multiplying them?
# Need to get rid of as much unnecessary C code as possible.
elif mstr in set(['cosine', 'cos']):
normsA = np.sqrt(np.sum(XA * XA, axis=1))
normsB = np.sqrt(np.sum(XB * XB, axis=1))
_distance_wrap.cdist_cosine_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm,
normsA,
normsB)
elif mstr in set(['correlation', 'co']):
XA2 = XA - XA.mean(1)[:, np.newaxis]
XB2 = XB - XB.mean(1)[:, np.newaxis]
#X2 = X - np.matlib.repmat(np.mean(X, axis=1).reshape(m, 1), 1, n)
normsA = np.sqrt(np.sum(XA2 * XA2, axis=1))
normsB = np.sqrt(np.sum(XB2 * XB2, axis=1))
_distance_wrap.cdist_cosine_wrap(_convert_to_double(XA2),
_convert_to_double(XB2),
_convert_to_double(dm),
_convert_to_double(normsA),
_convert_to_double(normsB))
elif mstr in set(['mahalanobis', 'mahal', 'mah']):
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
if type(VI) != np.ndarray:
raise TypeError('VI must be a numpy array.')
if VI.dtype != np.double:
raise TypeError('The array must contain 64-bit floats.')
[VI] = _copy_arrays_if_base_present([VI])
else:
X = np.vstack([XA, XB])
V = np.cov(X.T)
X = None
del X
VI = _convert_to_double(np.linalg.inv(V).T.copy())
# (u-v)V^(-1)(u-v)^T
_distance_wrap.cdist_mahalanobis_wrap(_convert_to_double(XA),
_convert_to_double(XB),
VI, dm)
elif mstr == 'canberra':
_distance_wrap.cdist_canberra_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr == 'braycurtis':
_distance_wrap.cdist_bray_curtis_wrap(_convert_to_double(XA),
_convert_to_double(XB), dm)
elif mstr == 'yule':
_distance_wrap.cdist_yule_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'matching':
_distance_wrap.cdist_matching_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'kulsinski':
_distance_wrap.cdist_kulsinski_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'dice':
_distance_wrap.cdist_dice_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'rogerstanimoto':
_distance_wrap.cdist_rogerstanimoto_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif mstr == 'russellrao':
_distance_wrap.cdist_russellrao_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB), dm)
elif mstr == 'sokalmichener':
_distance_wrap.cdist_sokalmichener_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif mstr == 'sokalsneath':
_distance_wrap.cdist_sokalsneath_bool_wrap(_convert_to_bool(XA),
_convert_to_bool(XB),
dm)
elif metric == 'test_euclidean':
dm = cdist(XA, XB, euclidean)
elif metric == 'test_seuclidean':
if V is None:
V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_sqeuclidean':
dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = cdist(XA, XB, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
X = np.vstack([XA, XB])
V = np.cov(X.T)
VI = np.linalg.inv(V)
X = None
del X
else:
VI = np.asarray(VI, order='c')
[VI] = _copy_arrays_if_base_present([VI])
# (u-v)V^(-1)(u-v)^T
dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = cdist(XA, XB, canberra)
elif metric == 'test_cityblock':
dm = cdist(XA, XB, cityblock)
elif metric == 'test_minkowski':
dm = cdist(XA, XB, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = cdist(XA, XB, wminkowski, p=p, w=w)
elif metric == 'test_cosine':
dm = cdist(XA, XB, cosine)
elif metric == 'test_correlation':
dm = cdist(XA, XB, correlation)
elif metric == 'test_hamming':
dm = cdist(XA, XB, hamming)
elif metric == 'test_jaccard':
dm = cdist(XA, XB, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = cdist(XA, XB, chebyshev)
elif metric == 'test_yule':
dm = cdist(XA, XB, yule)
elif metric == 'test_matching':
dm = cdist(XA, XB, matching)
elif metric == 'test_dice':
dm = cdist(XA, XB, dice)
elif metric == 'test_kulsinski':
dm = cdist(XA, XB, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = cdist(XA, XB, rogerstanimoto)
elif metric == 'test_russellrao':
dm = cdist(XA, XB, russellrao)
elif metric == 'test_sokalsneath':
dm = cdist(XA, XB, sokalsneath)
elif metric == 'test_sokalmichener':
dm = cdist(XA, XB, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
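# A minimal usage sketch: cdist returns an mA-by-mB matrix with
# dm[i, j] = dist(XA[i], XB[j]).
#
#   >>> XA = np.array([[0.0, 0.0], [1.0, 1.0]])
#   >>> XB = np.array([[3.0, 4.0], [0.0, 0.0], [1.0, 0.0]])
#   >>> cdist(XA, XB, 'cityblock')
#   array([[ 7.,  0.,  1.],
#          [ 5.,  2.,  1.]])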
|
bsd-3-clause
|
adaur/SickRage
|
lib/tornado/platform/caresresolver.py
|
193
|
3092
|
from __future__ import absolute_import, division, print_function, with_statement
import pycares
import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, is_valid_ip
class CaresResolver(Resolver):
"""Name resolver based on the c-ares library.
This is a non-blocking and non-threaded resolver. It may not produce
the same results as the system resolver, but can be used for non-blocking
resolution when threads cannot be used.
c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``,
so it is only recommended for use in ``AF_INET`` (i.e. IPv4). This is
the default for ``tornado.simple_httpclient``, but other libraries
may default to ``AF_UNSPEC``.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb)
self.fds = {}
def _sock_state_cb(self, fd, readable, writable):
state = ((IOLoop.READ if readable else 0) |
(IOLoop.WRITE if writable else 0))
if not state:
self.io_loop.remove_handler(fd)
del self.fds[fd]
elif fd in self.fds:
self.io_loop.update_handler(fd, state)
self.fds[fd] = state
else:
self.io_loop.add_handler(fd, self._handle_events, state)
self.fds[fd] = state
def _handle_events(self, fd, events):
read_fd = pycares.ARES_SOCKET_BAD
write_fd = pycares.ARES_SOCKET_BAD
if events & IOLoop.READ:
read_fd = fd
if events & IOLoop.WRITE:
write_fd = fd
self.channel.process_fd(read_fd, write_fd)
@gen.coroutine
def resolve(self, host, port, family=0):
if is_valid_ip(host):
addresses = [host]
else:
# gethostbyname doesn't take callback as a kwarg
self.channel.gethostbyname(host, family, (yield gen.Callback(1)))
callback_args = yield gen.Wait(1)
assert isinstance(callback_args, gen.Arguments)
assert not callback_args.kwargs
result, error = callback_args.args
if error:
raise Exception('C-Ares returned error %s: %s while resolving %s' %
(error, pycares.errno.strerror(error), host))
addresses = result.addresses
addrinfo = []
for address in addresses:
if '.' in address:
address_family = socket.AF_INET
elif ':' in address:
address_family = socket.AF_INET6
else:
address_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != address_family:
raise Exception('Requested socket family %d but got %d' %
(family, address_family))
addrinfo.append((address_family, (address, port)))
raise gen.Return(addrinfo)
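# A minimal usage sketch, assuming Tornado's standard configuration hook for
# pluggable resolvers (the host name and port below are placeholders):
#
#   from tornado.netutil import Resolver
#   Resolver.configure('tornado.platform.caresresolver.CaresResolver')
#   resolver = Resolver()
#   # inside a coroutine:
#   #     addrinfo = yield resolver.resolve('example.com', 80, socket.AF_INET)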
|
gpl-3.0
|
afaheem88/rally
|
rally/task/sla.py
|
10
|
4319
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SLA (Service-level agreement) is a set of details for determining compliance
with contracted values such as maximum error rate or minimum response time.
"""
import abc
import jsonschema
import six
from rally.common.i18n import _
from rally.common.plugin import plugin
def _format_result(criterion_name, success, detail):
"""Returns the SLA result dict corresponding to the current state."""
return {"criterion": criterion_name,
"success": success,
"detail": detail}
class SLAChecker(object):
"""Base SLA checker class."""
def __init__(self, config):
self.config = config
self.unexpected_failure = None
self.aborted_on_sla = False
self.aborted_manually = False
self.sla_criteria = [SLA.get(name)(criterion_value)
for name, criterion_value
in config.get("sla", {}).items()]
def add_iteration(self, iteration):
"""Process the result of a single iteration.
The call to add_iteration() will return True if all the SLA checks
passed, and False otherwise.
:param iteration: iteration result object
"""
return all([sla.add_iteration(iteration) for sla in self.sla_criteria])
def results(self):
results = [sla.result() for sla in self.sla_criteria]
if self.aborted_on_sla:
results.append(_format_result(
"aborted_on_sla", False,
_("Task was aborted due to SLA failure(s).")))
if self.aborted_manually:
results.append(_format_result(
"aborted_manually", False,
_("Task was aborted due to abort signal.")))
if self.unexpected_failure:
results.append(_format_result(
"something_went_wrong", False,
_("Unexpected error: %s") % self.unexpected_failure))
return results
def set_aborted_on_sla(self):
self.aborted_on_sla = True
def set_aborted_manually(self):
self.aborted_manually = True
def set_unexpected_failure(self, exc):
self.unexpected_failure = exc
def configure(name, namespace="default"):
return plugin.configure(name=name, namespace=namespace)
@six.add_metaclass(abc.ABCMeta)
@configure(name="base_sla")
class SLA(plugin.Plugin):
"""Factory for criteria classes."""
def __init__(self, criterion_value):
self.criterion_value = criterion_value
self.success = True
@staticmethod
def validate(config):
properties = dict([(s.get_name(), s.CONFIG_SCHEMA)
for s in SLA.get_all()])
schema = {
"type": "object",
"properties": properties,
"additionalProperties": False,
}
jsonschema.validate(config, schema)
@abc.abstractmethod
def add_iteration(self, iteration):
"""Process the result of a single iteration and perform a SLA check.
The call to add_iteration() will return True if the SLA check passed,
and False otherwise.
:param iteration: iteration result object
:returns: True if the SLA check passed, False otherwise
"""
def result(self):
"""Returns the SLA result dict corresponding to the current state."""
return _format_result(self.get_name(), self.success, self.details())
@abc.abstractmethod
def details(self):
"""Returns the string describing the current results of the SLA."""
def status(self):
"""Return "Passed" or "Failed" depending on the current SLA status."""
return "Passed" if self.success else "Failed"
|
apache-2.0
|
infobloxopen/infoblox-netmri
|
infoblox_netmri/api/broker/v3_1_0/custom_fields_broker.py
|
19
|
7329
|
from ..broker import Broker
class CustomFieldsBroker(Broker):
controller = "custom_fields"
def list(self, **kwargs):
"""Returns list of custom fields defined for requested model or list of custom fields defined for model object if id parameter is passed.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: Id of the object to get custom fields for.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param fields: Array of attributes to use while searching.
:type fields: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: Search pattern.
:type query: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param allow_blank: Flag which defines whether to return blank custom fields for model object or not.
:type allow_blank: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param limit: Limit the number of records returned.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start: Record to begin return with.
:type start: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("list"), kwargs)
def list_undefined(self, **kwargs):
"""Returns list of undefined custom field for specified object.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: Object id.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("list_undefined"), kwargs)
def import_data(self, **kwargs):
"""Allows importing custom field data using CSV format.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param file_name: The contents of the CSV file with the custom field data to be imported.
:type file_name: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("import"), kwargs)
def create_field(self, **kwargs):
"""Allows creating new custom field for specified model.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param name: Name of new custom field.
:type name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param type: Type of new custom field (integer, datetime, date, string).
:type type: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("create_field"), kwargs)
def update_field(self, **kwargs):
"""Allows updating properties of custom field assosiated with specified model (changing name, type).
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param name: Old custom field name.
:type name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param new_name: New name for custom field.
:type new_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param type: Old or new custom field type (integer, datetime, date, string).
:type type: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("update_field"), kwargs)
def delete_field(self, **kwargs):
"""Allows deleting custom field assosiated with specified model.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param model: Name of the model to work with. Valid values are Device, Interface, JobSpecification, DetectedChange, DevicePhysical, IssueDesc.
:type model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param name: Name of custom field to delete.
:type name: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("delete_field"), kwargs)
|
apache-2.0
|
aweinstock314/aweinstock-ctf-writeups
|
csaw_quals_2015/re300_ftp_sploit.py
|
1
|
1043
|
#!/usr/bin/env python
# nc 54.175.183.202 12012
from pwn import *
import IPython
host = 'localhost' if '--live' not in sys.argv else '54.175.183.202'
p1 = remote(host, 12012)
p1.sendline('USER blankwall')
print(p1.recvuntil(['Please send password for user blankwall']))
p1.sendline('PASS cookie')
print(p1.recvuntil(['logged in']))
'''
p1.sendline('PASV')
port_pattern = 'PASV succesful listening on port: ([0-9]+)\n'
has_port = p1.recvregex(port_pattern)
print(has_port)
port = int(re.findall(port_pattern, has_port)[0], 10)
#p1.sendline('LIST')
#p1.sendline('RETR')
#p1.sendline('flag.txt')
#p1.send('RETRflag.txt')
#p1.send('RETRre_solution.txt')
print('nc %s %d' % (host, port))
#p2 = remote(host, port)
'''
p1.sendline('RDF')
p1.interactive()
'''
avi@debian:~/Documents/csaw_quals_2015_09$ ./re300_ftp_exploit.py --live
[+] Opening connection to 54.175.183.202 on port 12012: Done
Welcome to FTP server
Please send password for user blankwall
logged in
[*] Switching to interactive mode
flag{n0_c0ok1e_ju$t_a_f1ag_f0r_you}
$
'''
|
agpl-3.0
|
xs2maverick/adhocracy3.mercator
|
src/adhocracy_frontend/adhocracy_frontend/scaffolds/adhocracy/version.py
|
26
|
3208
|
# -*- coding: utf-8 -*-
# Author: Douglas Creager <dcreager@dcreager.net>
# This file is placed into the public domain.
# Calculates the current version number. If possible, this is the
# output of “git describe”, modified to conform to the versioning
# scheme that setuptools uses. If “git describe” returns an error
# (most likely because we're in an unpacked copy of a release tarball,
# rather than in a git working copy), then we fall back on reading the
# contents of the RELEASE-VERSION file.
#
# To use this script, simply import it in your setup.py file, and use the
# results of get_git_version() as your package version:
#
# from version import *
#
# setup(
# version=get_git_version(),
# .
# .
# .
# )
#
# This will automatically update the RELEASE-VERSION file, if
# necessary. Note that the RELEASE-VERSION file should *not* be
# checked into git; please add it to your top-level .gitignore file.
#
# You'll probably want to distribute the RELEASE-VERSION file in your
# sdist tarballs; to do this, just create a MANIFEST.in file that
# contains the following line:
#
# include RELEASE-VERSION
"""Provide helper functions for getting a version number."""
from subprocess import Popen, PIPE
def call_git_describe(abbrev=4):
"""Call git describe to get the current version number."""
try:
p = Popen(['git', 'describe', '--abbrev=%d' % abbrev],
stdout=PIPE, stderr=PIPE)
p.stderr.close()
line = p.stdout.readlines()[0]
return line.decode('utf-8').strip()
except:
return None
def read_release_version():
""" Read current version number from RELEASE-VERSION."""
try:
f = open('RELEASE-VERSION', 'r')
try:
version = f.readlines()[0]
return version.strip()
finally:
f.close()
except:
return None
def write_release_version(version):
"""Write version number to RELEASE-VERSION."""
f = open('RELEASE-VERSION', 'w')
f.write('%s\n' % version)
f.close()
def get_git_version(abbrev=4):
"""Try to get version from git and fallback to file. """
# Read in the version that's currently in RELEASE-VERSION.
release_version = read_release_version()
# First try to get the current version using “git describe”.
version = call_git_describe(abbrev)
# Adapt to PEP 386 compatible versioning scheme
if (version is not None) and ('-' in version):
parts = version.split('-')
parts[-2] = 'post' + parts[-2]
version = '.'.join(parts[:-1])
# If that doesn't work, fall back on the value that's in
# RELEASE-VERSION.
if version is None:
version = release_version
# If we still don't have anything, that's an error.
if version is None:
raise ValueError('Cannot find the version number!')
# If the current version is different from what's in the
# RELEASE-VERSION file, update the file to be current.
if version != release_version:
write_release_version(version)
# Finally, return the current version.
return version
__all__ = ('get_git_version')
if __name__ == '__main__':
print(get_git_version())
|
agpl-3.0
|
pshen/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_ip_address.py
|
60
|
7162
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Darren Worrall <darren@iweb.co.uk>
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_ip_address
short_description: Manages public IP address associations on Apache CloudStack based clouds.
description:
- Acquires and associates a public IP to an account or project. Due to API
limitations this is not an idempotent call, so be sure to only
conditionally call this when C(state=present)
version_added: '2.0'
author:
- "Darren Worrall (@dazworrall)"
- "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address.
- Required if C(state=absent)
required: false
default: null
domain:
description:
- Domain the IP address is related to.
required: false
default: null
network:
description:
- Network the IP address is related to.
required: false
default: null
vpc:
description:
- VPC the IP address is related to.
required: false
default: null
version_added: "2.2"
account:
description:
- Account the IP address is related to.
required: false
default: null
project:
description:
- Name of the project the IP address is related to.
required: false
default: null
zone:
description:
      - Name of the zone the IP address is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Associate an IP address conditionally
- local_action:
module: cs_ip_address
network: My Network
register: ip_address
when: instance.public_ip is undefined
# Disassociate an IP address
- local_action:
module: cs_ip_address
ip_address: 1.2.3.4
state: absent
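# A hedged sketch (zone and project names are placeholders): acquire an
# address in a specific zone for a project and keep the result for later use
- local_action:
    module: cs_ip_address
    zone: zone01
    project: Demo
  register: new_ip_address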
'''
RETURN = '''
---
id:
description: UUID of the Public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
zone:
description: Name of zone the IP address is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the IP address is related to.
returned: success
type: string
sample: Production
account:
description: Account the IP address is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the IP address is related to.
returned: success
type: string
sample: example domain
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackIPAddress(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackIPAddress, self).__init__(module)
self.returns = {
'ipaddress': 'ip_address',
}
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
args = {
'ipaddress': self.module.params.get('ip_address'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.cs.listPublicIpAddresses(**args)
if ip_addresses:
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def associate_ip_address(self):
self.result['changed'] = True
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'networkid': self.get_network(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_address = None
if not self.module.check_mode:
res = self.cs.associateIpAddress(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
ip_address = self.poll_job(res, 'ipaddress')
return ip_address
def disassociate_ip_address(self):
ip_address = self.get_ip_address()
if not ip_address:
return None
if ip_address['isstaticnat']:
self.module.fail_json(msg="IP address is allocated via static nat")
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.disassociateIpAddress(id=ip_address['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'ipaddress')
return ip_address
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=False),
state=dict(choices=['present', 'absent'], default='present'),
vpc=dict(),
network=dict(),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
required_if=[
('state', 'absent', ['ip_address']),
],
supports_check_mode=True
)
try:
acs_ip_address = AnsibleCloudStackIPAddress(module)
state = module.params.get('state')
if state in ['absent']:
ip_address = acs_ip_address.disassociate_ip_address()
else:
ip_address = acs_ip_address.associate_ip_address()
result = acs_ip_address.get_result(ip_address)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mewtaylor/django
|
django/contrib/gis/gdal/feature.py
|
439
|
4153
|
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
    This class wraps an OGR Feature and needs to be instantiated
from a Layer object.
"""
def __init__(self, feat, layer):
"""
Initializes Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException('Cannot create OGR Feature, invalid pointer given.')
self.ptr = feat
self._layer = layer
def __del__(self):
"Releases a reference to this object."
if self._ptr and capi:
capi.destroy_feature(self._ptr)
def __getitem__(self, index):
"""
Gets the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, six.string_types):
i = self.index(index)
else:
if index < 0 or index > self.num_fields:
raise OGRIndexError('index out of range')
i = index
return Field(self, i)
def __iter__(self):
"Iterates over each field in the Feature."
for i in range(self.num_fields):
yield self[i]
def __len__(self):
"Returns the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return 'Feature FID %d in Layer<%s>' % (self.fid, self.layer_name)
def __eq__(self, other):
"Does equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Returns the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Returns the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_text(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Returns the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Returns a list of fields in the Feature."
return [capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i))
for i in range(self.num_fields)]
@property
def geom(self):
"Returns the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Returns the OGR Geometry Type for this Feture."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Returns the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, 'name', field)
return self[field_name].value
def index(self, field_name):
"Returns the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise OGRIndexError('invalid OFT field name given: "%s"' % field_name)
return i
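# A minimal usage sketch (the shapefile path and field name are hypothetical;
# adjust them to an existing data source):
#
#   from django.contrib.gis.gdal import DataSource
#   ds = DataSource('/path/to/cities.shp')
#   layer = ds[0]
#   for feat in layer:                      # each item is a Feature
#       print(feat.fid, feat.get('NAME'))   # field value, not a Field object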
|
bsd-3-clause
|
hslee16/ansible-modules-extras
|
cloud/openstack/os_keystone_domain.py
|
75
|
5392
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_keystone_domain
short_description: Manage OpenStack Identity Domains
extends_documentation_fragment: openstack
version_added: "2.1"
description:
- Create, update, or delete OpenStack Identity domains. If a domain
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
      - Name that has to be given to the domain
required: true
description:
description:
- Description of the domain
required: false
default: None
enabled:
description:
- Is the domain enabled
required: false
default: True
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a domain
- os_keystone_domain:
cloud: mycloud
state: present
name: demo
description: Demo Domain
# Delete a domain
- os_keystone_domain:
cloud: mycloud
state: absent
name: demo
'''
RETURN = '''
domain:
description: Dictionary describing the domain.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Domain ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Domain name.
type: string
sample: "demo"
description:
description: Domain description.
type: string
sample: "Demo Domain"
enabled:
            description: Flag indicating if the domain is enabled.
type: boolean
sample: True
id:
description: The domain ID.
returned: On success when I(state) is 'present'
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
'''
def _needs_update(module, domain):
if domain.description != module.params['description']:
return True
if domain.enabled != module.params['enabled']:
return True
return False
def _system_state_change(module, domain):
state = module.params['state']
if state == 'absent' and domain:
return True
if state == 'present':
if domain is None:
return True
return _needs_update(module, domain)
return False
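# A hedged illustration: in check mode with state=present and an existing
# domain whose description and enabled flag already match the module
# parameters, _system_state_change() returns False, so the module reports
# changed=False without touching the cloud.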
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(default=None),
enabled=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
description = module.params['description']
enabled = module.params['enabled']
state = module.params['state']
try:
cloud = shade.operator_cloud(**module.params)
domains = cloud.search_domains(filters=dict(name=name))
if len(domains) > 1:
module.fail_json(msg='Domain name %s is not unique' % name)
elif len(domains) == 1:
domain = domains[0]
else:
domain = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, domain))
if state == 'present':
if domain is None:
domain = cloud.create_domain(
name=name, description=description, enabled=enabled)
changed = True
else:
if _needs_update(module, domain):
domain = cloud.update_domain(
domain.id, name=name, description=description,
enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, domain=domain, id=domain.id)
elif state == 'absent':
if domain is None:
changed=False
else:
cloud.delete_domain(domain.id)
changed=True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
rackstar17/oppia
|
core/domain/summary_services.py
|
1
|
18447
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on activity summaries."""
from core.domain import activity_services
from core.domain import collection_services
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import stats_jobs_continuous
from core.domain import user_services
import feconf
import utils
_LIBRARY_INDEX_GROUPS = [{
'header_i18n_id': 'I18N_LIBRARY_GROUPS_MATHEMATICS_&_STATISTICS',
'search_categories': [
'Mathematics', 'Algebra', 'Arithmetic', 'Calculus', 'Combinatorics',
'Geometry', 'Graph Theory', 'Logic', 'Probability', 'Statistics',
'Trigonometry',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_COMPUTING',
'search_categories': ['Algorithms', 'Computing', 'Programming'],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SCIENCE',
'search_categories': [
'Astronomy', 'Biology', 'Chemistry', 'Engineering', 'Environment',
'Medicine', 'Physics',
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_HUMANITIES',
'search_categories': [
'Architecture', 'Art', 'Music', 'Philosophy', 'Poetry'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_LANGUAGES',
'search_categories': [
'Languages', 'Reading', 'English', 'Latin', 'Spanish', 'Gaulish'
],
}, {
'header_i18n_id': 'I18N_LIBRARY_GROUPS_SOCIAL_SCIENCE',
'search_categories': [
'Business', 'Economics', 'Geography', 'Government', 'History', 'Law'
],
}]
def get_human_readable_contributors_summary(contributors_summary):
contributor_ids = contributors_summary.keys()
contributor_usernames = user_services.get_human_readable_user_ids(
contributor_ids)
contributor_profile_pictures = (
user_services.get_profile_pictures_by_user_ids(contributor_ids))
return {
contributor_usernames[ind]: {
'num_commits': contributors_summary[contributor_ids[ind]],
'profile_picture_data_url': contributor_profile_pictures[
contributor_ids[ind]]
}
for ind in xrange(len(contributor_ids))
}
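# A hedged illustration of the mapping returned above (usernames and counts
# are made up):
#   {'alice': {'num_commits': 3, 'profile_picture_data_url': 'data:image/...'}}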
def get_learner_collection_dict_by_id(
collection_id, user_id, strict=True, allow_invalid_explorations=False,
version=None):
"""Creates and returns a dictionary representation of a collection given by
the provided collection ID. This dictionary contains extra information
along with the dict returned by collection_domain.Collection.to_dict()
which includes useful data for the collection learner view. The information
includes progress in the collection, information about explorations
referenced within the collection, and a slightly nicer data structure for
frontend work.
This raises a ValidationError if the collection retrieved using the given
ID references non-existent explorations.
"""
collection = collection_services.get_collection_by_id(
collection_id, strict=strict, version=version)
exp_ids = collection.exploration_ids
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exp_ids, editor_user_id=user_id)
exp_summaries_dict_map = {
exp_summary_dict['id']: exp_summary_dict
for exp_summary_dict in exp_summary_dicts
}
# TODO(bhenning): Users should not be recommended explorations they have
# completed outside the context of a collection (see #1461).
next_exploration_ids = None
completed_exp_ids = None
if user_id:
completed_exp_ids = (
collection_services.get_valid_completed_exploration_ids(
user_id, collection))
next_exploration_ids = collection.get_next_exploration_ids(
completed_exp_ids)
else:
# If the user is not logged in or they have not completed any of
# the explorations yet within the context of this collection,
# recommend the initial explorations.
next_exploration_ids = collection.init_exploration_ids
completed_exp_ids = []
collection_dict = collection.to_dict()
collection_dict['skills'] = collection.skills
collection_dict['playthrough_dict'] = {
'next_exploration_ids': next_exploration_ids,
'completed_exploration_ids': completed_exp_ids
}
collection_dict['version'] = collection.version
collection_is_public = rights_manager.is_collection_public(collection_id)
# Insert an 'exploration' dict into each collection node, where the
# dict includes meta information about the exploration (ID and title).
for collection_node in collection_dict['nodes']:
exploration_id = collection_node['exploration_id']
summary_dict = exp_summaries_dict_map.get(exploration_id)
if not allow_invalid_explorations:
if not summary_dict:
raise utils.ValidationError(
'Expected collection to only reference valid '
'explorations, but found an exploration with ID: %s (was '
'the exploration deleted or is it a private exploration '
'that you do not have edit access to?)'
% exploration_id)
if collection_is_public and rights_manager.is_exploration_private(
exploration_id):
raise utils.ValidationError(
'Cannot reference a private exploration within a public '
'collection, exploration ID: %s' % exploration_id)
if summary_dict:
collection_node['exploration_summary'] = summary_dict
else:
collection_node['exploration_summary'] = None
return collection_dict
def get_displayable_collection_summary_dicts_matching_ids(collection_ids):
"""Returns a list with all collection summary objects that can be
displayed on the library page as collection summary tiles.
"""
collection_summaries = (
collection_services.get_collection_summaries_matching_ids(
collection_ids))
return _get_displayable_collection_summary_dicts(collection_summaries)
def get_displayable_exp_summary_dicts_matching_ids(
exploration_ids, editor_user_id=None):
"""Given a list of exploration ids, optionally filters the list for
explorations that are currently non-private and not deleted, and returns a
list of dicts of the corresponding exploration summaries. This function can
also filter based on a user ID who has edit access to the corresponding
exploration, where the editor ID is for private explorations. Please use
this function when needing summary information to display on exploration
summary tiles in the frontend.
"""
exploration_summaries = (
exp_services.get_exploration_summaries_matching_ids(exploration_ids))
filtered_exploration_summaries = []
for exploration_summary in exploration_summaries:
if exploration_summary is None:
continue
if exploration_summary.status == (
rights_manager.ACTIVITY_STATUS_PRIVATE):
if editor_user_id is None:
continue
if not rights_manager.Actor(editor_user_id).can_edit(
feconf.ACTIVITY_TYPE_EXPLORATION,
exploration_summary.id):
continue
filtered_exploration_summaries.append(exploration_summary)
return get_displayable_exp_summary_dicts(filtered_exploration_summaries)
def get_displayable_exp_summary_dicts(exploration_summaries):
"""Given a list of exploration summary domain objects, returns a list,
with the same number of elements, of the corresponding human-readable
exploration summary dicts.
This assumes that all the exploration summary domain objects passed in are
valid (i.e., none of them are None).
"""
exploration_ids = [
exploration_summary.id
for exploration_summary in exploration_summaries]
view_counts = (
stats_jobs_continuous.StatisticsAggregator.get_views_multi(
exploration_ids))
displayable_exp_summaries = []
for ind, exploration_summary in enumerate(exploration_summaries):
if not exploration_summary:
continue
summary_dict = {
'id': exploration_summary.id,
'title': exploration_summary.title,
'activity_type': feconf.ACTIVITY_TYPE_EXPLORATION,
'category': exploration_summary.category,
'created_on_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_created_on),
'objective': exploration_summary.objective,
'language_code': exploration_summary.language_code,
'last_updated_msec': utils.get_time_in_millisecs(
exploration_summary.exploration_model_last_updated
),
'status': exploration_summary.status,
'ratings': exploration_summary.ratings,
'community_owned': exploration_summary.community_owned,
'tags': exploration_summary.tags,
'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
exploration_summary.category),
'thumbnail_bg_color': utils.get_hex_color_for_category(
exploration_summary.category),
'num_views': view_counts[ind],
}
displayable_exp_summaries.append(summary_dict)
return displayable_exp_summaries
def _get_displayable_collection_summary_dicts(collection_summaries):
displayable_collection_summaries = []
for collection_summary in collection_summaries:
if collection_summary and collection_summary.status != (
rights_manager.ACTIVITY_STATUS_PRIVATE):
displayable_collection_summaries.append({
'id': collection_summary.id,
'title': collection_summary.title,
'category': collection_summary.category,
'activity_type': feconf.ACTIVITY_TYPE_COLLECTION,
'objective': collection_summary.objective,
'language_code': collection_summary.language_code,
'tags': collection_summary.tags,
'node_count': collection_summary.node_count,
'last_updated_msec': utils.get_time_in_millisecs(
collection_summary.collection_model_last_updated),
'thumbnail_icon_url': (
utils.get_thumbnail_icon_url_for_category(
collection_summary.category)),
'thumbnail_bg_color': utils.get_hex_color_for_category(
collection_summary.category)})
return displayable_collection_summaries
def get_library_groups(language_codes):
"""Returns a list of groups for the library index page. Each group has a
header and a list of dicts representing activity summaries.
"""
language_codes_suffix = ''
if language_codes:
language_codes_suffix = ' language_code=("%s")' % (
'" OR "'.join(language_codes))
def _generate_query(categories):
# This assumes that 'categories' is non-empty.
return 'category=("%s")%s' % (
'" OR "'.join(categories), language_codes_suffix)
# Collect all collection ids so that the summary details can be retrieved
# with a single get_multi() call.
all_collection_ids = []
header_id_to_collection_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
collection_ids = collection_services.search_collections(
_generate_query(group['search_categories']), 8)[0]
header_id_to_collection_ids[group['header_i18n_id']] = collection_ids
all_collection_ids += collection_ids
collection_summaries = [
summary for summary in
collection_services.get_collection_summaries_matching_ids(
all_collection_ids)
if summary is not None]
collection_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in _get_displayable_collection_summary_dicts(
collection_summaries)
}
# Collect all exp ids so that the summary details can be retrieved with a
# single get_multi() call.
all_exp_ids = []
header_to_exp_ids = {}
for group in _LIBRARY_INDEX_GROUPS:
exp_ids = exp_services.search_explorations(
_generate_query(group['search_categories']), 8)[0]
header_to_exp_ids[group['header_i18n_id']] = exp_ids
all_exp_ids += exp_ids
exp_summaries = [
summary for summary in
exp_services.get_exploration_summaries_matching_ids(all_exp_ids)
if summary is not None]
exp_summary_dicts = {
summary_dict['id']: summary_dict
for summary_dict in get_displayable_exp_summary_dicts(exp_summaries)
}
results = []
for group in _LIBRARY_INDEX_GROUPS:
summary_dicts = []
collection_ids_to_display = (
header_id_to_collection_ids[group['header_i18n_id']])
summary_dicts = [
collection_summary_dicts[collection_id]
for collection_id in collection_ids_to_display
if collection_id in collection_summary_dicts]
exp_ids_to_display = header_to_exp_ids[group['header_i18n_id']]
summary_dicts += [
exp_summary_dicts[exp_id] for exp_id in exp_ids_to_display
if exp_id in exp_summary_dicts]
if not summary_dicts:
continue
results.append({
'header_i18n_id': group['header_i18n_id'],
'categories': group['search_categories'],
'activity_summary_dicts': summary_dicts,
'has_full_results_page': True,
'full_results_url': None,
})
return results
def require_activities_to_be_public(activity_references):
"""Raises an exception if any activity reference in the list does not
exist, or is not public.
"""
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
activity_summaries_by_type = [{
'type': feconf.ACTIVITY_TYPE_EXPLORATION,
'ids': exploration_ids,
'summaries': exp_services.get_exploration_summaries_matching_ids(
exploration_ids),
}, {
'type': feconf.ACTIVITY_TYPE_COLLECTION,
'ids': collection_ids,
'summaries': collection_services.get_collection_summaries_matching_ids(
collection_ids),
}]
for activities_info in activity_summaries_by_type:
for index, summary in enumerate(activities_info['summaries']):
if summary is None:
raise Exception(
'Cannot feature non-existent %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
if summary.status == rights_manager.ACTIVITY_STATUS_PRIVATE:
raise Exception(
'Cannot feature private %s with id %s' %
(activities_info['type'], activities_info['ids'][index]))
def get_featured_activity_summary_dicts(language_codes):
"""Returns a list of featured activities with the given language codes.
The return value is sorted according to the list stored in the datastore.
"""
activity_references = activity_services.get_featured_activity_references()
exploration_ids, collection_ids = activity_services.split_by_type(
activity_references)
exp_summary_dicts = get_displayable_exp_summary_dicts_matching_ids(
exploration_ids)
col_summary_dicts = get_displayable_collection_summary_dicts_matching_ids(
collection_ids)
summary_dicts_by_id = {
feconf.ACTIVITY_TYPE_EXPLORATION: {
summary_dict['id']: summary_dict
for summary_dict in exp_summary_dicts
},
feconf.ACTIVITY_TYPE_COLLECTION: {
summary_dict['id']: summary_dict
for summary_dict in col_summary_dicts
},
}
featured_summary_dicts = []
for reference in activity_references:
if reference.id in summary_dicts_by_id[reference.type]:
summary_dict = summary_dicts_by_id[reference.type][reference.id]
if summary_dict and summary_dict['language_code'] in language_codes:
featured_summary_dicts.append(summary_dict)
return featured_summary_dicts
def get_top_rated_exploration_summary_dicts(language_codes, limit):
"""Returns a list of top rated explorations with the given language code.
The return value is sorted in decreasing order of average rating.
"""
filtered_exp_summaries = [
exp_summary for exp_summary in
exp_services.get_top_rated_exploration_summaries(limit).values()
if exp_summary.language_code in language_codes and
sum(exp_summary.ratings.values()) > 0]
sorted_exp_summaries = sorted(
filtered_exp_summaries,
key=lambda exp_summary: exp_summary.scaled_average_rating,
reverse=True)
return get_displayable_exp_summary_dicts(sorted_exp_summaries)
def get_recently_published_exp_summary_dicts(limit):
"""Returns a list of recently published explorations
with the given language code.
"""
recently_published_exploration_summaries = [
exp_summary for exp_summary in
exp_services.get_recently_published_exp_summaries(limit).values()]
    # Sort the recently published exploration summaries by publication time,
    # newest first.
summaries = sorted(
recently_published_exploration_summaries,
key=lambda exp_summary: exp_summary.first_published_msec,
reverse=True)
return get_displayable_exp_summary_dicts(summaries)
|
apache-2.0
|
Orav/kbengine
|
kbe/src/lib/python/Lib/email/mime/audio.py
|
5
|
2747
|
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Anthony Baxter
# Contact: email-sig@python.org
"""Class representing audio/* type MIME documents."""
__all__ = ['MIMEAudio']
import sndhdr
from io import BytesIO
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
_sndhdr_MIMEmap = {'au' : 'basic',
'wav' :'x-wav',
'aiff':'x-aiff',
'aifc':'x-aiff',
}
# There are others in sndhdr that don't have MIME types. :(
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
def _whatsnd(data):
"""Try to identify a sound file type.
sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
we re-do it here. It would be easier to reverse engineer the Unix 'file'
command and use the standard 'magic' file, as shipped with a modern Unix.
"""
hdr = data[:512]
fakefile = BytesIO(hdr)
for testfn in sndhdr.tests:
res = testfn(hdr, fakefile)
if res is not None:
return _sndhdr_MIMEmap.get(res[0])
return None
class MIMEAudio(MIMENonMultipart):
"""Class for generating audio/* MIME documents."""
def __init__(self, _audiodata, _subtype=None,
_encoder=encoders.encode_base64, **_params):
"""Create an audio/* type MIME document.
_audiodata is a string containing the raw audio data. If this data
can be decoded by the standard Python `sndhdr' module, then the
subtype will be automatically included in the Content-Type header.
Otherwise, you can specify the specific audio subtype via the
_subtype parameter. If _subtype is not given, and no subtype can be
guessed, a TypeError is raised.
_encoder is a function which will perform the actual encoding for
        transport of the audio data. It takes one argument, which is this
        MIMEAudio instance. It should use get_payload() and set_payload() to
change the payload to the encoded form. It should also add any
Content-Transfer-Encoding or other headers to the message as
necessary. The default encoding is Base64.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
_subtype = _whatsnd(_audiodata)
if _subtype is None:
raise TypeError('Could not find audio MIME subtype')
MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
self.set_payload(_audiodata)
_encoder(self)
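# A minimal usage sketch (assumes a 'sample.wav' file is available; the
# filename is a placeholder):
#
#   with open('sample.wav', 'rb') as fp:
#       audio = MIMEAudio(fp.read())        # subtype guessed via sndhdr
#   audio.add_header('Content-Disposition', 'attachment',
#                    filename='sample.wav')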
|
lgpl-3.0
|
ghorn/casadi
|
docs/examples/python/implicit_runge-kutta.py
|
2
|
5148
|
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
import numpy as N
import matplotlib.pyplot as plt
'''
Demonstration of how to construct a fixed-step implicit Runge-Kutta integrator
@author: Joel Andersson, K.U. Leuven 2013
'''
# End time
tf = 10.0
# Dimensions
nx = 3
np = 1
# Declare variables
x = SX.sym('x', nx) # state
p = SX.sym('u', np) # control
# ODE right hand side function
ode = vertcat((1 - x[1]*x[1])*x[0] - x[1] + p, \
x[0], \
x[0]*x[0] + x[1]*x[1] + p*p)
dae = {'x':x, 'p':p, 'ode':ode}
f = Function('f', [x, p], [ode])
# Number of finite elements
n = 100
# Size of the finite elements
h = tf/n
# Degree of interpolating polynomial
d = 4
# Choose collocation points
tau_root = [0] + collocation_points(d, 'legendre')
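# A hedged note: with d = 4 Legendre points plus the interval start, tau_root
# holds five normalised times in [0, 1]; the state trajectory on each finite
# element is interpolated through its values at these points.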
# Coefficients of the collocation equation
C = N.zeros((d+1,d+1))
# Coefficients of the continuity equation
D = N.zeros(d+1)
# Dimensionless time inside one control interval
tau = SX.sym('tau')
# For all collocation points
for j in range(d+1):
# Construct Lagrange polynomials to get the polynomial basis at the collocation point
L = 1
for r in range(d+1):
if r != j:
L *= (tau-tau_root[r])/(tau_root[j]-tau_root[r])
# Evaluate the polynomial at the final time to get the coefficients of the continuity equation
lfcn = Function('lfcn', [tau], [L])
D[j] = lfcn(1.0)
  # Evaluate the time derivative of the polynomial at all collocation points to get the coefficients of the collocation equation
tfcn = Function('tfcn', [tau], [tangent(L,tau)])
for r in range(d+1): C[j,r] = tfcn(tau_root[r])
# Total number of variables for one finite element
X0 = MX.sym('X0',nx)
P = MX.sym('P',np)
V = MX.sym('V',d*nx)
# Get the state at each collocation point
X = [X0] + vertsplit(V,[r*nx for r in range(d+1)])
# Get the collocation equations (that define V)
V_eq = []
for j in range(1,d+1):
# Expression for the state derivative at the collocation point
xp_j = 0
for r in range (d+1):
xp_j += C[r,j]*X[r]
# Append collocation equations
f_j = f(X[j],P)
V_eq.append(h*f_j - xp_j)
# Concatenate constraints
V_eq = vertcat(*V_eq)
# Root-finding function, implicitly defines V as a function of X0 and P
vfcn = Function('vfcn', [V, X0, P], [V_eq])
# Convert to SX to decrease overhead
vfcn_sx = vfcn.expand()
# Create a implicit function instance to solve the system of equations
ifcn = rootfinder('ifcn', 'newton', vfcn_sx)
V = ifcn(MX(),X0,P)
X = [X0 if r==0 else V[(r-1)*nx:r*nx] for r in range(d+1)]
# Get an expression for the state at the end of the finite element
XF = 0
for r in range(d+1):
XF += D[r]*X[r]
# Get the discrete time dynamics
F = Function('F', [X0,P],[XF])
# Do this iteratively for all finite elements
X = X0
for i in range(n):
X = F(X,P)
# Fixed-step integrator
irk_integrator = Function('irk_integrator', {'x0':X0, 'p':P, 'xf':X},
integrator_in(), integrator_out())
# Create a conventional integrator for reference
ref_integrator = integrator('ref_integrator', 'cvodes', dae, {'tf':tf})
# Test values
x0_val = N.array([0,1,0])
p_val = 0.2
# Make sure that both integrators give consistent results
for F in (irk_integrator,ref_integrator):
print('-------')
print('Testing ' + F.name())
print('-------')
# Generate a new function that calculates forward and reverse directional derivatives
dF = F.factory('dF', ['x0', 'p', 'fwd:x0', 'fwd:p', 'adj:xf'],
['xf', 'fwd:xf', 'adj:x0', 'adj:p']);
arg = {}
# Pass arguments
arg['x0'] = x0_val
arg['p'] = p_val
# Forward sensitivity analysis, first direction: seed p and x0[0]
arg['fwd_x0'] = [1,0,0]
arg['fwd_p'] = 1
# Adjoint sensitivity analysis, seed xf[2]
arg['adj_xf'] = [0,0,1]
# Integrate
res = dF(**arg)
# Get the nondifferentiated results
print('%30s = %s' % ('xf', res['xf']))
# Get the forward sensitivities
print('%30s = %s' % ('d(xf)/d(p)+d(xf)/d(x0[0])', res['fwd_xf']))
# Get the adjoint sensitivities
print('%30s = %s' % ('d(xf[2])/d(x0)', res['adj_x0']))
print('%30s = %s' % ('d(xf[2])/d(p)', res['adj_p']))
|
lgpl-3.0
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/sqlalchemy/ext/associationproxy.py
|
31
|
33423
|
# ext/associationproxy.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import itertools
import operator
import weakref
from .. import exc, orm, util
from ..orm import collections, interfaces
from ..sql import not_, or_
def association_proxy(target_collection, attr, **kw):
r"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
target.
The returned value is an instance of :class:`.AssociationProxy`.
Implements a Python property representing a relationship as a collection
of simpler values, or a scalar value. The proxied property will mimic
the collection type of the target (list, dict or set), or, in the case of
a one to one relationship, a simple scalar value.
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
:param attr: Attribute on the associated instance or instances we'll
proxy for.
For example, given a target collection of [obj1, obj2], a list created
by this proxy property would look like [getattr(obj1, *attr*),
getattr(obj2, *attr*)]
If the relationship is one-to-one or otherwise uselist=False, then
simply: getattr(obj, *attr*)
:param creator: optional.
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For list
and set collections, the target class constructor will be called with
the 'value' for the new instance. For dict types, two arguments are
passed: key and value.
If you want to construct instances differently, supply a *creator*
function that takes arguments as above and returns instances.
For scalar relationships, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set
up multiple association proxies mapping to different attributes. See
the unit tests for examples, and for examples of how creator() functions
can be used to construct the scalar relationship on-demand in this
situation.
:param \*\*kw: Passes along any other keyword arguments to
:class:`.AssociationProxy`.
"""
return AssociationProxy(target_collection, attr, **kw)
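# A hedged usage sketch (the mapped classes and relationship below are
# hypothetical):
#
#   class User(Base):
#       __tablename__ = 'user'
#       id = Column(Integer, primary_key=True)
#       kw = relationship('Keyword', secondary='user_keyword')
#       keywords = association_proxy(
#           'kw', 'keyword', creator=lambda kw: Keyword(keyword=kw))
#
#   user.keywords.append('chair')   # transparently creates a Keyword object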
ASSOCIATION_PROXY = util.symbol('ASSOCIATION_PROXY')
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.AssociationProxy`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
"""
class AssociationProxy(interfaces.InspectionAttrInfo):
"""A descriptor that presents a read/write view of an object attribute."""
is_attribute = False
extension_type = ASSOCIATION_PROXY
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None, info=None):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
:param attr: Attribute on the collected instances we'll proxy
for. For example, given a target collection of [obj1, obj2], a
list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
:param creator: Optional. When new items are added to this proxied
collection, new instances of the class collected by the target
collection will be created. For list and set collections, the
target class constructor will be called with the 'value' for the
new instance. For dict types, two arguments are passed:
key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
:param getset_factory: Optional. Proxied attribute access is
automatically handled by routines that get and set values based on
the `attr` argument for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments, the
abstract type of the underlying collection and this proxy instance.
:param proxy_factory: Optional. The type of collection to emulate is
determined by sniffing the target collection. If your collection
type can't be determined by duck typing or you'd like to use a
different collection implementation, you may supply a factory
function to produce those collections. Only applicable to
non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
:param info: optional, will be assigned to
:attr:`.AssociationProxy.info` if present.
.. versionadded:: 1.0.9
"""
self.target_collection = target_collection
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.owning_class = None
self.key = '_%s_%s_%s' % (
type(self).__name__, target_collection, id(self))
self.collection_class = None
if info:
self.info = info
@property
def remote_attr(self):
"""The 'remote' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.local_attr`
"""
return getattr(self.target_class, self.value_attr)
@property
def local_attr(self):
"""The 'local' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.remote_attr`
"""
return getattr(self.owning_class, self.target_collection)
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.local_attr`
:attr:`.AssociationProxy.remote_attr`
"""
return (self.local_attr, self.remote_attr)
def _get_property(self):
return (orm.class_mapper(self.owning_class).
get_property(self.target_collection))
@util.memoized_property
def target_class(self):
"""The intermediary class handled by this :class:`.AssociationProxy`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
return self._get_property().mapper.class_
@util.memoized_property
def scalar(self):
"""Return ``True`` if this :class:`.AssociationProxy` proxies a scalar
relationship on the local side."""
scalar = not self._get_property().uselist
if scalar:
self._initialize_scalar_accessors()
return scalar
@util.memoized_property
def _value_is_scalar(self):
return not self._get_property().\
mapper.get_property(self.value_attr).uselist
@util.memoized_property
def _target_is_object(self):
return getattr(self.target_class, self.value_attr).impl.uses_objects
def __get__(self, obj, class_):
if self.owning_class is None:
self.owning_class = class_ and class_ or type(obj)
if obj is None:
return self
if self.scalar:
target = getattr(obj, self.target_collection)
return self._scalar_get(target)
else:
try:
# If the owning instance is reborn (orm session resurrect,
# etc.), refresh the proxy cache.
creator_id, proxy = getattr(obj, self.key)
if id(obj) == creator_id:
return proxy
except AttributeError:
pass
proxy = self._new(_lazy_collection(obj, self.target_collection))
setattr(obj, self.key, (id(obj), proxy))
return proxy
def __set__(self, obj, values):
if self.owning_class is None:
self.owning_class = type(obj)
if self.scalar:
creator = self.creator and self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
if proxy is not values:
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
if self.owning_class is None:
self.owning_class = type(obj)
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
_getter = operator.attrgetter(attr)
getter = lambda target: _getter(target) if target is not None else None
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
def _new(self, lazy_collection):
creator = self.creator and self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(
lazy_collection, creator, self.value_attr, self)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is dict:
return _AssociationDict(
lazy_collection, creator, getter, setter, self)
elif self.collection_class is set:
return _AssociationSet(
lazy_collection, creator, getter, setter, self)
else:
raise exc.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _inflate(self, proxy):
creator = self.creator and self.creator or self.target_class
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
proxy.creator = creator
proxy.getter = getter
proxy.setter = setter
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is set:
proxy.update(values)
else:
raise exc.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
@property
def _comparator(self):
return self._get_property().comparator
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
if self._value_is_scalar:
value_expr = getattr(
self.target_class, self.value_attr).has(
criterion, **kwargs)
else:
value_expr = getattr(
self.target_class, self.value_attr).any(
criterion, **kwargs)
else:
value_expr = criterion
# check _value_is_scalar here, otherwise
# we're scalar->scalar - call .any() so that
# the "can't call any() on a scalar" msg is raised.
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
value_expr
)
else:
return self._comparator.any(
value_expr
)
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
if self._target_is_object:
return self._comparator.has(
getattr(self.target_class, self.value_attr).
has(criterion, **kwargs)
)
else:
if criterion is not None or kwargs:
raise exc.ArgumentError(
"Non-empty has() not allowed for "
"column-targeted association proxy; use ==")
return self._comparator.has()
def contains(self, obj):
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,
and/or :meth:`.RelationshipProperty.Comparator.contains`
operators of the underlying proxied attributes.
"""
if self.scalar and not self._value_is_scalar:
return self._comparator.has(
getattr(self.target_class, self.value_attr).contains(obj)
)
else:
return self._comparator.any(**{self.value_attr: obj})
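    # A hedged illustration (mapped classes are hypothetical): with
    # User.keywords = association_proxy('kw', 'keyword'), a query such as
    # session.query(User).filter(User.keywords.contains('chair')) renders an
    # EXISTS subquery against the underlying 'kw' relationship.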
def __eq__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
if obj is None:
return or_(
self._comparator.has(**{self.value_attr: obj}),
self._comparator == None
)
else:
return self._comparator.has(**{self.value_attr: obj})
def __ne__(self, obj):
# note the has() here will fail for collections; eq_()
# is only allowed with a scalar.
return self._comparator.has(
getattr(self.target_class, self.value_attr) != obj)
class _lazy_collection(object):
def __init__(self, obj, target):
self.ref = weakref.ref(obj)
self.target = target
def __call__(self):
obj = self.ref()
if obj is None:
raise exc.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, self.target)
def __getstate__(self):
return {'obj': self.ref(), 'target': self.target}
def __setstate__(self, state):
self.ref = weakref.ref(state['obj'])
self.target = state['target']
class _AssociationCollection(object):
def __init__(self, lazy_collection, creator, getter, setter, parent):
"""Constructs an _AssociationCollection.
This will always be a subclass of either _AssociationList,
_AssociationSet, or _AssociationDict.
lazy_collection
A callable returning a list-based collection of entities (usually an
object attribute managed by a SQLAlchemy relationship())
creator
A function that creates new target entities. Given one parameter:
value. This assertion is assumed::
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store that
value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
self.parent = parent
col = property(lambda self: self.lazy_collection())
def __len__(self):
return len(self.col)
def __bool__(self):
return bool(self.col)
__nonzero__ = __bool__
def __getstate__(self):
return {'parent': self.parent, 'lazy_collection': self.lazy_collection}
def __setstate__(self, state):
self.parent = state['parent']
self.lazy_collection = state['lazy_collection']
self.parent._inflate(self)
class _AssociationList(_AssociationCollection):
"""Generic, converting, list-to-list proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __getitem__(self, index):
if not isinstance(index, slice):
return self._get(self.col[index])
else:
return [self._get(member) for member in self.col[index]]
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
start = index.start or 0
rng = list(range(index.start or 0, stop, step))
if step == 1:
for i in rng:
del self[start]
i = start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
return
def append(self, value):
item = self._create(value)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
util.itertools_filter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __lt__(self, other):
return list(self) < other
def __le__(self, other):
return list(self) <= other
def __gt__(self, other):
return list(self) > other
def __ge__(self, other):
return list(self) >= other
def __cmp__(self, other):
return cmp(list(self), other)
def __add__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return list(self) + other
def __radd__(self, iterable):
try:
other = list(iterable)
except TypeError:
return NotImplemented
return other + list(self)
def __mul__(self, n):
if not isinstance(n, int):
return NotImplemented
return list(self) * n
__rmul__ = __mul__
def __iadd__(self, iterable):
self.extend(iterable)
return self
def __imul__(self, n):
# unlike a regular list *=, proxied __imul__ will generate unique
# backing objects for each copy. *= on proxied lists is a bit of
# a stretch anyhow, and this interpretation of the __imul__ contract
# is more plausibly useful than copying the backing objects.
if not isinstance(n, int):
return NotImplemented
if n == 0:
self.clear()
elif n > 1:
self.extend(list(self) * (n - 1))
return self
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
_NotProvided = util.symbol('_NotProvided')
class _AssociationDict(_AssociationCollection):
"""Generic, converting, dict-to-dict proxy."""
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def has_key(self, key):
# testlib.pragma exempt:__hash__
return key in self.col
def __iter__(self):
return iter(self.col.keys())
def clear(self):
self.col.clear()
def __eq__(self, other):
return dict(self) == other
def __ne__(self, other):
return dict(self) != other
def __lt__(self, other):
return dict(self) < other
def __le__(self, other):
return dict(self) <= other
def __gt__(self, other):
return dict(self) > other
def __ge__(self, other):
return dict(self) >= other
def __cmp__(self, other):
return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
if util.py2k:
def iteritems(self):
return ((key, self._get(self.col[key])) for key in self.col)
def itervalues(self):
return (self._get(self.col[key]) for key in self.col)
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [self._get(member) for member in self.col.values()]
def items(self):
return [(k, self._get(self.col[k])) for k in self]
else:
def items(self):
return ((key, self._get(self.col[key])) for key in self.col)
def values(self):
return (self._get(self.col[key]) for key in self.col)
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
# discern dict from sequence - took the advice from
# http://www.voidspace.org.uk/python/articles/duck_typing.shtml
# still not perfect :(
if hasattr(seq_or_map, 'keys'):
for item in seq_or_map:
self[item] = seq_or_map[item]
else:
try:
for k, v in seq_or_map:
self[k] = v
except ValueError:
raise ValueError(
"dictionary update sequence "
"requires 2-element tuples")
for key, value in kw.items():
self[key] = value
def copy(self):
return dict(self.items())
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(dict, func_name)):
func.__doc__ = getattr(dict, func_name).__doc__
del func_name, func
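# Illustrative sketch (not from the original source): update() accepts either
# a mapping or a sequence of 2-tuples, mirroring dict.update, with keyword
# arguments applied last, e.g.:
#
#     proxied.update({'a': obj1})       # mapping form
#     proxied.update([('b', obj2)])     # sequence-of-pairs form
#     proxied.update(c=obj3)            # keyword form
#
# ``proxied``, ``obj1``, ``obj2`` and ``obj3`` are hypothetical names.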
class _AssociationSet(_AssociationCollection):
"""Generic, converting, set-to-set proxy."""
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __bool__(self):
if self.col:
return True
else:
return False
__nonzero__ = __bool__
def __contains__(self, value):
for member in self.col:
# testlib.pragma exempt:__eq__
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or just use
the underlying collection directly from its property on the parent.
"""
for member in self.col:
yield self._get(member)
return
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than calling self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
def __ior__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.add(value)
return self
def _set(self):
return set(iter(self))
def union(self, other):
return set(self).union(other)
__or__ = union
def difference(self, other):
return set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
def __isub__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
for value in other:
self.discard(value)
return self
def intersection(self, other):
return set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __iand__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def symmetric_difference(self, other):
return set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
def __ixor__(self, other):
if not collections._set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
return self
def issubset(self, other):
return set(self).issubset(other)
def issuperset(self, other):
return set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return set(self)
def __eq__(self, other):
return set(self) == other
def __ne__(self, other):
return set(self) != other
def __lt__(self, other):
return set(self) < other
def __le__(self, other):
return set(self) <= other
def __gt__(self, other):
return set(self) > other
def __ge__(self, other):
return set(self) >= other
def __repr__(self):
return repr(set(self))
def __hash__(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(set, func_name)):
func.__doc__ = getattr(set, func_name).__doc__
del func_name, func
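# Illustrative sketch (not from the original source): the in-place set
# operators above rewrite the proxied collection by diffing the current
# membership against the desired result, so only the necessary backing
# objects are created or removed, e.g.:
#
#     proxied_set &= {'a', 'b'}   # removes members outside {'a', 'b'} and
#                                 # creates backing objects for missing ones
#
# ``proxied_set`` is a hypothetical association-proxied set.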
|
mit
|
gecos-team/gecosws-config-assistant
|
gecosws_config_assistant/util/JSONUtil.py
|
1
|
1672
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Abraham Macias Paredes <amacias@solutia-it.es>"
__copyright__ = "Copyright (C) 2015, Junta de Andalucía" + \
"<devmaster@guadalinex.org>"
__license__ = "GPL-2"
import logging
import json
class JSONUtil(object):
'''
Utility class to manipulate JSON.
'''
def __init__(self):
'''
Constructor
'''
self.logger = logging.getLogger('JSONUtil')
def loadJSONFromFile(self, filepath):
''' Loading JSON from file '''
json_data = None
# Get data from data file
try:
gcc_control_file = open(filepath, 'r')
content = gcc_control_file.read()
gcc_control_file.close()
json_data = json.loads(content)
except Exception:
self.logger.warn('Error reading file: ' + filepath)
return json_data
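# Illustrative usage sketch (not part of the original file); the path is
# hypothetical:
#
#     util = JSONUtil()
#     data = util.loadJSONFromFile('/tmp/example.json')
#     if data is None:
#         pass  # missing file or malformed JSON; a warning was logged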
|
gpl-2.0
|
Midnighter/nadist
|
docs/conf.py
|
1
|
1386
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += ['sphinxcontrib.spelling']
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'nadist'
year = '2016'
author = 'Moritz E. Beber'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/midnighter/nadist/issues/%s', '#'),
'pr': ('https://github.com/midnighter/nadist/pull/%s', 'PR #'),
}
import sphinx_py3doc_enhanced_theme
html_theme = "sphinx_py3doc_enhanced_theme"
html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()]
html_theme_options = {
'githuburl': 'https://github.com/midnighter/nadist/'
}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
bsd-3-clause
|
analyseuc3m/ANALYSE-v1
|
common/lib/xmodule/xmodule/seq_module.py
|
12
|
17301
|
"""
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import collections
import json
import logging
from pkg_resources import resource_string
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean, String
from xblock.fragment import Fragment
import newrelic.agent
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class SequenceFields(object):
has_children = True
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
# Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
exam_review_rules = String(
display_name=_("Software Secure Review Rules"),
help=_(
"This setting indicates what rules the proctoring team should follow when viewing the videos."
),
default='',
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
@property
def is_proctored_exam(self):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
return self.is_proctored_enabled
@is_proctored_exam.setter
def is_proctored_exam(self, value):
""" Alias the is_proctored_enabled field to the more legible is_proctored_exam """
self.is_proctored_enabled = value
@XBlock.wants('proctoring')
@XBlock.wants('credit')
@XBlock.needs("user")
@XBlock.needs("bookmarks")
class SequenceModule(SequenceFields, ProctoringFields, XModule):
"""
Layout module which lays out content in a temporal sequence
"""
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
try:
self.position = int(self.system.position)
except (ValueError, TypeError):
# Check for https://openedx.atlassian.net/browse/LMS-6496
warnings.warn(
"Sequential position cannot be converted to an integer: {pos!r}".format(
pos=self.system.position,
),
RuntimeWarning,
)
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
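# Illustrative sketch (not from the original source): a 'goto_position'
# dispatch carries the 1-indexed tab position as a string, e.g.
# data = {'position': u'3'}; anything non-numeric or non-positive falls
# back to position 1.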
def student_view(self, context):
# If we're rendering this sequence, but no position is set yet,
# default the position to the first element
if self.position is None:
self.position = 1
## Returns a set of all types of all sub-children
contents = []
fragment = Fragment()
context = context or {}
bookmarks_service = self.runtime.service(self, "bookmarks")
context["username"] = self.runtime.service(self, "user").get_current_user().opt_attrs['edx-platform.username']
parent_module = self.get_parent()
display_names = [
parent_module.display_name_with_default,
self.display_name_with_default
]
# We do this up here because proctored exam functionality could bypass
# rendering after this section.
self._capture_basic_metrics()
# Is this sequential part of a timed or proctored exam?
if self.is_time_limited:
view_html = self._time_limited_student_view(context)
# Do we have an alternate rendering
# from the edx_proctoring subsystem?
if view_html:
fragment.add_content(view_html)
return fragment
display_items = self.get_display_items()
for child in display_items:
is_bookmarked = bookmarks_service.is_bookmarked(usage_key=child.scope_ids.usage_id)
context["bookmarked"] = is_bookmarked
progress = child.get_progress()
rendered_child = child.render(STUDENT_VIEW, context)
fragment.add_frag_resources(rendered_child)
# `titles` is a list of titles to inject into the sequential tooltip display.
# We omit any blank titles to avoid blank lines in the tooltip display.
titles = [title.strip() for title in child.get_content_titles() if title.strip()]
childinfo = {
'content': rendered_child.content,
'title': "\n".join(titles),
'page_title': titles[0] if titles else '',
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'type': child.get_icon_class(),
'id': child.scope_ids.usage_id.to_deprecated_string(),
'bookmarked': is_bookmarked,
'path': " > ".join(display_names + [child.display_name_with_default]),
}
if childinfo['title'] == '':
childinfo['title'] = child.display_name_with_default_escaped
contents.append(childinfo)
params = {
'items': contents,
'element_id': self.location.html_id(),
'item_id': self.location.to_deprecated_string(),
'position': self.position,
'tag': self.location.category,
'ajax_url': self.system.ajax_url,
}
fragment.add_content(self.system.render_template("seq_module.html", params))
self._capture_full_seq_item_metrics(display_items)
self._capture_current_unit_metrics(display_items)
# Get all descendant XBlock types and counts
return fragment
def _locations_in_subtree(self, node):
"""
The usage keys for all descendants of an XBlock/XModule as a flat list.
Includes the location of the node passed in.
"""
stack = [node]
locations = []
while stack:
curr = stack.pop()
locations.append(curr.location)
if curr.has_children:
stack.extend(curr.get_children())
return locations
def _capture_basic_metrics(self):
"""
Capture basic information about this sequence in New Relic.
"""
newrelic.agent.add_custom_parameter('seq.block_id', unicode(self.location))
newrelic.agent.add_custom_parameter('seq.display_name', self.display_name or '')
newrelic.agent.add_custom_parameter('seq.position', self.position)
newrelic.agent.add_custom_parameter('seq.is_time_limited', self.is_time_limited)
def _capture_full_seq_item_metrics(self, display_items):
"""
Capture information about the number and types of XBlock content in
the sequence as a whole. We send this information to New Relic so that
we can do better performance analysis of courseware.
"""
# Basic count of the number of Units (a.k.a. VerticalBlocks) we have in
# this learning sequence
newrelic.agent.add_custom_parameter('seq.num_units', len(display_items))
# Count of all modules (leaf nodes) in this sequence (e.g. videos,
# problems, etc.) The units (verticals) themselves are not counted.
all_item_keys = self._locations_in_subtree(self)
newrelic.agent.add_custom_parameter('seq.num_items', len(all_item_keys))
# Count of all modules by block_type (e.g. "video": 2, "discussion": 4)
block_counts = collections.Counter(usage_key.block_type for usage_key in all_item_keys)
for block_type, count in block_counts.items():
newrelic.agent.add_custom_parameter('seq.block_counts.{}'.format(block_type), count)
def _capture_current_unit_metrics(self, display_items):
"""
Capture information about the current selected Unit within the Sequence.
"""
# Positions are stored with indexing starting at 1. If we get into a
# weird state where the saved position is out of bounds (e.g. the
# content was changed), avoid going into any details about this unit.
if 1 <= self.position <= len(display_items):
# Basic info about the Unit...
current = display_items[self.position - 1]
newrelic.agent.add_custom_parameter('seq.current.block_id', unicode(current.location))
newrelic.agent.add_custom_parameter('seq.current.display_name', current.display_name or '')
# Examining all items inside the Unit (or split_test, conditional, etc.)
child_locs = self._locations_in_subtree(current)
newrelic.agent.add_custom_parameter('seq.current.num_items', len(child_locs))
curr_block_counts = collections.Counter(usage_key.block_type for usage_key in child_locs)
for block_type, count in curr_block_counts.items():
newrelic.agent.add_custom_parameter('seq.current.block_counts.{}'.format(block_type), count)
def _time_limited_student_view(self, context):
"""
Delegated rendering of a student view when in a time
limited view. This ultimately calls down into the
edx_proctoring pip-installed Django app.
"""
# None = no overridden view rendering
view_html = None
proctoring_service = self.runtime.service(self, 'proctoring')
credit_service = self.runtime.service(self, 'credit')
# Is this sequence designated as a Timed Examination, which includes
# Proctored Exams
feature_enabled = (
proctoring_service and
credit_service and
self.is_time_limited
)
if feature_enabled:
user_id = self.runtime.user_id
user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
course_id = self.runtime.course_id
content_id = self.location
context = {
'display_name': self.display_name,
'default_time_limit_mins': (
self.default_time_limit_minutes if
self.default_time_limit_minutes else 0
),
'is_practice_exam': self.is_practice_exam,
'due_date': self.due
}
# inject the user's credit requirements and fulfillments
if credit_service:
credit_state = credit_service.get_credit_state(user_id, course_id)
if credit_state:
context.update({
'credit_state': credit_state
})
# See if the edx-proctoring subsystem wants to present
# a special view to the student rather
# than the actual sequence content
#
# This will return None if there is no
# overridden view to display given the
# current state of the user
view_html = proctoring_service.get_student_view(
user_id=user_id,
course_id=course_id,
content_id=content_id,
context=context,
user_role=user_role_in_course
)
return view_html
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
"""
A Sequences Descriptor object
"""
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
show_in_read_only_mode = True
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
`is_entrance_exam` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(self.fields['is_entrance_exam'])
return non_editable_fields
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(SequenceDescriptor, self).index_dictionary()
html_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Sequence"
return xblock_body
|
agpl-3.0
|
SRabbelier/Melange
|
thirdparty/google_appengine/lib/django_1_2/django/core/cache/backends/base.py
|
46
|
4723
|
"Base Cache class."
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
class InvalidCacheBackendError(ImproperlyConfigured):
pass
class CacheKeyWarning(DjangoRuntimeWarning):
pass
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
class BaseCache(object):
def __init__(self, params):
timeout = params.get('timeout', 300)
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
def add(self, key, value, timeout=None):
"""
Set a value in the cache if the key does not already exist. If
timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
Returns True if the value was stored, False otherwise.
"""
raise NotImplementedError
def get(self, key, default=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError
def set(self, key, value, timeout=None):
"""
Set a value in the cache. If timeout is given, that timeout will be
used for the key; otherwise the default cache timeout will be used.
"""
raise NotImplementedError
def delete(self, key):
"""
Delete a key from the cache, failing silently.
"""
raise NotImplementedError
def get_many(self, keys):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Returns a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k)
if val is not None:
d[k] = val
return d
def has_key(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
return self.get(key) is not None
def incr(self, key, delta=1):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
if key not in self:
raise ValueError("Key '%s' not found" % key)
new_value = self.get(key) + delta
self.set(key, new_value)
return new_value
def decr(self, key, delta=1):
"""
Subtract delta from value in the cache. If the key does not exist, raise
a ValueError exception.
"""
return self.incr(key, -delta)
def __contains__(self, key):
"""
Returns True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
for key, value in data.items():
self.set(key, value, timeout)
def delete_many(self, keys):
"""
Delete a bunch of values from the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
if len(key) > MEMCACHE_MAX_KEY_LENGTH:
warnings.warn('Cache key will cause errors if used with memcached: '
'%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
CacheKeyWarning)
for char in key:
if ord(char) < 33 or ord(char) == 127:
warnings.warn('Cache key contains characters that will cause '
'errors if used with memcached: %r' % key,
CacheKeyWarning)
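# Illustrative sketch (not part of the original file): a minimal in-memory
# backend only has to supply add/get/set/delete/clear; the bulk operations
# and incr/decr above fall back to these primitives, e.g.:
#
#     class DictCache(BaseCache):
#         def __init__(self, params):
#             BaseCache.__init__(self, params)
#             self._store = {}
#         def add(self, key, value, timeout=None):
#             if key in self._store:
#                 return False
#             self._store[key] = value
#             return True
#         def get(self, key, default=None):
#             return self._store.get(key, default)
#         def set(self, key, value, timeout=None):
#             self._store[key] = value
#         def delete(self, key):
#             self._store.pop(key, None)
#         def clear(self):
#             self._store.clear()
#
# ``DictCache`` is hypothetical and ignores timeouts for brevity.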
|
apache-2.0
|
edxnercel/edx-platform
|
lms/lib/courseware_search/test/test_lms_search_initializer.py
|
23
|
5404
|
"""
Tests for the lms_search_initializer
"""
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
from xmodule.modulestore.django import modulestore
from courseware.tests.factories import UserFactory
from courseware.tests.test_masquerade import StaffMasqueradeTestCase
from courseware.masquerade import handle_ajax
from lms.lib.courseware_search.lms_search_initializer import LmsSearchInitializer
from lms.lib.courseware_search.lms_filter_generator import LmsSearchFilterGenerator
class LmsSearchInitializerTestCase(StaffMasqueradeTestCase):
""" Test case class to test search initializer """
def build_course(self):
"""
Build up a course tree with an html control
"""
self.global_staff = UserFactory(is_staff=True)
self.course = CourseFactory.create(
org='Elasticsearch',
course='ES101',
run='test_run',
display_name='Elasticsearch test course',
)
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
display_name='Test Section',
)
self.subsection = ItemFactory.create(
parent=self.section,
category='sequential',
display_name='Test Subsection',
)
self.vertical = ItemFactory.create(
parent=self.subsection,
category='vertical',
display_name='Test Unit',
)
self.html = ItemFactory.create(
parent=self.vertical,
category='html',
display_name='Test Html control 1',
)
self.html = ItemFactory.create(
parent=self.vertical,
category='html',
display_name='Test Html control 2',
)
def setUp(self):
super(LmsSearchInitializerTestCase, self).setUp()
self.build_course()
self.user_partition = UserPartition(
id=0,
name='Test User Partition',
description='',
groups=[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.global_staff.id) # pylint: disable=no-member
def test_staff_masquerading_added_to_group(self):
"""
Tests that initializer sets masquerading for a staff user in a group.
"""
# Verify that there is no masquerading group initially
_, filter_directory, _ = LmsSearchFilterGenerator.generate_field_filters( # pylint: disable=unused-variable
user=self.global_staff,
course_id=unicode(self.course.id)
)
self.assertIsNone(filter_directory['content_groups'])
# Install a masquerading group
request = self._create_mock_json_request(
self.global_staff,
body='{"role": "student", "user_partition_id": 0, "group_id": 1}'
)
handle_ajax(request, unicode(self.course.id))
# Call initializer
LmsSearchInitializer.set_search_enviroment(
request=request,
course_id=unicode(self.course.id)
)
# Verify that there is a masquerading group after masquerade
_, filter_directory, _ = LmsSearchFilterGenerator.generate_field_filters( # pylint: disable=unused-variable
user=self.global_staff,
course_id=unicode(self.course.id)
)
self.assertEqual(filter_directory['content_groups'], [unicode(1)])
def test_staff_masquerading_as_a_staff_user(self):
"""
Tests that initializer sets masquerading for a staff user as staff.
"""
# Install a masquerading group
request = self._create_mock_json_request(
self.global_staff,
body='{"role": "staff"}'
)
handle_ajax(request, unicode(self.course.id))
# Call initializer
LmsSearchInitializer.set_search_enviroment(
request=request,
course_id=unicode(self.course.id)
)
# Verify that there is a masquerading group after masquerade
_, filter_directory, _ = LmsSearchFilterGenerator.generate_field_filters( # pylint: disable=unused-variable
user=self.global_staff,
course_id=unicode(self.course.id)
)
self.assertNotIn('content_groups', filter_directory)
def test_staff_masquerading_as_a_student_user(self):
"""
Tests that initializer sets masquerading for a staff user as student.
"""
# Install a masquerading group
request = self._create_mock_json_request(
self.global_staff,
body='{"role": "student"}'
)
handle_ajax(request, unicode(self.course.id))
# Call initializer
LmsSearchInitializer.set_search_enviroment(
request=request,
course_id=unicode(self.course.id)
)
# Verify that there is a masquerading group after masquerade
_, filter_directory, _ = LmsSearchFilterGenerator.generate_field_filters( # pylint: disable=unused-variable
user=self.global_staff,
course_id=unicode(self.course.id)
)
self.assertEqual(filter_directory['content_groups'], None)
|
agpl-3.0
|
marc-sensenich/ansible
|
lib/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py
|
8
|
10980
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_network_interfaces
short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Configure Element SW Node Network Interfaces for Bond 1G and 10G IP address.
options:
method:
description:
- Type of Method used to configure the interface.
- method depends on other settings such as the use of a static IP address, which will change the method to static.
- loopback - Used to define the IPv4 loopback interface.
- manual - Used to define interfaces for which no configuration is done by default.
- dhcp - May be used to obtain an IP address via DHCP.
- static - Used to define Ethernet interfaces with statically allocated IPv4 addresses.
choices: ['loopback', 'manual', 'dhcp', 'static']
required: true
ip_address_1g:
description:
- IP address for the 1G network.
required: true
ip_address_10g:
description:
- IP address for the 10G network.
required: true
subnet_1g:
description:
- 1GbE Subnet Mask.
required: true
subnet_10g:
description:
- 10GbE Subnet Mask.
required: true
gateway_address_1g:
description:
- Router network address to send packets out of the local network.
required: true
gateway_address_10g:
description:
- Router network address to send packets out of the local network.
required: true
mtu_1g:
description:
- Maximum Transmission Unit for 1GbE; the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
mtu_10g:
description:
- Maximum Transmission Unit for 10GbE; the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
dns_nameservers:
description:
- List of addresses for domain name servers.
dns_search_domains:
description:
- List of DNS search domains.
bond_mode_1g:
description:
- Bond mode for 1GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
bond_mode_10g:
description:
- Bond mode for 10GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
lacp_1g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
lacp_10g:
description:
- Link Aggregation Control Protocol useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
virtual_network_tag:
description:
- This is the primary network tag. All nodes in a cluster have the same VLAN tag.
'''
EXAMPLES = """
- name: Set Node network interfaces configuration for Bond 1G and 10G properties
tags:
- elementsw_network_interfaces
na_elementsw_network_interfaces:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
method: static
ip_address_1g: 10.226.109.68
ip_address_10g: 10.226.201.72
subnet_1g: 255.255.255.0
subnet_10g: 255.255.255.0
gateway_address_1g: 10.193.139.1
gateway_address_10g: 10.193.140.1
mtu_1g: 1500
mtu_10g: 9000
bond_mode_1g: ActivePassive
bond_mode_10g: LACP
lacp_10g: Fast
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
from solidfire.models import Network, NetworkConfig
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
class ElementSWNetworkInterfaces(object):
"""
Element Software Network Interfaces - Bond 1G and 10G Network configuration
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
method=dict(required=True, type='str', choices=['loopback', 'manual', 'dhcp', 'static']),
ip_address_1g=dict(required=True, type='str'),
ip_address_10g=dict(required=True, type='str'),
subnet_1g=dict(required=True, type='str'),
subnet_10g=dict(required=True, type='str'),
gateway_address_1g=dict(required=True, type='str'),
gateway_address_10g=dict(required=True, type='str'),
mtu_1g=dict(required=False, type='str', default='1500'),
mtu_10g=dict(required=False, type='str', default='1500'),
dns_nameservers=dict(required=False, type='list'),
dns_search_domains=dict(required=False, type='list'),
bond_mode_1g=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP'], default='ActivePassive'),
bond_mode_10g=dict(required=False, type='str', choices=['ActivePassive', 'ALB', 'LACP'], default='ActivePassive'),
lacp_1g=dict(required=False, type='str', choices=['Fast', 'Slow'], default='Slow'),
lacp_10g=dict(required=False, type='str', choices=['Fast', 'Slow'], default='Slow'),
virtual_network_tag=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
input_params = self.module.params
self.method = input_params['method']
self.ip_address_1g = input_params['ip_address_1g']
self.ip_address_10g = input_params['ip_address_10g']
self.subnet_1g = input_params['subnet_1g']
self.subnet_10g = input_params['subnet_10g']
self.gateway_address_1g = input_params['gateway_address_1g']
self.gateway_address_10g = input_params['gateway_address_10g']
self.mtu_1g = input_params['mtu_1g']
self.mtu_10g = input_params['mtu_10g']
self.dns_nameservers = input_params['dns_nameservers']
self.dns_search_domains = input_params['dns_search_domains']
self.bond_mode_1g = input_params['bond_mode_1g']
self.bond_mode_10g = input_params['bond_mode_10g']
self.lacp_1g = input_params['lacp_1g']
self.lacp_10g = input_params['lacp_10g']
self.virtual_network_tag = input_params['virtual_network_tag']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442)
def set_network_config(self):
"""
set network configuration
"""
try:
self.sfe.set_network_config(network=self.network_object)
except Exception as exception_object:
self.module.fail_json(msg='Error setting network configuration for node: %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def get_network_params_object(self):
"""
Get Element SW Network object
:description: get Network object
:return: NetworkConfig object
:rtype: object(NetworkConfig object)
"""
try:
bond_1g_network = NetworkConfig(method=self.method,
address=self.ip_address_1g,
netmask=self.subnet_1g,
gateway=self.gateway_address_1g,
mtu=self.mtu_1g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_1g,
bond_lacp_rate=self.lacp_1g,
virtual_network_tag=self.virtual_network_tag)
bond_10g_network = NetworkConfig(method=self.method,
address=self.ip_address_10g,
netmask=self.subnet_10g,
gateway=self.gateway_address_10g,
mtu=self.mtu_10g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_10g,
bond_lacp_rate=self.lacp_10g,
virtual_network_tag=self.virtual_network_tag)
network_object = Network(bond1_g=bond_1g_network,
bond10_g=bond_10g_network)
return network_object
except Exception as e:
self.module.fail_json(msg='Error with setting up network object for node 1G and 10G configuration : %s' % to_native(e),
exception=to_native(e))
def apply(self):
"""
Check connection and initialize node with cluster ownership
"""
changed = False
result_message = None
self.network_object = self.get_network_params_object()
if self.network_object is not None:
self.set_network_config()
changed = True
else:
result_message = "Skipping changes, No change requested"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""
Main function
"""
elementsw_network_interfaces = ElementSWNetworkInterfaces()
elementsw_network_interfaces.apply()
if __name__ == '__main__':
main()
|
gpl-3.0
|