| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from functools import partial
import types
from ansible.module_utils import six
try:
import netaddr
except ImportError:
# in this case, we'll make the filters return error messages (see bottom)
netaddr = None
else:
class mac_linux(netaddr.mac_unix):
pass
mac_linux.word_fmt = '%.2x'
from ansible import errors
# ---- IP address and network query helpers ----
def _empty_ipaddr_query(v, vtype):
# We don't have any query to process, so just check what type the user
# expects, and return the IP address in a correct format
if v:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
def _first_last(v):
if v.size == 2:
first_usable = int(netaddr.IPAddress(v.first))
last_usable = int(netaddr.IPAddress(v.last))
return first_usable, last_usable
elif v.size > 1:
first_usable = int(netaddr.IPAddress(v.first + 1))
last_usable = int(netaddr.IPAddress(v.last - 1))
return first_usable, last_usable
def _6to4_query(v, vtype, value):
if v.version == 4:
if v.size == 1:
ipconv = str(v.ip)
elif v.size > 1:
if v.ip != v.network:
ipconv = str(v.ip)
else:
ipconv = False
if ipaddr(ipconv, 'public'):
numbers = list(map(int, ipconv.split('.')))
try:
return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
except Exception:
return False
elif v.version == 6:
if vtype == 'address':
if ipaddr(str(v), '2002::/16'):
return value
elif vtype == 'network':
if v.ip != v.network:
if ipaddr(str(v.ip), '2002::/16'):
return value
else:
return False
def _ip_query(v):
if v.size == 1:
return str(v.ip)
if v.size > 1:
# /31 networks in netaddr have no broadcast address
if v.ip != v.network or not v.broadcast:
return str(v.ip)
def _gateway_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _address_prefix_query(v):
if v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _bool_ipaddr_query(v):
if v:
return True
def _broadcast_query(v):
if v.size > 2:
return str(v.broadcast)
def _cidr_query(v):
return str(v)
def _cidr_lookup_query(v, iplist, value):
try:
if v in iplist:
return value
except Exception:
return False
def _first_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size == 2:
return str(netaddr.IPAddress(int(v.network)))
elif v.size > 1:
return str(netaddr.IPAddress(int(v.network) + 1))
def _host_query(v):
if v.size == 1:
return str(v)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _hostmask_query(v):
return str(v.hostmask)
def _int_query(v, vtype):
if vtype == 'address':
return int(v.ip)
elif vtype == 'network':
return str(int(v.ip)) + '/' + str(int(v.prefixlen))
def _ip_prefix_query(v):
if v.size == 2:
return str(v.ip) + '/' + str(v.prefixlen)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + '/' + str(v.prefixlen)
def _ip_netmask_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.netmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.netmask)
'''
def _ip_wildcard_query(v):
if v.size == 2:
return str(v.ip) + ' ' + str(v.hostmask)
elif v.size > 1:
if v.ip != v.network:
return str(v.ip) + ' ' + str(v.hostmask)
'''
def _ipv4_query(v, value):
if v.version == 6:
try:
return str(v.ipv4())
except Exception:
return False
else:
return value
def _ipv6_query(v, value):
if v.version == 4:
return str(v.ipv6())
else:
return value
def _last_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
return str(netaddr.IPAddress(last_usable))
def _link_local_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v.version == 4:
if ipaddr(str(v_ip), '169.254.0.0/24'):
return value
elif v.version == 6:
if ipaddr(str(v_ip), 'fe80::/10'):
return value
def _loopback_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if v_ip.is_loopback():
return value
def _multicast_query(v, value):
if v.is_multicast():
return value
def _net_query(v):
if v.size > 1:
if v.ip == v.network:
return str(v.network) + '/' + str(v.prefixlen)
def _netmask_query(v):
return str(v.netmask)
def _network_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_id_query(v):
'''Return the network of a given IP or subnet'''
return str(v.network)
def _network_netmask_query(v):
return str(v.network) + ' ' + str(v.netmask)
def _network_wildcard_query(v):
return str(v.network) + ' ' + str(v.hostmask)
def _next_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
next_ip = int(netaddr.IPAddress(int(v.ip) + 1))
if next_ip >= first_usable and next_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + 1))
def _prefix_query(v):
return int(v.prefixlen)
def _previous_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
previous_ip = int(netaddr.IPAddress(int(v.ip) - 1))
if previous_ip >= first_usable and previous_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - 1))
def _private_query(v, value):
if v.is_private():
return value
def _public_query(v, value):
v_ip = netaddr.IPAddress(str(v.ip))
if (v_ip.is_unicast() and not v_ip.is_private() and
not v_ip.is_loopback() and not v_ip.is_netmask() and
not v_ip.is_hostmask()):
return value
def _range_usable_query(v, vtype):
if vtype == 'address':
"Does it make sense to raise an error"
raise errors.AnsibleFilterError('Not a network address')
elif vtype == 'network':
if v.size > 1:
first_usable, last_usable = _first_last(v)
first_usable = str(netaddr.IPAddress(first_usable))
last_usable = str(netaddr.IPAddress(last_usable))
return "{0}-{1}".format(first_usable, last_usable)
def _revdns_query(v):
v_ip = netaddr.IPAddress(str(v.ip))
return v_ip.reverse_dns
def _size_query(v):
return v.size
def _size_usable_query(v):
if v.size == 1:
return 0
elif v.size == 2:
return 2
return v.size - 2
def _subnet_query(v):
return str(v.cidr)
def _type_query(v):
if v.size == 1:
return 'address'
if v.size > 1:
if v.ip != v.network:
return 'address'
else:
return 'network'
def _unicast_query(v, value):
if v.is_unicast():
return value
def _version_query(v):
return v.version
def _wrap_query(v, vtype, value):
if v.version == 6:
if vtype == 'address':
return '[' + str(v.ip) + ']'
elif vtype == 'network':
return '[' + str(v.ip) + ']/' + str(v.prefixlen)
else:
return value
# ---- HWaddr query helpers ----
def _bare_query(v):
v.dialect = netaddr.mac_bare
return str(v)
def _bool_hwaddr_query(v):
if v:
return True
def _int_hwaddr_query(v):
return int(v)
def _cisco_query(v):
v.dialect = netaddr.mac_cisco
return str(v)
def _empty_hwaddr_query(v, value):
if v:
return value
def _linux_query(v):
v.dialect = mac_linux
return str(v)
def _postgresql_query(v):
v.dialect = netaddr.mac_pgsql
return str(v)
def _unix_query(v):
v.dialect = netaddr.mac_unix
return str(v)
def _win_query(v):
v.dialect = netaddr.mac_eui48
return str(v)
# ---- IP address and network filters ----
# Returns a minified list of subnets or a single subnet that spans all of
# the inputs.
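# Illustrative usage (assumed example values, not from the original source):
#
# - ['192.168.0.0/24', '192.168.1.0/24'] | cidr_merge
#   -> ['192.168.0.0/23']
# - ['192.168.0.0/24', '192.168.3.0/24'] | cidr_merge('span')
#   -> '192.168.0.0/22'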
def cidr_merge(value, action='merge'):
if not hasattr(value, '__iter__'):
raise errors.AnsibleFilterError('cidr_merge: expected iterable, got ' + repr(value))
if action == 'merge':
try:
return [str(ip) for ip in netaddr.cidr_merge(value)]
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
elif action == 'span':
# spanning_cidr needs at least two values
if len(value) == 0:
return None
elif len(value) == 1:
try:
return str(netaddr.IPNetwork(value[0]))
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
else:
try:
return str(netaddr.spanning_cidr(value))
except Exception as e:
raise errors.AnsibleFilterError('cidr_merge: error in netaddr:\n%s' % e)
else:
raise errors.AnsibleFilterError("cidr_merge: invalid action '%s'" % action)
def ipaddr(value, query='', version=False, alias='ipaddr'):
''' Check if string is an IP address or network and filter it '''
query_func_extra_args = {
'': ('vtype',),
'6to4': ('vtype', 'value'),
'cidr_lookup': ('iplist', 'value'),
'first_usable': ('vtype',),
'int': ('vtype',),
'ipv4': ('value',),
'ipv6': ('value',),
'last_usable': ('vtype',),
'link-local': ('value',),
'loopback': ('value',),
'lo': ('value',),
'multicast': ('value',),
'next_usable': ('vtype',),
'previous_usable': ('vtype',),
'private': ('value',),
'public': ('value',),
'unicast': ('value',),
'range_usable': ('vtype',),
'wrap': ('vtype', 'value'),
}
query_func_map = {
'': _empty_ipaddr_query,
'6to4': _6to4_query,
'address': _ip_query,
'address/prefix': _address_prefix_query, # deprecate
'bool': _bool_ipaddr_query,
'broadcast': _broadcast_query,
'cidr': _cidr_query,
'cidr_lookup': _cidr_lookup_query,
'first_usable': _first_usable_query,
'gateway': _gateway_query, # deprecate
'gw': _gateway_query, # deprecate
'host': _host_query,
'host/prefix': _address_prefix_query, # deprecate
'hostmask': _hostmask_query,
'hostnet': _gateway_query, # deprecate
'int': _int_query,
'ip': _ip_query,
'ip/prefix': _ip_prefix_query,
'ip_netmask': _ip_netmask_query,
# 'ip_wildcard': _ip_wildcard_query, built then could not think of use case
'ipv4': _ipv4_query,
'ipv6': _ipv6_query,
'last_usable': _last_usable_query,
'link-local': _link_local_query,
'lo': _loopback_query,
'loopback': _loopback_query,
'multicast': _multicast_query,
'net': _net_query,
'next_usable': _next_usable_query,
'netmask': _netmask_query,
'network': _network_query,
'network_id': _network_id_query,
'network/prefix': _subnet_query,
'network_netmask': _network_netmask_query,
'network_wildcard': _network_wildcard_query,
'prefix': _prefix_query,
'previous_usable': _previous_usable_query,
'private': _private_query,
'public': _public_query,
'range_usable': _range_usable_query,
'revdns': _revdns_query,
'router': _gateway_query, # deprecate
'size': _size_query,
'size_usable': _size_usable_query,
'subnet': _subnet_query,
'type': _type_query,
'unicast': _unicast_query,
'v4': _ipv4_query,
'v6': _ipv6_query,
'version': _version_query,
'wildcard': _hostmask_query,
'wrap': _wrap_query,
}
vtype = None
if not value:
return False
elif value is True:
return False
# Check if value is a list and parse each element
elif isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, str(query), version):
_ret.append(ipaddr(element, str(query), version))
if _ret:
return _ret
else:
return list()
# Check if value is a number and convert it to an IP address
elif str(value).isdigit():
# We don't know what IP version to assume, so let's check IPv4 first,
# then IPv6
try:
if ((not version) or (version and version == 4)):
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = int(value)
v.prefixlen = 32
elif version and version == 6:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# IPv4 didn't work the first time, so it definitely has to be IPv6
except Exception:
try:
v = netaddr.IPNetwork('::/0')
v.value = int(value)
v.prefixlen = 128
# The value is too big for IPv6. Are you a nanobot?
except Exception:
return False
# We got an IP address, let's mark it as such
value = str(v)
vtype = 'address'
# value has not been recognized, check if it's a valid IP string
else:
try:
v = netaddr.IPNetwork(value)
# value is a valid IP string, check if user specified
# CIDR prefix or just an IP address, this will indicate default
# output format
try:
address, prefix = value.split('/')
vtype = 'network'
except Exception:
vtype = 'address'
# value hasn't been recognized, maybe it's a numerical CIDR?
except Exception:
try:
address, prefix = value.split('/')
address.isdigit()
address = int(address)
prefix.isdigit()
prefix = int(prefix)
# It's not numerical CIDR, give up
except Exception:
return False
# It is something, so let's try and build a CIDR from the parts
try:
v = netaddr.IPNetwork('0.0.0.0/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv4 CIDR
except Exception:
try:
v = netaddr.IPNetwork('::/0')
v.value = address
v.prefixlen = prefix
# It's not a valid IPv6 CIDR. Give up.
except Exception:
return False
# We have a valid CIDR, so let's write it in correct format
value = str(v)
vtype = 'network'
# We have a query string but it's not in the known query types. Check if
# that string is a valid subnet, if so, we can check later if given IP
# address/network is inside that specific subnet
try:
# ?? 6to4 and link-local were True here before. Should they still?
if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
query = 'cidr_lookup'
except Exception:
pass
    # This code checks if value matches the IP version the user wants, i.e. if
    # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
# If version does not match, return False
if version and v.version != version:
return False
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
try:
float(query)
if v.size == 1:
if vtype == 'address':
return str(v.ip)
elif vtype == 'network':
return str(v)
elif v.size > 1:
try:
return str(v[query]) + '/' + str(v.prefixlen)
except Exception:
return False
else:
return value
except Exception:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
def ipmath(value, amount):
try:
if '/' in value:
ip = netaddr.IPNetwork(value).ip
else:
ip = netaddr.IPAddress(value)
except (netaddr.AddrFormatError, ValueError):
msg = 'You must pass a valid IP address; {0} is invalid'.format(value)
raise errors.AnsibleFilterError(msg)
if not isinstance(amount, int):
msg = (
'You must pass an integer for arithmetic; '
'{0} is not a valid integer'
).format(amount)
raise errors.AnsibleFilterError(msg)
return str(ip + amount)
def ipwrap(value, query=''):
try:
if isinstance(value, (list, tuple, types.GeneratorType)):
_ret = []
for element in value:
if ipaddr(element, query, version=False, alias='ipwrap'):
_ret.append(ipaddr(element, 'wrap'))
else:
_ret.append(element)
return _ret
else:
_ret = ipaddr(value, query, version=False, alias='ipwrap')
if _ret:
return ipaddr(_ret, 'wrap')
else:
return value
except Exception:
return value
def ipv4(value, query=''):
return ipaddr(value, query, version=4, alias='ipv4')
def ipv6(value, query=''):
return ipaddr(value, query, version=6, alias='ipv6')
# Split given subnet into smaller subnets or find out the biggest subnet of
# a given IP address with given CIDR prefix
# Usage:
#
# - address or address/prefix | ipsubnet
# returns CIDR subnet of a given input
#
# - address/prefix | ipsubnet(cidr)
# returns number of possible subnets for given CIDR prefix
#
# - address/prefix | ipsubnet(cidr, index)
# returns new subnet with given CIDR prefix
#
# - address | ipsubnet(cidr)
# returns biggest subnet with given CIDR prefix that address belongs to
#
# - address | ipsubnet(cidr, index)
# returns next indexed subnet which contains given address
#
# - address/prefix | ipsubnet(subnet/prefix)
# returns the 1-based index of the address/prefix within the given subnet
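#
# Illustrative example values (assumed, not from the original source):
#
# - '192.168.0.0/24' | ipsubnet(26)                -> '4' (number of /26 subnets)
# - '192.168.0.0/24' | ipsubnet(26, 1)             -> '192.168.0.64/26'
# - '192.168.0.64/26' | ipsubnet('192.168.0.0/24') -> '2' (second /26 in the /24)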
def ipsubnet(value, query='', index='x'):
''' Manipulate IPv4/IPv6 subnets '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except Exception:
return False
query_string = str(query)
if not query:
return str(value)
elif query_string.isdigit():
vsize = ipaddr(v, 'size')
query = int(query)
try:
float(index)
index = int(index)
if vsize > 1:
try:
return str(list(value.subnet(query))[index])
except Exception:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[index])
except Exception:
return False
except Exception:
if vsize > 1:
try:
return str(len(list(value.subnet(query))))
except Exception:
return False
elif vsize == 1:
try:
return str(value.supernet(query)[0])
except Exception:
return False
elif query_string:
vtype = ipaddr(query, 'type')
if vtype == 'address':
v = ipaddr(query, 'cidr')
elif vtype == 'network':
v = ipaddr(query, 'subnet')
else:
msg = 'You must pass a valid subnet or IP address; {0} is invalid'.format(query_string)
raise errors.AnsibleFilterError(msg)
query = netaddr.IPNetwork(v)
for i, subnet in enumerate(query.subnet(value.prefixlen), 1):
if subnet == value:
return str(i)
msg = '{0} is not in the subnet {1}'.format(value.cidr, query.cidr)
raise errors.AnsibleFilterError(msg)
return False
# Returns the nth host within a network described by value.
# Usage:
#
# - address or address/prefix | nthhost(nth)
# returns the nth host within the given network
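#
# Illustrative example value (assumed): '10.0.0.0/8' | nthhost(305) -> 10.0.1.49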
def nthhost(value, query=''):
''' Get the nth host within a given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
value = netaddr.IPNetwork(v)
except Exception:
return False
if not query:
return False
try:
nth = int(query)
if value.size > nth:
return value[nth]
except ValueError:
return False
return False
# Returns the next nth usable ip within a network described by value.
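# Illustrative example value (assumed, not from the original source):
# '192.168.0.5/24' | next_nth_usable(2) -> '192.168.0.7'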
def next_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except Exception:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) + offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) + offset))
# Returns the previous nth usable ip within a network described by value.
def previous_nth_usable(value, offset):
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
v = netaddr.IPNetwork(v)
except Exception:
return False
if type(offset) != int:
raise errors.AnsibleFilterError('Must pass in an integer')
if v.size > 1:
first_usable, last_usable = _first_last(v)
nth_ip = int(netaddr.IPAddress(int(v.ip) - offset))
if nth_ip >= first_usable and nth_ip <= last_usable:
return str(netaddr.IPAddress(int(v.ip) - offset))
def _range_checker(ip_check, first, last):
'''
Tests whether an ip address is within the bounds of the first and last address.
:param ip_check: The ip to test if it is within first and last.
:param first: The first IP in the range to test against.
:param last: The last IP in the range to test against.
:return: bool
'''
if ip_check >= first and ip_check <= last:
return True
else:
return False
def _address_normalizer(value):
'''
Used to validate an address or network type and return it in a consistent format.
This is being used for future use cases not currently available such as an address range.
:param value: The string representation of an address or network.
:return: The address or network in the normalized form.
'''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address' or vtype == "network":
v = ipaddr(value, 'subnet')
except Exception:
return False
return v
def network_in_usable(value, test):
'''
    Checks whether 'test' is a usable address or addresses in 'value'
    :param value: The string representation of an address or network to test against.
:param test: The string representation of an address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
v_first = ipaddr(ipaddr(v, 'first_usable') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'last_usable') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
def network_in_network(value, test):
'''
Checks whether the 'test' address or addresses are in 'value', including broadcast and network
    :param value: The network address or range to test against.
:param test: The address or network to validate if it is within the range of 'value'.
:return: bool
'''
# normalize value and test variables into an ipaddr
v = _address_normalizer(value)
w = _address_normalizer(test)
    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
v_first = ipaddr(ipaddr(v, 'network') or ipaddr(v, 'address'), 'int')
v_last = ipaddr(ipaddr(v, 'broadcast') or ipaddr(v, 'address'), 'int')
w_first = ipaddr(ipaddr(w, 'network') or ipaddr(w, 'address'), 'int')
w_last = ipaddr(ipaddr(w, 'broadcast') or ipaddr(w, 'address'), 'int')
if _range_checker(w_first, v_first, v_last) and _range_checker(w_last, v_first, v_last):
return True
else:
return False
def reduce_on_network(value, network):
'''
Reduces a list of addresses to only the addresses that match a given network.
    :param value: The list of addresses to filter on.
    :param network: The network to validate against.
:return: The reduced list of addresses.
'''
# normalize network variable into an ipaddr
n = _address_normalizer(network)
    # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
n_first = ipaddr(ipaddr(n, 'network') or ipaddr(n, 'address'), 'int')
n_last = ipaddr(ipaddr(n, 'broadcast') or ipaddr(n, 'address'), 'int')
# create an empty list to fill and return
r = []
for address in value:
# normalize address variables into an ipaddr
a = _address_normalizer(address)
        # get first and last addresses as integers to compare value and test; or catches the value when the case is /32
a_first = ipaddr(ipaddr(a, 'network') or ipaddr(a, 'address'), 'int')
a_last = ipaddr(ipaddr(a, 'broadcast') or ipaddr(a, 'address'), 'int')
if _range_checker(a_first, n_first, n_last) and _range_checker(a_last, n_first, n_last):
r.append(address)
return r
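# Illustrative example values for reduce_on_network (assumed, not from the
# original source):
#
# - ['192.168.0.34', '10.3.0.3', '192.168.2.34'] | reduce_on_network('192.168.0.0/24')
#   -> ['192.168.0.34']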
# Returns the SLAAC address within a network for a given HW/MAC address.
# Usage:
#
# - prefix | slaac(mac)
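#
# Illustrative example values (assumed, not from the original source):
# 'fd00:123::/64' | slaac('00:11:22:33:44:55') -> fd00:123::211:22ff:fe33:4455
# (modified EUI-64 interface identifier appended to the prefix)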
def slaac(value, query=''):
''' Get the SLAAC address within given network '''
try:
vtype = ipaddr(value, 'type')
if vtype == 'address':
v = ipaddr(value, 'cidr')
elif vtype == 'network':
v = ipaddr(value, 'subnet')
if ipaddr(value, 'version') != 6:
return False
value = netaddr.IPNetwork(v)
except Exception:
return False
if not query:
return False
try:
mac = hwaddr(query, alias='slaac')
eui = netaddr.EUI(mac)
except Exception:
return False
return eui.ipv6(value.network)
# ---- HWaddr / MAC address filters ----
def hwaddr(value, query='', alias='hwaddr'):
''' Check if string is a HW/MAC address and filter it '''
query_func_extra_args = {
'': ('value',),
}
query_func_map = {
'': _empty_hwaddr_query,
'bare': _bare_query,
'bool': _bool_hwaddr_query,
'int': _int_hwaddr_query,
'cisco': _cisco_query,
'eui48': _win_query,
'linux': _linux_query,
'pgsql': _postgresql_query,
'postgresql': _postgresql_query,
'psql': _postgresql_query,
'unix': _unix_query,
'win': _win_query,
}
try:
v = netaddr.EUI(value)
except Exception:
if query and query != 'bool':
raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
extras = []
for arg in query_func_extra_args.get(query, tuple()):
extras.append(locals()[arg])
try:
return query_func_map[query](v, *extras)
except KeyError:
raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
return False
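# Illustrative usage of hwaddr() (assumed example values, not from the
# original source):
#
# - '08:00:27:0C:9F:A4' | hwaddr('cisco') -> '0800.270c.9fa4'
# - '08:00:27:0C:9F:A4' | hwaddr('bare')  -> '0800270C9FA4'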
def macaddr(value, query=''):
return hwaddr(value, query, alias='macaddr')
def _need_netaddr(f_name, *args, **kwargs):
raise errors.AnsibleFilterError("The %s filter requires python's netaddr be "
"installed on the ansible controller" % f_name)
def ip4_hex(arg, delimiter=''):
''' Convert an IPv4 address to Hexadecimal notation '''
numbers = list(map(int, arg.split('.')))
return '{0:02x}{sep}{1:02x}{sep}{2:02x}{sep}{3:02x}'.format(*numbers, sep=delimiter)
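# Illustrative example values (assumed): '192.168.1.5' | ip4_hex -> 'c0a80105',
# '192.168.1.5' | ip4_hex('.') -> 'c0.a8.01.05'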
# ---- Ansible filters ----
class FilterModule(object):
''' IP address and network manipulation filters '''
filter_map = {
# IP addresses and networks
'cidr_merge': cidr_merge,
'ipaddr': ipaddr,
'ipmath': ipmath,
'ipwrap': ipwrap,
'ip4_hex': ip4_hex,
'ipv4': ipv4,
'ipv6': ipv6,
'ipsubnet': ipsubnet,
'next_nth_usable': next_nth_usable,
'network_in_network': network_in_network,
'network_in_usable': network_in_usable,
'reduce_on_network': reduce_on_network,
'nthhost': nthhost,
'previous_nth_usable': previous_nth_usable,
'slaac': slaac,
# MAC / HW addresses
'hwaddr': hwaddr,
'macaddr': macaddr
}
def filters(self):
if netaddr:
return self.filter_map
else:
# Need to install python's netaddr for these filters to work
return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
| t794104/ansible | lib/ansible/plugins/filter/ipaddr.py | Python | gpl-3.0 | 32,458 | 0.000924 |
#!/usr/bin/env python
# Copyright (C) 2011 Aaron Lindsay <aaron@aclindsay.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from time import sleep
import atexit
import logging
from signal import SIGTERM
pid_file = "" #holds the name of file holding our pid
def daemonize(pid_filename, daemon_fn):
"""Daemonize the current process, store the new pid in pid_filename, and
call daemon_fn() to continue execution."""
global pid_file
pid_file = pid_filename
try:
#fork off a process, kill the parent
if os.fork() > 0:
os._exit(0)
except:
logging.error("Failed to fork new process.")
os._exit(0)
os.chdir("/")
os.setsid() #start a new session, with this as the session leader
os.umask(0) #reset file creation mask
#fork again
try:
if os.fork() > 0:
os._exit(0)
except:
logging.error("Failed to fork new process.")
os._exit(0)
#flush all terminal 'files' and redirect them to /dev/null
sys.stdout.flush()
sys.stderr.flush()
null = os.open('/dev/null', os.O_RDWR)
os.dup2(null, sys.stdin.fileno())
os.dup2(null, sys.stdout.fileno())
os.dup2(null, sys.stderr.fileno())
os.close(null)
#store our current pid in the given pidfile
atexit.register(rm_pid_file) #delete pid file when current process exits
pid = os.getpid()
try:
with open(pid_file,'w') as f:
f.write(str(pid))
f.close()
except:
logging.error("Failed to create pid file at %s" %
(pid_filename))
os._exit(0)
#run the function with "real work" in it
daemon_fn()
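# Illustrative usage (assumed example, not part of the original module):
#
#   def worker():
#       while True:
#           sleep(60)  # the daemon's real work would go here
#
#   daemonize("/var/run/example.pid", worker)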
def rm_pid_file():
global pid_file
os.remove(pid_file)
def aengelize(pid_filename):
"""Make the daemonized process represented by the given filename 'go to
heaven'."""
try:
with open(pid_filename,'r') as f:
pid = int(f.read().strip())
f.close()
except:
logging.error("Failed to open pid file at %s. Process already exited?"
% (pid_filename))
sys.exit(0)
#kill process
try:
#try to kill process for 11 seconds
for i in range(0,110):
os.kill(pid, SIGTERM)
sleep(0.1)
logging.error("Failed to stop process")
except OSError, err:
if str(err).find("No such process") <= 0:
logging.error("Failed to stop process")
sys.exit(1)
| aclindsa/asink-python | src/shared/daemon.py | Python | gpl-2.0 | 3,138 | 0.007648 |
import os
import codecs
try:
from setuptools import (setup, find_packages)
except ImportError:
from distutils.core import (setup, find_packages)
VERSION = (0, 2, 0)
__version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:])
__package_name__ = 'pelican-readtime'
__description__ = 'Plugin for Pelican that computes average read time.'
__contact_names__ = 'David Jenkins, Deepak Bhalla, Jonathan Dektiar'
__contact_emails__ = 'djenkinsdev@gmail.com, contact@deepakrb.com, contact@jonathandekhtiar.eu'
__homepage__ = 'https://github.com/JenkinsDev/pelican-readtime'
__repository_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__download_url__ = 'https://github.com/JenkinsDev/pelican-readtime'
__docformat__ = 'markdown'
__license__ = 'MIT'
__keywords__ = 'pelican blogging blog static webdevelopment plugin pelican-plugin readtime python python3 python2'
here = os.path.abspath(os.path.dirname(__file__))
if os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(here, 'README.rst'), 'r', 'utf-8').read()
else:
long_description = 'See ' + __homepage__
setup(
name=__package_name__,
version=__version__,
description=__description__,
long_description=long_description,
url=__repository_url__,
download_url=__download_url__,
license='MIT',
author=__contact_names__,
author_email=__contact_emails__,
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=__keywords__,
packages=[''],
install_requires=['pelican>=3.6'],
zip_safe=True,
include_package_data=True
)
| JenkinsDev/pelican-readtime | setup.py | Python | mit | 2,230 | 0.000897 |
# -*- coding: utf-8 -*-
from django.core.exceptions import ValidationError
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.test import TestCase
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy
class FormsUtilTestCase(TestCase):
# Tests for forms/util.py module.
def test_flatatt(self):
###########
# flatatt #
###########
self.assertEqual(flatatt({'id': "header"}), u' id="header"')
self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), u' class="news" title="Read this"')
self.assertEqual(flatatt({}), u'')
def test_validation_error(self):
###################
# ValidationError #
###################
# Can take a string.
self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)),
'<ul class="errorlist"><li>There was an error.</li></ul>')
# Can take a unicode string.
self.assertHTMLEqual(unicode(ErrorList(ValidationError(u"Not \u03C0.").messages)),
u'<ul class="errorlist"><li>Not π.</li></ul>')
# Can take a lazy string.
self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)),
'<ul class="errorlist"><li>Error.</li></ul>')
# Can take a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)),
'<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>')
# Can take a mixture in a list.
self.assertHTMLEqual(str(ErrorList(ValidationError(["First error.", u"Not \u03C0.", ugettext_lazy("Error.")]).messages)),
'<ul class="errorlist"><li>First error.</li><li>Not π.</li><li>Error.</li></ul>')
class VeryBadError:
def __unicode__(self): return u"A very bad error."
# Can take a non-string.
self.assertHTMLEqual(str(ErrorList(ValidationError(VeryBadError()).messages)),
'<ul class="errorlist"><li>A very bad error.</li></ul>')
# Escapes non-safe input but not input marked safe.
example = 'Example of link: <a href="http://www.example.com/">example</a>'
self.assertHTMLEqual(str(ErrorList([example])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorList([mark_safe(example)])),
'<ul class="errorlist"><li>Example of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': example})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
self.assertHTMLEqual(str(ErrorDict({'name': mark_safe(example)})),
'<ul class="errorlist"><li>nameExample of link: <a href="http://www.example.com/">example</a></li></ul>')
| LethusTI/supportcenter | vendor/django/tests/regressiontests/forms/tests/util.py | Python | gpl-3.0 | 3,154 | 0.008566 |
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_out
from django.dispatch import receiver
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from repanier.auth_backend import RepanierAuthBackend
@login_required()
@csrf_protect
@never_cache
def logout_view(request):
"""
Logs out the user and displays 'You are logged out' message.
"""
logout(request)
# pages-root is the django cms root page.
# pages-root may be replaced by login_form to go to the login form instead of the home page
# The reverse may be replaced by "/" to also go to the home page
return HttpResponseRedirect(reverse("pages-root"))
@receiver(user_logged_out)
def receiver_user_logged_out(sender, request, user, **kwargs):
RepanierAuthBackend.remove_staff_right(user=user)
| pcolmant/repanier | repanier/views/logout_view.py | Python | gpl-3.0 | 1,016 | 0.000984 |
#!/usr/bin/env python
#------------------------------------------------------------
# Script which demonstrates how to find the best-fit
# parameters of a Voigt line-shape model
#
# Vog, 26 Mar 2012
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from scipy.special import wofz
from kapteyn import kmpfit
ln2 = numpy.log(2)
def voigt(x, y):
# The Voigt function is also the real part of
# w(z) = exp(-z^2) erfc(iz), the complex probability function,
# which is also known as the Faddeeva function. Scipy has
# implemented this function under the name wofz()
z = x + 1j*y
I = wofz(z).real
return I
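# Quick sanity check (illustrative, approximate): voigt(0, 1) equals
# wofz(1j).real = exp(1)*erfc(1), which is about 0.4276.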
def Voigt(nu, alphaD, alphaL, nu_0, A, a=0, b=0):
# The Voigt line shape in terms of its physical parameters
f = numpy.sqrt(ln2)
x = (nu-nu_0)/alphaD * f
y = alphaL/alphaD * f
backg = a + b*nu
V = A*f/(alphaD*numpy.sqrt(numpy.pi)) * voigt(x, y) + backg
return V
def funcV(p, x):
# Compose the Voigt line-shape
alphaD, alphaL, nu_0, I, a, b = p
return Voigt(x, alphaD, alphaL, nu_0, I, a, b)
def funcG(p, x):
# Model function is a gaussian
A, mu, sigma, zerolev = p
return( A * numpy.exp(-(x-mu)*(x-mu)/(2*sigma*sigma)) + zerolev )
def residualsV(p, data):
# Return weighted residuals of Voigt
x, y, err = data
return (y-funcV(p,x)) / err
def residualsG(p, data):
# Return weighted residuals of Gauss
x, y, err = data
return (y-funcG(p,x)) / err
# Data from simulated MUSE cube
x = numpy.array([854.05,854.18,854.31,854.44,854.57,854.7,854.83,854.96,\
855.09,855.22,855.35,855.48,855.61,855.74,855.87,856.0,\
856.13,856.26,856.39,856.52,856.65,856.78,856.91])
y = numpy.array([6.31683382764,6.41273839772,6.43047296256,6.37437933311,\
6.34883451462,6.30711287633,6.24409954622,6.09241716936,\
5.75421549752,5.20381929725,4.18020502292,3.64663145132,\
4.25251198746,5.23945118487,5.76701752096,6.06587703526,\
6.15751018003,6.25985588506,6.35063433647,6.41795488447,\
6.42002335563,6.35883554071,6.36915982142])
N = len(y)
err = numpy.ones(N)
A = -2
alphaD = 0.5
alphaL = 0.5
a = 6
b = 0
nu_0 = 855
p0 = [alphaD, alphaL, nu_0, A, a, b]
# Do the fit
fitter = kmpfit.Fitter(residuals=residualsV, data=(x,y,err))
fitter.parinfo = [{}, {}, {}, {}, {}, {'fixed':True}] # Take zero level fixed in fit
fitter.fit(params0=p0)
print("\n========= Fit results Voigt profile ==========")
print("Initial params:", fitter.params0)
print("Params: ", fitter.params)
print("Iterations: ", fitter.niter)
print("Function ev: ", fitter.nfev)
print("Uncertainties: ", fitter.xerror)
print("dof: ", fitter.dof)
print("chi^2, rchi2: ", fitter.chi2_min, fitter.rchi2_min)
print("stderr: ", fitter.stderr)
print("Status: ", fitter.status)
alphaD, alphaL, nu_0, I, a_back, b_back = fitter.params
c1 = 1.0692
c2 = 0.86639
hwhm = 0.5*(c1*alphaL+numpy.sqrt(c2*alphaL**2+4*alphaD**2))
print("\nFWHM Voigt profile: ", 2*hwhm)
f = numpy.sqrt(ln2)
Y = alphaL/alphaD * f
amp = I/alphaD*numpy.sqrt(ln2/numpy.pi)*voigt(0,Y)
print("Amplitude Voigt profile:", amp)
print("Area under profile: ", I)
# Fit the Gaussian model
p0 = [-3, 855, 0.5, 6.3]
fitterG = kmpfit.Fitter(residuals=residualsG, data=(x,y,err))
#fitterG.parinfo = [{}, {}, {}, {}, {}] # Take zero level fixed in fit
fitterG.fit(params0=p0)
print("\n========= Fit results Gaussian profile ==========")
print("Initial params:", fitterG.params0)
print("Params: ", fitterG.params)
print("Iterations: ", fitterG.niter)
print("Function ev: ", fitterG.nfev)
print("Uncertainties: ", fitterG.xerror)
print("dof: ", fitterG.dof)
print("chi^2, rchi2: ", fitterG.chi2_min, fitterG.rchi2_min)
print("stderr: ", fitterG.stderr)
print("Status: ", fitterG.status)
fwhmG = 2*numpy.sqrt(2*numpy.log(2))*fitterG.params[2]
print("FWHM Gaussian: ", fwhmG)
# Plot the result
rc('legend', fontsize=6)
fig = figure()
frame1 = fig.add_subplot(1,1,1)
xd = numpy.linspace(x.min(), x.max(), 200)
frame1.plot(x, y, 'bo', label="data")
label = "Model with Voigt function"
frame1.plot(xd, funcV(fitter.params,xd), 'g', label=label)
label = "Model with Gaussian function"
frame1.plot(xd, funcG(fitterG.params,xd), 'm', ls='--', label=label)
offset = a_back+b_back*nu_0
frame1.plot((nu_0-hwhm,nu_0+hwhm), (offset+amp/2,offset+amp/2), 'r', label='fwhm')
frame1.plot(xd, a_back+b_back*xd, "y", label='Background')
frame1.set_xlabel("$\\nu$")
frame1.set_ylabel("$\\phi(\\nu)$")
vals = (fitter.chi2_min, fitter.rchi2_min, fitter.dof)
title = "Profile data with Voigt- vs. Gaussian model"
frame1.set_title(title, y=1.05)
frame1.grid(True)
leg = frame1.legend(loc=3)
show()
| kapteyn-astro/kapteyn | doc/source/EXAMPLES/kmpfit_voigt.py | Python | bsd-3-clause | 4,841 | 0.021483 |
from django.core import cache
from django.core.exceptions import MiddlewareNotUsed
from versionedcache.debug import CacheClass
class CacheDebugMiddleware(object):
def __init__(self):
if not isinstance(cache.cache, CacheClass):
raise MiddlewareNotUsed()
def process_request(self, request):
if request.user.is_superuser and 'cache_debug' in request.GET:
action = request.GET['cache_debug']
# only two actions allowed
if action not in ('turn_off', 'write_only'):
return
# implement action
getattr(cache.cache, action)()
| ella/django-versionedcache | versionedcache/middleware.py | Python | bsd-3-clause | 647 | 0.004637 |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fuel_agent import errors
from fuel_agent.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class Parted(object):
def __init__(self, name, label):
self.name = name
self.label = label
self.partitions = []
self.install_bootloader = False
def add_partition(self, **kwargs):
# TODO(kozhukalov): validate before appending
# calculating partition name based on device name and partition count
kwargs['name'] = self.next_name()
kwargs['count'] = self.next_count()
kwargs['device'] = self.name
# if begin is given use its value else use end of last partition
kwargs['begin'] = kwargs.get('begin', self.next_begin())
# if end is given use its value else
# try to calculate it based on size kwarg or
# raise KeyError
        # (kwargs.pop('size') will raise an error if size is not set)
kwargs['end'] = kwargs.get('end') or \
kwargs['begin'] + kwargs.pop('size')
# if partition_type is given use its value else
# try to calculate it automatically
kwargs['partition_type'] = \
kwargs.get('partition_type', self.next_type())
partition = Partition(**kwargs)
self.partitions.append(partition)
return partition
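    # Illustrative use (assumed example values, not from the original source):
    #   parted = Parted('/dev/sda', 'gpt')
    #   parted.add_partition(size=200)  # -> name '/dev/sda1', begin 1, end 201
    #   parted.add_partition(size=300)  # -> name '/dev/sda2', begin 201, end 501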
@property
def logical(self):
return filter(lambda x: x.type == 'logical', self.partitions)
@property
def primary(self):
return filter(lambda x: x.type == 'primary', self.partitions)
@property
def extended(self):
found = filter(lambda x: x.type == 'extended', self.partitions)
if found:
return found[0]
def next_type(self):
if self.label == 'gpt':
return 'primary'
elif self.label == 'msdos':
if self.extended:
return 'logical'
elif len(self.partitions) < 3 and not self.extended:
return 'primary'
elif len(self.partitions) == 3 and not self.extended:
return 'extended'
#NOTE(agordeev): how to reach that condition?
else:
return 'logical'
def next_count(self, next_type=None):
next_type = next_type or self.next_type()
if next_type == 'logical':
return len(self.logical) + 5
return len(self.partitions) + 1
def next_begin(self):
if not self.partitions:
return 1
if self.partitions[-1] == self.extended:
return self.partitions[-1].begin
return self.partitions[-1].end
def next_name(self):
if self.next_type() == 'extended':
return None
separator = ''
if 'cciss' in self.name or 'loop' in self.name:
separator = 'p'
return '%s%s%s' % (self.name, separator, self.next_count())
class Partition(object):
def __init__(self, name, count, device, begin, end, partition_type,
flags=None, guid=None, configdrive=False):
self.name = name
self.count = count
self.device = device
self.name = name
self.begin = begin
self.end = end
self.type = partition_type
self.flags = flags or []
self.guid = guid
self.configdrive = configdrive
def set_flag(self, flag):
if flag not in self.flags:
self.flags.append(flag)
def set_guid(self, guid):
self.guid = guid
class Pv(object):
def __init__(self, name, metadatasize=16, metadatacopies=2):
self.name = name
self.metadatasize = metadatasize
self.metadatacopies = metadatacopies
class Vg(object):
def __init__(self, name, pvnames=None):
self.name = name
self.pvnames = pvnames or []
def add_pv(self, pvname):
if pvname not in self.pvnames:
self.pvnames.append(pvname)
class Lv(object):
def __init__(self, name, vgname, size):
self.name = name
self.vgname = vgname
self.size = size
@property
def device_name(self):
return '/dev/mapper/%s-%s' % (self.vgname.replace('-', '--'),
self.name.replace('-', '--'))
class Md(object):
def __init__(self, name, level,
devices=None, spares=None):
self.name = name
self.level = level
self.devices = devices or []
self.spares = spares or []
def add_device(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.devices.append(device)
def add_spare(self, device):
if device in self.devices or device in self.spares:
raise errors.MDDeviceDuplicationError(
'Error while attaching device to md: '
'device %s is already attached' % device)
self.spares.append(device)
class Fs(object):
def __init__(self, device, mount=None,
fs_type=None, fs_options=None, fs_label=None):
self.device = device
self.mount = mount
self.type = fs_type or 'xfs'
self.options = fs_options or ''
self.label = fs_label or ''
class PartitionScheme(object):
def __init__(self):
self.parteds = []
self.mds = []
self.pvs = []
self.vgs = []
self.lvs = []
self.fss = []
self.kernel_params = ''
def add_parted(self, **kwargs):
parted = Parted(**kwargs)
self.parteds.append(parted)
return parted
def add_pv(self, **kwargs):
pv = Pv(**kwargs)
self.pvs.append(pv)
return pv
def add_vg(self, **kwargs):
vg = Vg(**kwargs)
self.vgs.append(vg)
return vg
def add_lv(self, **kwargs):
lv = Lv(**kwargs)
self.lvs.append(lv)
return lv
def add_fs(self, **kwargs):
fs = Fs(**kwargs)
self.fss.append(fs)
return fs
def add_md(self, **kwargs):
mdkwargs = {}
mdkwargs['name'] = kwargs.get('name') or self.md_next_name()
mdkwargs['level'] = kwargs.get('level') or 'mirror'
md = Md(**mdkwargs)
self.mds.append(md)
return md
def md_by_name(self, name):
found = filter(lambda x: x.name == name, self.mds)
if found:
return found[0]
def md_by_mount(self, mount):
fs = self.fs_by_mount(mount)
if fs:
return self.md_by_name(fs.device)
def md_attach_by_mount(self, device, mount, spare=False, **kwargs):
md = self.md_by_mount(mount)
if not md:
md = self.add_md(**kwargs)
fskwargs = {}
fskwargs['device'] = md.name
fskwargs['mount'] = mount
fskwargs['fs_type'] = kwargs.pop('fs_type', None)
fskwargs['fs_options'] = kwargs.pop('fs_options', None)
fskwargs['fs_label'] = kwargs.pop('fs_label', None)
self.add_fs(**fskwargs)
md.add_spare(device) if spare else md.add_device(device)
return md
def md_next_name(self):
count = 0
while True:
name = '/dev/md%s' % count
if name not in [md.name for md in self.mds]:
return name
if count >= 127:
raise errors.MDAlreadyExistsError(
'Error while generating md name: '
'names from /dev/md0 to /dev/md127 seem to be busy, '
'try to generate md name manually')
count += 1
def vg_by_name(self, vgname):
found = filter(lambda x: (x.name == vgname), self.vgs)
if found:
return found[0]
def pv_by_name(self, pvname):
found = filter(lambda x: (x.name == pvname), self.pvs)
if found:
return found[0]
def vg_attach_by_name(self, pvname, vgname,
metadatasize=16, metadatacopies=2):
vg = self.vg_by_name(vgname) or self.add_vg(name=vgname)
pv = self.pv_by_name(pvname) or self.add_pv(
name=pvname, metadatasize=metadatasize,
metadatacopies=metadatacopies)
vg.add_pv(pv.name)
def fs_by_mount(self, mount):
found = filter(lambda x: (x.mount and x.mount == mount), self.fss)
if found:
return found[0]
def fs_by_device(self, device):
found = filter(lambda x: x.device == device, self.fss)
if found:
return found[0]
def lv_by_device_name(self, device_name):
found = filter(lambda x: x.device_name == device_name, self.lvs)
if found:
return found[0]
def root_device(self):
fs = self.fs_by_mount('/')
if not fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find root device: '
'root file system not found')
return fs.device
def boot_device(self, grub_version=2):
# We assume /boot is a separate partition. If it is not
# then we try to use root file system
boot_fs = self.fs_by_mount('/boot') or self.fs_by_mount('/')
if not boot_fs:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
                'boot file system not found, '
'it must be a separate mount point')
if grub_version == 1:
# Legacy GRUB has a limitation. It is not able to mount MD devices.
# If it is MD compatible it is only able to ignore MD metadata
# and to mount one of those devices which are parts of MD device,
# but it is possible only if MD device is a MIRROR.
md = self.md_by_name(boot_fs.device)
if md:
try:
return md.devices[0]
except IndexError:
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'md device %s does not have devices attached' %
md.name)
# Legacy GRUB is not able to mount LVM devices.
if self.lv_by_device_name(boot_fs.device):
raise errors.WrongPartitionSchemeError(
'Error while trying to find boot device: '
'found device is %s but legacy grub is not able to '
'mount logical volumes' %
boot_fs.device)
return boot_fs.device
def configdrive_device(self):
# Configdrive device must be a small (about 10M) partition
# on one of node hard drives. This partition is necessary
# only if one uses cloud-init with configdrive.
for parted in self.parteds:
for prt in parted.partitions:
if prt.configdrive:
return prt.name
def append_kernel_params(self, kernel_params):
self.kernel_params += ' ' + kernel_params
| zhaochao/fuel-web | fuel_agent/fuel_agent/objects/partition.py | Python | apache-2.0 | 11,745 | 0.000085 |
# -*- coding: utf-8 -*-
from sqlalchemy.ext.declarative import declarative_base
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
DeclarativeBase = declarative_base()
maker = sessionmaker(autoflush=True, autocommit=False,
extension=ZopeTransactionExtension())
DBSession = scoped_session(maker)
metadata = DeclarativeBase.metadata
def init_model(engine1):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure(bind=engine1)
metadata.bind = engine1
from .logsurvey import LogSurvey
| tongpa/tgext.pylogservice | tgext/pylogservice/models/__init__.py | Python | mit | 623 | 0.008026 |
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Utility functions for Cyclades Tests
Cyclades requires a lot of helper functions, and `common'
had grown too large.
"""
import time
import IPy
import base64
import socket
import random
import paramiko
import tempfile
import subprocess
from kamaki.clients import ClientError
from synnefo_tools.burnin.common import BurninTests, MB, GB, QADD, QREMOVE, \
QDISK, QVM, QRAM, QIP, QCPU, QNET
# pylint: disable=too-many-public-methods
class CycladesTests(BurninTests):
"""Extends the BurninTests class for Cyclades"""
def _parse_images(self):
"""Find images given to command line"""
if self.images is None:
self.info("No --images given. Will use the default %s",
"^Debian Base$")
filters = ["name:^Debian Base$"]
else:
filters = self.images
avail_images = self._find_images(filters)
self.info("Found %s images to choose from", len(avail_images))
return avail_images
def _parse_flavors(self):
"""Find flavors given to command line"""
flavors = self._get_list_of_flavors(detail=True)
if self.flavors is None:
self.info("No --flavors given. Will use all of them")
avail_flavors = flavors
else:
avail_flavors = self._find_flavors(self.flavors, flavors=flavors)
self.info("Found %s flavors to choose from", len(avail_flavors))
return avail_flavors
def _try_until_timeout_expires(self, opmsg, check_fun):
"""Try to perform an action until timeout expires"""
assert callable(check_fun), "Not a function"
action_timeout = self.action_timeout
action_warning = self.action_warning
if action_warning > action_timeout:
action_warning = action_timeout
start_time = int(time.time())
end_time = start_time + action_warning
while end_time > time.time():
try:
ret_value = check_fun()
self.info("Operation `%s' finished in %s seconds",
opmsg, int(time.time()) - start_time)
return ret_value
except Retry:
time.sleep(self.query_interval)
self.warning("Operation `%s' is taking too long after %s seconds",
opmsg, int(time.time()) - start_time)
end_time = start_time + action_timeout
while end_time > time.time():
try:
ret_value = check_fun()
self.info("Operation `%s' finished in %s seconds",
opmsg, int(time.time()) - start_time)
return ret_value
except Retry:
time.sleep(self.query_interval)
self.error("Operation `%s' timed out after %s seconds",
opmsg, int(time.time()) - start_time)
self.fail("time out")
def _try_once(self, opmsg, check_fun, should_fail=False):
"""Try to perform an action once"""
assert callable(check_fun), "Not a function"
ret_value = None
failed = False
try:
ret_value = check_fun()
except Retry:
failed = True
if failed and not should_fail:
self.error("Operation `%s' failed", opmsg)
elif not failed and should_fail:
self.error("Operation `%s' should have failed", opmsg)
else:
return ret_value
def _get_list_of_servers(self, detail=False):
"""Get (detailed) list of servers"""
if detail:
self.info("Getting detailed list of servers")
else:
self.info("Getting simple list of servers")
return self.clients.cyclades.list_servers(detail=detail)
def _get_list_of_networks(self, detail=False):
"""Get (detailed) list of networks"""
if detail:
self.info("Getting detailed list of networks")
else:
self.info("Getting simple list of networks")
return self.clients.network.list_networks(detail=detail)
def _get_server_details(self, server, quiet=False):
"""Get details for a server"""
if not quiet:
self.info("Getting details for server %s with id %s",
server['name'], server['id'])
return self.clients.cyclades.get_server_details(server['id'])
# pylint: disable=too-many-arguments
def _create_server(self, image, flavor, personality=None,
network=False, project_id=None):
"""Create a new server"""
if network:
fip = self._create_floating_ip(project_id=project_id)
port = self._create_port(fip['floating_network_id'],
floating_ip=fip)
networks = [{'port': port['id']}]
else:
networks = None
name = image.get('name', image.get('display_name', ''))
servername = "%s for %s" % (self.run_id, name)
self.info("Creating a server with name %s", servername)
self.info("Using image %s with id %s", name, image['id'])
self.info("Using flavor %s with id %s", flavor['name'], flavor['id'])
server = self.clients.cyclades.create_server(
servername, flavor['id'], image['id'],
personality=personality, networks=networks,
project_id=project_id)
self.info("Server id: %s", server['id'])
self.info("Server password: %s", server['adminPass'])
self.assertEqual(server['name'], servername)
self.assertEqual(server['flavor']['id'], flavor['id'])
self.assertEqual(server['image']['id'], image['id'])
self.assertEqual(server['status'], "BUILD")
if project_id is None:
project_id = self._get_uuid()
self.assertEqual(server['tenant_id'], project_id)
# Verify quotas
changes = \
{project_id:
[(QDISK, QADD, flavor['disk'], GB),
(QVM, QADD, 1, None),
(QRAM, QADD, flavor['ram'], MB),
(QCPU, QADD, flavor['vcpus'], None)]}
self._check_quotas(changes)
return server
def _delete_servers(self, servers, error=False):
"""Deleting a number of servers in parallel"""
# Disconnect floating IPs
if not error:
# If there is the possibility for the machine to be in
# ERROR state we cannot delete its ports.
for srv in servers:
self.info(
"Disconnecting all floating IPs from server with id %s",
srv['id'])
self._disconnect_from_network(srv)
# Delete servers
for srv in servers:
self.info("Sending the delete request for server with id %s",
srv['id'])
self.clients.cyclades.delete_server(srv['id'])
if error:
curr_states = ["ACTIVE", "ERROR", "STOPPED", "BUILD"]
else:
curr_states = ["ACTIVE"]
for srv in servers:
self._insist_on_server_transition(srv, curr_states, "DELETED")
# Servers no longer in server list
new_servers = [s['id'] for s in self._get_list_of_servers()]
for srv in servers:
self.info("Verifying that server with id %s is no longer in "
"server list", srv['id'])
self.assertNotIn(srv['id'], new_servers)
# Verify quotas
self._verify_quotas_deleted(servers)
def _verify_quotas_deleted(self, servers):
"""Verify quotas for a number of deleted servers"""
changes = dict()
for server in servers:
project = server['tenant_id']
if project not in changes:
changes[project] = []
flavor = \
self.clients.compute.get_flavor_details(server['flavor']['id'])
new_changes = [
(QDISK, QREMOVE, flavor['disk'], GB),
(QVM, QREMOVE, 1, None),
(QRAM, QREMOVE, flavor['ram'], MB),
(QCPU, QREMOVE, flavor['vcpus'], None)]
changes[project].extend(new_changes)
self._check_quotas(changes)
def _get_connection_username(self, server):
"""Determine the username to use to connect to the server"""
users = server['metadata'].get("users", None)
ret_user = None
if users is not None:
user_list = users.split()
if "root" in user_list:
ret_user = "root"
else:
ret_user = random.choice(user_list)
else:
# Return the login name for connections based on the server OS
self.info("Could not find `users' metadata in server. Let's guess")
os_value = server['metadata'].get("os")
if os_value in ("Ubuntu", "Kubuntu", "Fedora"):
ret_user = "user"
elif os_value in ("windows", "windows_alpha1"):
ret_user = "Administrator"
else:
ret_user = "root"
self.assertIsNotNone(ret_user)
self.info("User's login name: %s", ret_user)
return ret_user
def _insist_on_server_transition(self, server, curr_statuses, new_status):
"""Insist on server transiting from curr_statuses to new_status"""
def check_fun():
"""Check server status"""
srv = self._get_server_details(server, quiet=True)
if srv['status'] in curr_statuses:
raise Retry()
elif srv['status'] == new_status:
return
else:
msg = "Server \"%s\" with id %s went to unexpected status %s"
self.error(msg, server['name'], server['id'], srv['status'])
self.fail(msg % (server['name'], server['id'], srv['status']))
opmsg = "Waiting for server \"%s\" with id %s to become %s"
self.info(opmsg, server['name'], server['id'], new_status)
opmsg = opmsg % (server['name'], server['id'], new_status)
self._try_until_timeout_expires(opmsg, check_fun)
def _insist_on_snapshot_transition(self, snapshot,
curr_statuses, new_status):
"""Insist on snapshot transiting from curr_statuses to new_status"""
def check_fun():
"""Check snapstho status"""
snap = \
self.clients.block_storage.get_snapshot_details(snapshot['id'])
if snap['status'] in curr_statuses:
raise Retry()
elif snap['status'] == new_status:
return
else:
msg = "Snapshot \"%s\" with id %s went to unexpected status %s"
self.error(msg, snapshot['display_name'],
snapshot['id'], snap['status'])
opmsg = "Waiting for snapshot \"%s\" with id %s to become %s"
self.info(opmsg, snapshot['display_name'], snapshot['id'], new_status)
opmsg = opmsg % (snapshot['display_name'], snapshot['id'], new_status)
self._try_until_timeout_expires(opmsg, check_fun)
def _insist_on_snapshot_deletion(self, snapshot_id):
"""Insist on snapshot deletion"""
def check_fun():
"""Check snapshot details"""
try:
self.clients.block_storage.get_snapshot_details(snapshot_id)
except ClientError as err:
if err.status != 404:
raise
else:
raise Retry()
opmsg = "Waiting for snapshot %s to be deleted"
self.info(opmsg, snapshot_id)
opmsg = opmsg % snapshot_id
self._try_until_timeout_expires(opmsg, check_fun)
def _insist_on_network_transition(self, network,
curr_statuses, new_status):
"""Insist on network transiting from curr_statuses to new_status"""
def check_fun():
"""Check network status"""
ntw = self.clients.network.get_network_details(network['id'])
if ntw['status'] in curr_statuses:
raise Retry()
elif ntw['status'] == new_status:
return
else:
msg = "Network %s with id %s went to unexpected status %s"
self.error(msg, network['name'], network['id'], ntw['status'])
self.fail(msg %
(network['name'], network['id'], ntw['status']))
opmsg = "Waiting for network \"%s\" with id %s to become %s"
self.info(opmsg, network['name'], network['id'], new_status)
opmsg = opmsg % (network['name'], network['id'], new_status)
self._try_until_timeout_expires(opmsg, check_fun)
def _insist_on_tcp_connection(self, family, host, port):
"""Insist on tcp connection"""
def check_fun():
"""Get a connected socket from the specified family to host:port"""
sock = None
for res in socket.getaddrinfo(host, port, family,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE):
fam, socktype, proto, _, saddr = res
try:
sock = socket.socket(fam, socktype, proto)
except socket.error:
sock = None
continue
try:
sock.connect(saddr)
except socket.error:
sock.close()
sock = None
continue
if sock is None:
raise Retry
return sock
familystr = {socket.AF_INET: "IPv4", socket.AF_INET6: "IPv6",
socket.AF_UNSPEC: "Unspecified-IPv4/6"}
opmsg = "Connecting over %s to %s:%s"
self.info(opmsg, familystr.get(family, "Unknown"), host, port)
opmsg = opmsg % (familystr.get(family, "Unknown"), host, port)
return self._try_until_timeout_expires(opmsg, check_fun)
def _get_ips(self, server, version=4, network=None):
"""Get the IPs of a server from the detailed server info
If network not given then get the public IPs. Else the IPs
attached to that network
"""
assert version in (4, 6)
nics = server['attachments']
addrs = []
for nic in nics:
net_id = nic['network_id']
if network is None:
if self.clients.network.get_network_details(net_id)['public']:
if nic['ipv' + str(version)]:
addrs.append(nic['ipv' + str(version)])
else:
if net_id == network['id']:
if nic['ipv' + str(version)]:
addrs.append(nic['ipv' + str(version)])
self.assertGreater(len(addrs), 0,
"Can not get IPs from server attachments")
for addr in addrs:
self.assertEqual(IPy.IP(addr).version(), version)
if network is None:
msg = "Server's public IPv%s is %s"
for addr in addrs:
self.info(msg, version, addr)
else:
msg = "Server's IPv%s attached to network \"%s\" is %s"
for addr in addrs:
self.info(msg, version, network['id'], addr)
return addrs
def _insist_on_ping(self, ip_addr, version=4, should_fail=False):
"""Test server responds to a single IPv4 of IPv6 ping"""
def check_fun():
"""Ping to server"""
cmd = ("ping%s -c 3 -w 20 %s" %
("6" if version == 6 else "", ip_addr))
ping = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ping.communicate()
ret = ping.wait()
if ret != 0:
raise Retry
assert version in (4, 6)
opmsg = "Sent IPv%s ping requests to %s"
self.info(opmsg, version, ip_addr)
opmsg = opmsg % (version, ip_addr)
if should_fail:
self._try_once(opmsg, check_fun, should_fail=True)
else:
self._try_until_timeout_expires(opmsg, check_fun)
def _image_is(self, image, osfamily):
"""Return true if the image is of `osfamily'"""
d_image = self.clients.cyclades.get_image_details(image['id'])
return d_image['metadata']['osfamily'].lower().find(osfamily) >= 0
# pylint: disable=no-self-use
def _ssh_execute(self, hostip, username, password, command):
"""Execute a command via ssh"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostip, username=username, password=password)
except (paramiko.SSHException, socket.error) as err:
self.warning("%s", err.message)
raise Retry()
_, stdout, _ = ssh.exec_command(command)
status = stdout.channel.recv_exit_status()
output = stdout.readlines()
ssh.close()
return output, status
def _insist_get_hostname_over_ssh(self, hostip, username, password):
"""Connect to server using ssh and get it's hostname"""
def check_fun():
"""Get hostname"""
try:
lines, status = self._ssh_execute(
hostip, username, password, "hostname")
self.assertEqual(status, 0)
self.assertEqual(len(lines), 1)
# Remove new line
return lines[0].strip('\n')
except AssertionError:
raise Retry()
opmsg = "Connecting to server using ssh and get it's hostname"
self.info(opmsg)
hostname = self._try_until_timeout_expires(opmsg, check_fun)
self.info("Server's hostname is %s", hostname)
return hostname
# pylint: disable=too-many-arguments
def _check_file_through_ssh(self, hostip, username, password,
remotepath, content):
"""Fetch file from server and compare contents"""
def check_fun():
"""Fetch file"""
try:
transport = paramiko.Transport((hostip, 22))
transport.connect(username=username, password=password)
with tempfile.NamedTemporaryFile() as ftmp:
sftp = paramiko.SFTPClient.from_transport(transport)
sftp.get(remotepath, ftmp.name)
sftp.close()
transport.close()
self.info("Comparing file contents")
remote_content = base64.b64encode(ftmp.read())
self.assertEqual(content, remote_content)
except paramiko.SSHException as err:
self.warning("%s", err.message)
raise Retry()
opmsg = "Fetching file %s from remote server" % remotepath
self.info(opmsg)
self._try_until_timeout_expires(opmsg, check_fun)
# ----------------------------------
# Networks
def _create_network(self, cidr="10.0.1.0/28", dhcp=True,
project_id=None):
"""Create a new private network"""
name = self.run_id
network = self.clients.network.create_network(
"MAC_FILTERED", name=name, shared=False,
project_id=project_id)
self.info("Network with id %s created", network['id'])
subnet = self.clients.network.create_subnet(
network['id'], cidr=cidr, enable_dhcp=dhcp)
self.info("Subnet with id %s created", subnet['id'])
# Verify quotas
if project_id is None:
project_id = self._get_uuid()
changes = \
{project_id: [(QNET, QADD, 1, None)]}
self._check_quotas(changes)
# Test if the right name is assigned
self.assertEqual(network['name'], name)
self.assertEqual(network['tenant_id'], project_id)
return network
def _delete_networks(self, networks, error=False):
"""Delete a network"""
for net in networks:
self.info("Deleting network with id %s", net['id'])
self.clients.network.delete_network(net['id'])
if error:
curr_states = ["ACTIVE", "SNF:DRAINED", "ERROR"]
else:
curr_states = ["ACTIVE", "SNF:DRAINED"]
for net in networks:
self._insist_on_network_transition(net, curr_states, "DELETED")
# Networks no longer in network list
new_networks = [n['id'] for n in self._get_list_of_networks()]
for net in networks:
self.info("Verifying that network with id %s is no longer in "
"network list", net['id'])
self.assertNotIn(net['id'], new_networks)
# Verify quotas
changes = \
{self._get_uuid(): [(QNET, QREMOVE, len(networks), None)]}
self._check_quotas(changes)
def _get_public_networks(self, networks=None):
"""Get the public networks"""
if networks is None:
networks = self._get_list_of_networks(detail=True)
self.info("Getting the public networks")
public_networks = []
for net in networks:
if net['SNF:floating_ip_pool'] and net['public']:
public_networks.append(net)
self.assertNotEqual(public_networks, [],
"Could not find a public network to use")
return public_networks
def _create_floating_ip(self, project_id=None):
"""Create a new floating ip"""
pub_nets = self._get_public_networks()
for pub_net in pub_nets:
self.info("Creating a new floating ip for network with id %s",
pub_net['id'])
try:
fip = self.clients.network.create_floatingip(
pub_net['id'], project_id=project_id)
except ClientError as err:
self.warning("%s: %s", err.message, err.details)
continue
# Verify that floating ip has been created
fips = self.clients.network.list_floatingips()
fips = [f['id'] for f in fips]
self.assertIn(fip['id'], fips)
# Verify quotas
if project_id is None:
project_id = self._get_uuid()
changes = \
{project_id: [(QIP, QADD, 1, None)]}
self._check_quotas(changes)
# Check that IP is IPv4
self.assertEquals(IPy.IP(fip['floating_ip_address']).version(), 4)
self.info("Floating IP %s with id %s created",
fip['floating_ip_address'], fip['id'])
return fip
self.fail("No more IP addresses available")
def _create_port(self, network_id, device_id=None, floating_ip=None):
"""Create a new port attached to the a specific network"""
self.info("Creating a new port to network with id %s", network_id)
if floating_ip is not None:
fixed_ips = [{'ip_address': floating_ip['floating_ip_address']}]
else:
fixed_ips = None
port = self.clients.network.create_port(network_id,
device_id=device_id,
fixed_ips=fixed_ips)
# Verify that port created
ports = self.clients.network.list_ports()
ports = [p['id'] for p in ports]
self.assertIn(port['id'], ports)
# Insist on creation
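        # A port that is not attached to a device settles in DOWN, while a
        # port created for a server should end up ACTIVE.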
if device_id is None:
self._insist_on_port_transition(port, ["BUILD"], "DOWN")
else:
self._insist_on_port_transition(port, ["BUILD", "DOWN"], "ACTIVE")
self.info("Port with id %s created", port['id'])
return port
def _insist_on_port_transition(self, port, curr_statuses, new_status):
"""Insist on port transiting from curr_statuses to new_status"""
def check_fun():
"""Check port status"""
portd = self.clients.network.get_port_details(port['id'])
if portd['status'] in curr_statuses:
raise Retry()
elif portd['status'] == new_status:
return
else:
msg = "Port %s went to unexpected status %s"
self.fail(msg % (portd['id'], portd['status']))
opmsg = "Waiting for port %s to become %s"
self.info(opmsg, port['id'], new_status)
opmsg = opmsg % (port['id'], new_status)
self._try_until_timeout_expires(opmsg, check_fun)
def _insist_on_port_deletion(self, portid):
"""Insist on port deletion"""
def check_fun():
"""Check port details"""
try:
self.clients.network.get_port_details(portid)
except ClientError as err:
if err.status != 404:
raise
else:
raise Retry()
opmsg = "Waiting for port %s to be deleted"
self.info(opmsg, portid)
opmsg = opmsg % portid
self._try_until_timeout_expires(opmsg, check_fun)
def _delete_arp_entry(self, fip):
"""Delete floating IP from ARP table"""
cmd = (["/usr/sbin/arp", "-d", fip])
subp = subprocess.Popen(
cmd, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
subp.communicate()
ret = subp.wait()
if ret != 0:
self.warning("Could not remove floating IP %s from arp cache"
% fip)
def _disconnect_from_network(self, server, network=None):
"""Disconnnect server from network"""
if network is None:
# Disconnect from all public networks
for net in self._get_public_networks():
self._disconnect_from_network(server, network=net)
return
lports = self.clients.network.list_ports()
ports = []
for port in lports:
dport = self.clients.network.get_port_details(port['id'])
if str(dport['network_id']) == str(network['id']) \
and str(dport['device_id']) == str(server['id']):
ports.append(dport)
# Find floating IPs attached to these ports
ports_id = [p['id'] for p in ports]
fips = [f for f in self.clients.network.list_floatingips()
if str(f['port_id']) in ports_id]
# First destroy the ports
for port in ports:
self.info("Destroying port with id %s", port['id'])
self.clients.network.delete_port(port['id'])
self._insist_on_port_deletion(port['id'])
for fip in fips:
self._delete_arp_entry(fip['floating_ip_address'])
# Then delete the floating IPs
self._delete_floating_ips(fips)
def _delete_floating_ips(self, fips):
"""Delete floating ips"""
# Renew the list of floating IP objects
# (It may have been changed, i.e. a port may have been deleted).
if not fips:
return
fip_ids = [f['id'] for f in fips]
new_fips = [f for f in self.clients.network.list_floatingips()
if f['id'] in fip_ids]
for fip in new_fips:
port_id = fip['port_id']
if port_id:
self.info("Destroying port with id %s", port_id)
self.clients.network.delete_port(port_id)
self._insist_on_port_deletion(port_id)
self.info("Destroying floating IP %s with id %s",
fip['floating_ip_address'], fip['id'])
self.clients.network.delete_floatingip(fip['id'])
# Check that floating IPs have been deleted
list_ips = [f['id'] for f in self.clients.network.list_floatingips()]
for fip in fips:
self.assertNotIn(fip['id'], list_ips)
# Verify quotas
changes = dict()
for fip in fips:
project = fip['tenant_id']
if project not in changes:
changes[project] = []
changes[project].append((QIP, QREMOVE, 1, None))
self._check_quotas(changes)
def _find_project(self, flavors, projects=None):
"""Return a pair of flavor, project that we can use"""
if projects is None:
projects = self.quotas.keys()
        # XXX: There seems to be no easy way to find how many resources
        # are left in a project (we would have to subtract usage from the
        # limit and check both per_user and project quotas). For now just
        # return the first flavor with the first project and hope that it
        # fits.
return (flavors[0], projects[0])
# # Get only the quotas for the given 'projects'
# quotas = dict()
# for prj, qts in self.quotas.items():
# if prj in projects:
# quotas[prj] = qts
#
# results = []
# for flv in flavors:
# for prj, qts in quotas.items():
# self.debug("Testing flavor %s, project %s", flv['name'], prj)
# condition = \
# (flv['ram'] <= qts['cyclades.ram']['usage'] and
# flv['vcpus'] <= qts['cyclades.cpu']['usage'] and
# flv['disk'] <= qts['cyclades.disk']['usage'] and
# qts['cyclades.vm']['usage'] >= 1)
# if condition:
# results.append((flv, prj))
#
# if not results:
        #     msg = "Couldn't find a suitable flavor to use for current quotas"
# self.error(msg)
#
# return random.choice(results)
class Retry(Exception):
"""Retry the action
    This is used by the _try_until_timeout_expires method.
"""
|
olgabrani/synnefo
|
snf-tools/synnefo_tools/burnin/cyclades_common.py
|
Python
|
gpl-3.0
| 30,688 | 0 |
# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# Copyright (C) 2013 Kristoffer Gronlund <kgronlund@suse.com>
# See COPYING for license information.
import os
import sys
import time
import re
import bz2
from . import config
from . import command
from . import completers as compl
from . import utils
from . import ui_utils
from . import userdir
from . import xmlutil
from . import constants
from . import options
from .cibconfig import mkset_obj, cib_factory
from .msg import common_err, common_debug, common_info
from .msg import syntax_err
from . import history
from . import cmd_status
ptest_options = ["@v+", "nograph", "scores", "actions", "utilization"]
@utils.memoize
def crm_report():
return history.Report()
class History(command.UI):
'''
The history class
'''
name = "history"
def __init__(self):
command.UI.__init__(self)
self.current_session = None
self._source_inited = False
def _init_source(self):
if self._source_inited:
return True
self._source_inited = True
return self._set_source(options.history)
def _set_period(self, from_time='', to_time=''):
'''
parse time specs and set period
'''
from_dt = to_dt = None
if from_time:
from_dt = utils.parse_time(from_time)
if not from_dt:
return False
if to_time:
to_dt = utils.parse_time(to_time)
if not to_dt:
return False
if to_dt and from_dt:
if to_dt < from_dt:
from_dt, to_dt = to_dt, from_dt
elif to_dt == from_dt:
common_err("%s - %s: To and from dates cannot be the same" % (from_time, to_time))
return False
return crm_report().set_period(from_dt, to_dt)
def _check_source(self, src):
'a (very) quick source check'
if src == "live":
return True
if os.path.isfile(src) or os.path.isdir(src):
return True
return False
def _set_source(self, src, live_from_time=None):
'''
Have the last history source survive the History
and Report instances
'''
common_debug("setting source to %s" % src)
if not self._check_source(src):
if os.path.exists(crm_report().get_session_dir(src)):
common_debug("Interpreting %s as session" % src)
if crm_report().load_state(crm_report().get_session_dir(src)):
options.history = crm_report().get_source()
crm_report().prepare_source()
self.current_session = src
return True
else:
common_err("source %s doesn't exist" % src)
return False
crm_report().set_source(src)
options.history = src
self.current_session = None
to_time = ''
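        # For the live source default the reporting period to roughly the
        # last hour, unless an explicit start time was given.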
if src == "live":
from_time = time.ctime(live_from_time and live_from_time or (time.time() - 60*60))
else:
from_time = ''
return self._set_period(from_time, to_time)
@command.skill_level('administrator')
def do_source(self, context, src=None):
"usage: source {<dir>|<file>|live}"
if src is None:
print "Current source: %s" % (options.history)
return True
self._init_source()
if src != options.history:
return self._set_source(src)
@command.skill_level('administrator')
@command.alias('timeframe')
def do_limit(self, context, from_time='', to_time=''):
"usage: limit [<from_time> [<to_time>]]"
self._init_source()
if options.history == "live" and not from_time:
from_time = time.ctime(time.time() - 60*60)
return self._set_period(from_time, to_time)
@command.skill_level('administrator')
def do_refresh(self, context, force=''):
"usage: refresh"
self._init_source()
if options.history != "live":
common_info("nothing to refresh if source isn't live")
return False
if force:
if force != "force" and force != "--force":
context.fatal_error("Expected 'force' or '--force' (was '%s')" % (force))
force = True
return crm_report().refresh_source(force)
@command.skill_level('administrator')
def do_detail(self, context, detail_lvl):
"usage: detail <detail_level>"
self._init_source()
detail_num = utils.convert2ints(detail_lvl)
if detail_num is None or detail_num not in (0, 1):
context.fatal_error("Expected '0' or '1' (was '%s')" % (detail_lvl))
return crm_report().set_detail(detail_lvl)
@command.skill_level('administrator')
@command.completers_repeating(compl.call(lambda: crm_report().node_list()))
def do_setnodes(self, context, *args):
"usage: setnodes <node> [<node> ...]"
self._init_source()
if options.history != "live":
common_info("setting nodes not necessary for existing reports, proceeding anyway")
return crm_report().set_nodes(*args)
@command.skill_level('administrator')
def do_info(self, context):
"usage: info"
self._init_source()
return crm_report().info()
@command.skill_level('administrator')
def do_latest(self, context):
"usage: latest"
self._init_source()
if not utils.wait4dc("transition", not options.batch):
return False
self._set_source("live")
crm_report().refresh_source()
f = self._get_pe_byidx(-1)
if not f:
return False
crm_report().show_transition_log(f)
@command.skill_level('administrator')
@command.completers_repeating(compl.call(lambda: crm_report().rsc_list()))
def do_resource(self, context, *args):
"usage: resource <rsc> [<rsc> ...]"
self._init_source()
return crm_report().resource(*args)
@command.skill_level('administrator')
@command.wait
@command.completers_repeating(compl.call(lambda: crm_report().node_list()))
def do_node(self, context, *args):
"usage: node <node> [<node> ...]"
self._init_source()
return crm_report().node(*args)
@command.skill_level('administrator')
@command.completers_repeating(compl.call(lambda: crm_report().node_list()))
def do_log(self, context, *args):
"usage: log [<node> ...]"
self._init_source()
return crm_report().log(*args)
def ptest(self, nograph, scores, utilization, actions, verbosity):
'Send a decompressed self.pe_file to ptest'
try:
s = bz2.decompress(open(self.pe_file).read())
except IOError, msg:
common_err("open: %s" % msg)
return False
return utils.run_ptest(s, nograph, scores, utilization, actions, verbosity)
@command.skill_level('administrator')
def do_events(self, context):
"usage: events"
self._init_source()
return crm_report().events()
@command.skill_level('administrator')
@command.completers_repeating(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['v'])))
def do_peinputs(self, context, *args):
"""usage: peinputs [{<range>|<number>} ...] [v]"""
self._init_source()
argl = list(args)
opt_l = utils.fetch_opts(argl, ["v"])
if argl:
l = []
for s in argl:
a = utils.convert2ints(s.split(':'))
if a and len(a) == 2 and not utils.check_range(a):
common_err("%s: invalid peinputs range" % a)
return False
l += crm_report().pelist(a, long=("v" in opt_l))
else:
l = crm_report().pelist(long=("v" in opt_l))
if not l:
return False
s = '\n'.join(l)
utils.page_string(s)
def _get_pe_byname(self, s):
l = crm_report().find_pe_files(s)
if len(l) == 0:
common_err("%s: path not found" % s)
return None
elif len(l) > 1:
common_err("%s: path ambiguous" % s)
return None
return l[0]
def _get_pe_byidx(self, idx):
l = crm_report().pelist()
if len(l) < abs(idx):
if idx == -1:
common_err("no transitions found in the source")
else:
common_err("PE input file for index %d not found" % (idx+1))
return None
return l[idx]
def _get_pe_bynum(self, n):
l = crm_report().pelist([n])
if len(l) == 0:
common_err("PE file %d not found" % n)
return None
elif len(l) > 1:
common_err("PE file %d ambiguous" % n)
return None
return l[0]
def _get_pe_input(self, pe_spec):
'''Get PE input file from the <number>|<index>|<file>
spec.'''
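        # Strings containing "pe-" are treated as file names, positive
        # integers as PE numbers, zero or negative integers as indices
        # counted back from the most recent transition, and anything else
        # falls back to the latest transition.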
if re.search('pe-', pe_spec):
f = self._get_pe_byname(pe_spec)
elif utils.is_int(pe_spec):
n = int(pe_spec)
if n <= 0:
f = self._get_pe_byidx(n-1)
else:
f = self._get_pe_bynum(n)
else:
f = self._get_pe_byidx(-1)
return f
def _show_pe(self, f, opt_l):
self.pe_file = f # self.pe_file needed by self.ptest
ui_utils.ptestlike(self.ptest, 'vv', "transition", opt_l)
return crm_report().show_transition_log(f)
def _display_dot(self, f):
if not config.core.dotty:
common_err("install graphviz to draw transition graphs")
return False
f = crm_report().pe2dot(f)
if not f:
common_err("dot file not found in the report")
return False
utils.show_dot_graph(f, keep_file=True, desc="configuration graph")
return True
def _pe2shadow(self, f, argl):
try:
name = argl[0]
except:
name = os.path.basename(f).replace(".bz2", "")
common_info("transition %s saved to shadow %s" % (f, name))
return xmlutil.pe2shadow(f, name)
@command.skill_level('administrator')
@command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['log', 'showdot', 'save'])))
def do_transition(self, context, *args):
"""usage: transition [<number>|<index>|<file>] [nograph] [v...] [scores] [actions] [utilization]
transition showdot [<number>|<index>|<file>]
transition log [<number>|<index>|<file>]
transition save [<number>|<index>|<file> [name]]"""
self._init_source()
argl = list(args)
subcmd = "show"
if argl and argl[0] in ("showdot", "log", "save", "tags"):
subcmd = argl[0]
del argl[0]
if subcmd == "show":
opt_l = utils.fetch_opts(argl, ptest_options)
if argl:
f = self._get_pe_input(argl[0])
del argl[0]
else:
f = self._get_pe_byidx(-1)
if (subcmd == "save" and len(argl) > 1) or \
(subcmd in ("show", "showdot", "log") and argl):
syntax_err(args, context="transition")
return False
if not f:
return False
if subcmd == "show":
common_info("running ptest with %s" % f)
rc = self._show_pe(f, opt_l)
elif subcmd == "showdot":
rc = self._display_dot(f)
elif subcmd == "save":
rc = self._pe2shadow(f, argl)
elif subcmd == "tags":
rc = crm_report().show_transition_tags(f)
else:
rc = crm_report().show_transition_log(f, True)
return rc
def _save_cib_env(self):
try:
self._cib_f_save = os.environ["CIB_file"]
except:
self._cib_f_save = None
def _reset_cib_env(self):
if self._cib_f_save:
os.environ["CIB_file"] = self._cib_f_save
else:
try:
del os.environ["CIB_file"]
except:
pass
def _setup_cib_env(self, pe_f):
'''Setup the CIB_file environment variable.
Alternatively, we could (or should) use shadows, but the
file/shadow management would be a bit involved.'''
if pe_f != "live":
os.environ["CIB_file"] = pe_f
else:
self._reset_cib_env()
def _pe_config_obj(self, pe_f):
'''Return set_obj of the configuration. It can later be
rendered using the repr() method.'''
self._setup_cib_env(pe_f)
if not cib_factory.refresh():
set_obj = mkset_obj("NOOBJ")
else:
set_obj = mkset_obj()
return set_obj
def _pe_config_noclr(self, pe_f):
'''Configuration with no formatting (no colors).'''
return self._pe_config_obj(pe_f).repr_nopretty()
def _pe_config_plain(self, pe_f):
'''Configuration with no formatting (but with colors).'''
return self._pe_config_obj(pe_f).repr(format=0)
def _pe_config(self, pe_f):
'''Formatted configuration.'''
return self._pe_config_obj(pe_f).repr()
def _pe_status(self, pe_f):
'''Return status as a string.'''
self._setup_cib_env(pe_f)
rc, s = cmd_status.crm_mon()
if rc != 0:
if s:
common_err("crm_mon exited with code %d and said: %s" %
(rc, s))
else:
common_err("crm_mon exited with code %d" % rc)
return None
return s
def _pe_status_nohdr(self, pe_f):
'''Return status (without header) as a string.'''
self._setup_cib_env(pe_f)
rc, s = cmd_status.crm_mon()
if rc != 0:
common_err("crm_mon exited with code %d and said: %s" %
(rc, s))
return None
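        # crm_mon prints a header block first; skip up to and including the
        # first run of blank lines and return the remainder.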
l = s.split('\n')
for i, ln in enumerate(l):
if ln == "":
break
try:
while l[i] == "":
i += 1
except:
pass
return '\n'.join(l[i:])
def _get_diff_pe_input(self, t):
if t != "live":
return self._get_pe_input(t)
if not utils.get_dc():
common_err("cluster not running")
return None
return "live"
def _render_pe(self, pe_fun, t):
pe_f = self._get_diff_pe_input(t)
if not pe_f:
return None
self._save_cib_env()
s = pe_fun(pe_f)
self._reset_cib_env()
return s
def _worddiff(self, s1, s2):
s = None
f1 = utils.str2tmp(s1)
f2 = utils.str2tmp(s2)
if f1 and f2:
_, s = utils.get_stdout("wdiff %s %s" % (f1, f2))
for f in (f1, f2):
try:
os.unlink(f)
except:
pass
return s
def _unidiff(self, s1, s2, t1, t2):
s = None
f1 = utils.str2tmp(s1)
f2 = utils.str2tmp(s2)
if f1 and f2:
_, s = utils.get_stdout("diff -U 0 -d -b --label %s --label %s %s %s" %
(t1, t2, f1, f2))
for f in (f1, f2):
try:
os.unlink(f)
except:
pass
return s
def _diffhtml(self, s1, s2, t1, t2):
import difflib
fromlines = s1.split('\n')
tolines = s2.split('\n')
diff_l = difflib.HtmlDiff(wrapcolumn=60).make_table(
fromlines, tolines, t1, t2)
return ''.join(diff_l)
def _diff(self, pe_fun, t1, t2, html=False, wdiff=False):
s1 = self._render_pe(pe_fun, t1)
s2 = self._render_pe(pe_fun, t2)
if not s1 or not s2:
return None
if html:
s = self._diffhtml(s1, s2, t1, t2)
elif wdiff:
s = self._worddiff(s1, s2)
else:
s = self._unidiff(s1, s2, t1, t2)
return s
def _common_pe_render_check(self, context, opt_l, *args):
if context.previous_level_is("cibconfig") and cib_factory.has_cib_changed():
common_err("please try again after committing CIB changes")
return False
argl = list(args)
supported_l = ["status"]
if context.get_command_name() == "diff":
supported_l.append("html")
opt_l += utils.fetch_opts(argl, supported_l)
if argl:
syntax_err(' '.join(argl), context=context.get_command_name())
return False
return True
@command.skill_level('administrator')
@command.name('_dump')
def do_dump(self, context, t, *args):
'''dump configuration or status to a file and print file
name.
        NB: The configuration is color rendered; note that this depends
        on the current value of the TERM variable.
'''
self._init_source()
opt_l = []
if not self._common_pe_render_check(context, opt_l, *args):
return False
if "status" in opt_l:
s = self._render_pe(self._pe_status_nohdr, t)
else:
s = utils.term_render(self._render_pe(self._pe_config_plain, t))
if context.previous_level_is("cibconfig"):
cib_factory.refresh()
if not s:
return False
print utils.str2tmp(s)
@command.skill_level('administrator')
@command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])),
compl.choice(['status']))
def do_show(self, context, t, *args):
"usage: show <pe> [status]"
self._init_source()
opt_l = []
if not self._common_pe_render_check(context, opt_l, *args):
return False
showfun = self._pe_config
if "status" in opt_l:
showfun = self._pe_status
s = self._render_pe(showfun, t)
if context.previous_level_is("cibconfig"):
cib_factory.refresh()
if not s:
return False
utils.page_string(s)
@command.skill_level('administrator')
@command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])))
def do_graph(self, context, t, *args):
"usage: graph <pe> [<gtype> [<file> [<img_format>]]]"
self._init_source()
pe_f = self._get_diff_pe_input(t)
if not pe_f:
return False
rc, gtype, outf, ftype = ui_utils.graph_args(args)
if not rc:
return False
rc, d = utils.load_graphviz_file(userdir.GRAPHVIZ_USER_FILE)
if rc and d:
constants.graph = d
set_obj = self._pe_config_obj(pe_f)
if not outf:
rc = set_obj.show_graph(gtype)
elif gtype == ftype:
rc = set_obj.save_graph(gtype, outf)
else:
rc = set_obj.graph_img(gtype, outf, ftype)
if context.previous_level_is("cibconfig"):
cib_factory.refresh()
return rc
@command.skill_level('administrator')
@command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])),
compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])))
def do_diff(self, context, t1, t2, *args):
"usage: diff <pe> <pe> [status] [html]"
self._init_source()
opt_l = []
if not self._common_pe_render_check(context, opt_l, *args):
return False
showfun = self._pe_config_plain
mkhtml = "html" in opt_l
if "status" in opt_l:
showfun = self._pe_status_nohdr
elif mkhtml:
showfun = self._pe_config_noclr
s = self._diff(showfun, t1, t2, html=mkhtml)
if context.previous_level_is("cibconfig"):
cib_factory.refresh()
if s is None:
return False
if not mkhtml:
utils.page_string(s)
else:
sys.stdout.writelines(s)
@command.skill_level('administrator')
@command.completers(compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])),
compl.join(compl.call(lambda: crm_report().peinputs_list()),
compl.choice(['live'])))
def do_wdiff(self, context, t1, t2, *args):
"usage: wdiff <pe> <pe> [status]"
self._init_source()
opt_l = []
if not self._common_pe_render_check(context, opt_l, *args):
return False
showfun = self._pe_config_plain
if "status" in opt_l:
showfun = self._pe_status_nohdr
s = self._diff(showfun, t1, t2, wdiff=True)
if context.previous_level_is("cibconfig"):
cib_factory.refresh()
if s is None:
return False
utils.page_string(s)
@command.skill_level('administrator')
@command.completers(compl.call(lambda: crm_report().session_subcmd_list()),
compl.call(lambda: crm_report().session_list()))
def do_session(self, context, subcmd=None, name=None):
"usage: session [{save|load|delete} <name> | pack [<name>] | update | list]"
self._init_source()
if not subcmd:
print "current session: %s" % self.current_session
return True
# verify arguments
if subcmd not in ("save", "load", "pack", "delete", "list", "update"):
common_err("unknown history session subcmd: %s" % subcmd)
return False
if name:
if subcmd not in ("save", "load", "pack", "delete"):
syntax_err(subcmd, context='session')
return False
if not utils.is_filename_sane(name):
return False
elif subcmd not in ("list", "update", "pack"):
syntax_err(subcmd, context='session')
return False
elif subcmd in ("update", "pack") and not self.current_session:
common_err("need to load a history session before update/pack")
return False
# do work
if not name:
# some commands work on the existing session
name = self.current_session
rc = crm_report().manage_session(subcmd, name)
# set source appropriately
if rc and subcmd in ("save", "load"):
options.history = crm_report().get_source()
crm_report().prepare_source()
self.current_session = name
elif rc and subcmd == "delete":
if name == self.current_session:
common_info("current history session deleted, setting source to live")
self._set_source("live")
return rc
@command.skill_level('administrator')
@command.completers(compl.choice(['clear']))
def do_exclude(self, context, arg=None):
"usage: exclude [<regex>|clear]"
self._init_source()
if not arg:
return crm_report().manage_excludes("show")
elif arg == "clear":
return crm_report().manage_excludes("clear")
return crm_report().manage_excludes("add", arg)
# vim:ts=4:sw=4:et:
|
ingted/crmsh
|
modules/ui_history.py
|
Python
|
gpl-2.0
| 23,658 | 0.001395 |
"""
Utilities to facilitate experimentation
"""
from __future__ import absolute_import
import logging
from decimal import Decimal
import six
from django.utils.timezone import now
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from course_modes.models import format_course_price, get_cosmetic_verified_display_price, CourseMode
from lms.djangoapps.courseware.access import has_staff_access_to_preview_mode
from lms.djangoapps.courseware.date_summary import verified_upgrade_deadline_link, verified_upgrade_link_is_valid
from entitlements.models import CourseEntitlement
from lms.djangoapps.commerce.utils import EcommerceService
from openedx.core.djangoapps.catalog.utils import get_programs
from openedx.core.djangoapps.django_comment_common.models import Role
from openedx.core.djangoapps.waffle_utils import WaffleFlag, WaffleFlagNamespace
from openedx.features.course_duration_limits.access import get_user_course_expiration_date
from openedx.features.course_duration_limits.models import CourseDurationLimitConfig
from student.models import CourseEnrollment
from xmodule.partitions.partitions_service import get_all_partitions_for_course, get_user_partition_groups
# Import this for backwards compatibility (so that anyone importing this function from here doesn't break)
from .stable_bucketing import stable_bucketing_hash_group # pylint: disable=unused-import
logger = logging.getLogger(__name__)
# TODO: clean up as part of REVEM-199 (START)
experiments_namespace = WaffleFlagNamespace(name=u'experiments')
# .. toggle_name: experiments.add_programs
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: Toggle for adding the current course's program information to user metadata
# .. toggle_category: experiments
# .. toggle_use_cases: monitored_rollout
# .. toggle_creation_date: 2019-2-25
# .. toggle_expiration_date: None
# .. toggle_warnings: None
# .. toggle_tickets: REVEM-63, REVEM-198
# .. toggle_status: supported
PROGRAM_INFO_FLAG = WaffleFlag(
waffle_namespace=experiments_namespace,
flag_name=u'add_programs',
flag_undefined_default=False
)
# .. toggle_name: experiments.add_dashboard_info
# .. toggle_implementation: WaffleFlag
# .. toggle_default: False
# .. toggle_description: Toggle for adding info about each course to the dashboard metadata
# .. toggle_category: experiments
# .. toggle_use_cases: monitored_rollout
# .. toggle_creation_date: 2019-3-28
# .. toggle_expiration_date: None
# .. toggle_warnings: None
# .. toggle_tickets: REVEM-118
# .. toggle_status: supported
DASHBOARD_INFO_FLAG = WaffleFlag(experiments_namespace,
u'add_dashboard_info',
flag_undefined_default=False)
# TODO END: clean up as part of REVEM-199 (End)
def check_and_get_upgrade_link_and_date(user, enrollment=None, course=None):
"""
For an authenticated user, return a link to allow them to upgrade
in the specified course.
Returns the upgrade link and upgrade deadline for a user in a given course given
that the user is within the window to upgrade defined by our dynamic pacing feature;
otherwise, returns None for both the link and date.
"""
if enrollment is None and course is None:
logger.warn(u'Must specify either an enrollment or a course')
return (None, None)
if enrollment:
if course is None:
course = enrollment.course
elif enrollment.course_id != course.id:
logger.warn(u'{} refers to a different course than {} which was supplied. Enrollment course id={}, '
u'repr={!r}, deprecated={}. Course id={}, repr={!r}, deprecated={}.'
.format(enrollment,
course,
enrollment.course_id,
enrollment.course_id,
enrollment.course_id.deprecated,
course.id,
course.id,
course.id.deprecated
)
)
return (None, None)
if enrollment.user_id != user.id:
logger.warn(u'{} refers to a different user than {} which was supplied. Enrollment user id={}, repr={!r}. '
u'User id={}, repr={!r}.'.format(enrollment,
user,
enrollment.user_id,
enrollment.user_id,
user.id,
user.id,
)
)
return (None, None)
if enrollment is None:
enrollment = CourseEnrollment.get_enrollment(user, course.id)
if user.is_authenticated and verified_upgrade_link_is_valid(enrollment):
return (
verified_upgrade_deadline_link(user, course),
enrollment.upgrade_deadline
)
return (None, None)
# TODO: clean up as part of REVEM-199 (START)
def get_program_price_and_skus(courses):
"""
Get the total program price and purchase skus from these courses in the program
"""
program_price = 0
skus = []
for course in courses:
course_price, course_sku = get_course_entitlement_price_and_sku(course)
if course_price is not None and course_sku is not None:
program_price = Decimal(program_price) + Decimal(course_price)
skus.append(course_sku)
if program_price <= 0:
program_price = None
skus = None
else:
program_price = format_course_price(program_price)
program_price = six.text_type(program_price)
return program_price, skus
def get_course_entitlement_price_and_sku(course):
"""
Get the entitlement price and sku from this course.
Try to get them from the first non-expired, verified entitlement that has a price and a sku. If that doesn't work,
fall back to the first non-expired, verified course run that has a price and a sku.
"""
for entitlement in course.get('entitlements', []):
if entitlement.get('mode') == 'verified' and entitlement['price'] and entitlement['sku']:
expires = entitlement.get('expires')
if not expires or expires > now():
return entitlement['price'], entitlement['sku']
course_runs = course.get('course_runs', [])
published_course_runs = [run for run in course_runs if run['status'] == 'published']
for published_course_run in published_course_runs:
for seat in published_course_run['seats']:
if seat.get('type') == 'verified' and seat['price'] and seat['sku']:
price = Decimal(seat.get('price'))
return price, seat.get('sku')
return None, None
def get_unenrolled_courses(courses, user_enrollments):
"""
Given a list of courses and a list of user enrollments, return the courses in which the user is not enrolled.
Depending on the enrollments that are passed in, this method can be used to determine the courses in a program in
which the user has not yet enrolled or the courses in a program for which the user has not yet purchased a
certificate.
"""
# Get the enrollment course ids here, so we don't need to loop through them for every course run
enrollment_course_ids = {enrollment.course_id for enrollment in user_enrollments}
unenrolled_courses = []
for course in courses:
if not is_enrolled_in_course(course, enrollment_course_ids):
unenrolled_courses.append(course)
return unenrolled_courses
def is_enrolled_in_all_courses(courses, user_enrollments):
"""
Determine if the user is enrolled in all of the courses
"""
# Get the enrollment course ids here, so we don't need to loop through them for every course run
enrollment_course_ids = {enrollment.course_id for enrollment in user_enrollments}
for course in courses:
if not is_enrolled_in_course(course, enrollment_course_ids):
# User is not enrolled in this course, meaning they are not enrolled in all courses in the program
return False
# User is enrolled in all courses in the program
return True
def is_enrolled_in_course(course, enrollment_course_ids):
"""
Determine if the user is enrolled in this course
"""
course_runs = course.get('course_runs')
if course_runs:
for course_run in course_runs:
if is_enrolled_in_course_run(course_run, enrollment_course_ids):
return True
return False
def is_enrolled_in_course_run(course_run, enrollment_course_ids):
"""
Determine if the user is enrolled in this course run
"""
key = None
try:
key = course_run.get('key')
course_run_key = CourseKey.from_string(key)
return course_run_key in enrollment_course_ids
except InvalidKeyError:
logger.warn(
u'Unable to determine if user was enrolled since the course key {} is invalid'.format(key)
)
return False # Invalid course run key. Assume user is not enrolled.
def get_dashboard_course_info(user, dashboard_enrollments):
"""
Given a list of enrollments shown on the dashboard, return a dict of course ids and experiment info for that course
"""
course_info = None
if DASHBOARD_INFO_FLAG.is_enabled():
# Get the enrollments here since the dashboard filters out those with completed entitlements
user_enrollments = CourseEnrollment.objects.select_related('course').filter(user_id=user.id)
course_info = {
str(dashboard_enrollment.course): get_base_experiment_metadata_context(dashboard_enrollment.course,
user,
dashboard_enrollment,
user_enrollments)
for dashboard_enrollment in dashboard_enrollments
}
return course_info
# TODO: clean up as part of REVEM-199 (END)
def get_experiment_user_metadata_context(course, user):
"""
Return a context dictionary with the keys used by the user_metadata.html.
"""
enrollment = None
# TODO: clean up as part of REVO-28 (START)
user_enrollments = None
audit_enrollments = None
has_non_audit_enrollments = False
try:
user_enrollments = CourseEnrollment.objects.select_related('course').filter(user_id=user.id)
has_non_audit_enrollments = user_enrollments.exclude(mode__in=CourseMode.UPSELL_TO_VERIFIED_MODES).exists()
# TODO: clean up as part of REVO-28 (END)
enrollment = CourseEnrollment.objects.select_related(
'course'
).get(user_id=user.id, course_id=course.id)
except CourseEnrollment.DoesNotExist:
pass # Not enrolled, use the default values
has_entitlements = False
if user.is_authenticated():
has_entitlements = CourseEntitlement.objects.filter(user=user).exists()
context = get_base_experiment_metadata_context(course, user, enrollment, user_enrollments)
has_staff_access = has_staff_access_to_preview_mode(user, course.id)
forum_roles = []
if user.is_authenticated:
forum_roles = list(Role.objects.filter(users=user, course_id=course.id).values_list('name').distinct())
# get user partition data
if user.is_authenticated():
partition_groups = get_all_partitions_for_course(course)
user_partitions = get_user_partition_groups(course.id, partition_groups, user, 'name')
else:
user_partitions = {}
# TODO: clean up as part of REVO-28 (START)
context['has_non_audit_enrollments'] = has_non_audit_enrollments or has_entitlements
# TODO: clean up as part of REVO-28 (END)
context['has_staff_access'] = has_staff_access
context['forum_roles'] = forum_roles
context['partition_groups'] = user_partitions
return context
def get_base_experiment_metadata_context(course, user, enrollment, user_enrollments):
"""
Return a context dictionary with the keys used by dashboard_metadata.html and user_metadata.html
"""
enrollment_mode = None
enrollment_time = None
# TODO: clean up as part of REVEM-199 (START)
program_key = get_program_context(course, user_enrollments)
# TODO: clean up as part of REVEM-199 (END)
if enrollment and enrollment.is_active:
enrollment_mode = enrollment.mode
enrollment_time = enrollment.created
# upgrade_link and upgrade_date should be None if user has passed their dynamic pacing deadline.
upgrade_link, upgrade_date = check_and_get_upgrade_link_and_date(user, enrollment, course)
return {
'upgrade_link': upgrade_link,
'upgrade_price': six.text_type(get_cosmetic_verified_display_price(course)),
'enrollment_mode': enrollment_mode,
'enrollment_time': enrollment_time,
'pacing_type': 'self_paced' if course.self_paced else 'instructor_paced',
'upgrade_deadline': upgrade_date,
'audit_access_deadline': get_audit_access_expiration(user, course),
'course_key': course.id,
'course_start': course.start,
'course_end': course.end,
# TODO: clean up as part of REVEM-199 (START)
'program_key_fields': program_key,
# TODO: clean up as part of REVEM-199 (END)
}
def get_audit_access_expiration(user, course):
"""
Return the expiration date for the user's audit access to this course.
"""
if not CourseDurationLimitConfig.enabled_for_enrollment(user=user, course_key=course.id):
return None
return get_user_course_expiration_date(user, course)
# TODO: clean up as part of REVEM-199 (START)
def get_program_context(course, user_enrollments):
"""
Return a context dictionary with program information.
"""
program_key = None
non_audit_enrollments = user_enrollments.exclude(mode__in=CourseMode.UPSELL_TO_VERIFIED_MODES)
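    # Enrollments in modes that can still be upsold to verified (e.g. audit)
    # do not count as purchased courses for the program.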
if PROGRAM_INFO_FLAG.is_enabled():
programs = get_programs(course=course.id)
if programs:
# A course can be in multiple programs, but we're just grabbing the first one
program = programs[0]
complete_enrollment = False
has_courses_left_to_purchase = False
total_courses = None
courses = program.get('courses')
courses_left_to_purchase_price = None
courses_left_to_purchase_url = None
program_uuid = program.get('uuid')
is_eligible_for_one_click_purchase = program.get('is_program_eligible_for_one_click_purchase')
if courses is not None:
total_courses = len(courses)
complete_enrollment = is_enrolled_in_all_courses(courses, user_enrollments)
# Get the price and purchase URL of the program courses the user has yet to purchase. Say a
# program has 3 courses (A, B and C), and the user previously purchased a certificate for A.
# The user is enrolled in audit mode for B. The "left to purchase price" should be the price of
# B+C.
courses_left_to_purchase = get_unenrolled_courses(courses, non_audit_enrollments)
if courses_left_to_purchase:
has_courses_left_to_purchase = True
if courses_left_to_purchase and is_eligible_for_one_click_purchase:
courses_left_to_purchase_price, courses_left_to_purchase_skus = \
get_program_price_and_skus(courses_left_to_purchase)
if courses_left_to_purchase_skus:
courses_left_to_purchase_url = EcommerceService().get_checkout_page_url(
*courses_left_to_purchase_skus, program_uuid=program_uuid)
program_key = {
'uuid': program_uuid,
'title': program.get('title'),
'marketing_url': program.get('marketing_url'),
'status': program.get('status'),
'is_eligible_for_one_click_purchase': is_eligible_for_one_click_purchase,
'total_courses': total_courses,
'complete_enrollment': complete_enrollment,
'has_courses_left_to_purchase': has_courses_left_to_purchase,
'courses_left_to_purchase_price': courses_left_to_purchase_price,
'courses_left_to_purchase_url': courses_left_to_purchase_url,
}
return program_key
# TODO: clean up as part of REVEM-199 (END)
|
ESOedX/edx-platform
|
lms/djangoapps/experiments/utils.py
|
Python
|
agpl-3.0
| 17,007 | 0.003763 |
#-*- coding: utf-8 -*-
import urllib2
import json
import CommonFunctions
common = CommonFunctions
from xml.dom import minidom
from resources.lib import utils
from resources.lib import globalvar
title=['ARTE']
img=['arte']
readyForUse=True
def fix_text(text):
    return text.replace('&amp;amp;','&amp;').encode('utf-8').replace('&amp;#039;',' ')
def list_shows(channel,folder):
shows=[]
d=dict()
filePath=utils.downloadCatalog('http://www.arte.tv/papi/tvguide-flow/sitemap/feeds/videos/F.xml','ARTE.XML',False)
if folder=='none':
xml = open(filePath).read()
url=common.parseDOM(xml, "url")
for i in range(0, len(url)):
categoryTab=common.parseDOM(url[i], "video:category")
if len(categoryTab)>0:
category=fix_text(categoryTab[0])
if category not in d:
shows.append( [channel,category,category,'','folder'] )
d[category]=category
else:
xml = open(filePath).read()
url=common.parseDOM(xml, "url")
for i in range(0, len(url)):
titleTab=common.parseDOM(url[i], "video:title")
if len(titleTab)>0:
title=fix_text(titleTab[0])
categoryTab=common.parseDOM(url[i], "video:category")
if globalvar.ADDON.getSetting('arteFull')=='true':
videoTag=common.parseDOM(url[i], "video:tag")[0]
else:
videoTag='ARTE+7'
if len(categoryTab)>0:
if(fix_text(categoryTab[0])==folder and title not in d and videoTag=='ARTE+7'):
shows.append( [channel,title,title,'','shows'] )
d[title]=title
return shows
def getVideoURL(channel,video_id):
#Get JSON file
jsonFile=urllib2.urlopen('http://arte.tv/papi/tvguide/videos/stream/player/F/'+ video_id + '/ALL/ALL.json').read()
#Parse JSON to
jsoncat = json.loads(jsonFile)
url=''
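    #Prefer the HTTP MP4 stream in HD with an RTMP fallback; for SD (or if
    #no HD stream was found) fall back to the HLS/RTMP variants.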
if globalvar.ADDON.getSetting('%sQuality' % (channel))=='HD':
#HD HTTP
if 'HTTP_MP4_SQ_1' in jsoncat['videoJsonPlayer']['VSR']:
url=jsoncat['videoJsonPlayer']['VSR']['HTTP_MP4_SQ_1']['url']
#HD RTMP
else:
url=jsoncat['videoJsonPlayer']['VSR']['RTMP_SQ_1']['streamer'] + jsoncat['videoJsonPlayer']['VSR']['RTMP_SQ_1']['url']
if globalvar.ADDON.getSetting('%sQuality' % (channel))=='SD' or url=='':
#SD HTTP
        if 'HLS_SQ_1' in jsoncat['videoJsonPlayer']['VSR']:
url=jsoncat['videoJsonPlayer']['VSR']['HLS_SQ_1']['url']
#SD RTMP
else:
url=jsoncat['videoJsonPlayer']['VSR']['RTMP_MQ_1']['streamer'] + jsoncat['videoJsonPlayer']['VSR']['RTMP_MQ_1']['url']
url=jsoncat['videoJsonPlayer']['VSR']['HLS_SQ_1']['url']
return url
def list_videos(channel,show_title):
videos=[]
filePath=utils.downloadCatalog('http://www.arte.tv/papi/tvguide-flow/sitemap/feeds/videos/F.xml','ARTE.XML',False)
xml = open(filePath).read()
url=common.parseDOM(xml, "url")
for i in range(0, len(url)):
titleTab=common.parseDOM(url[i], "video:title")
if len(titleTab)>0:
title=fix_text(titleTab[0])
if(title==show_title):
name=''
image_url=''
date=''
duration=''
views=''
desc=''
rating=''
tmpTab=common.parseDOM(url[i], "video:publication_date")
if len(tmpTab)>0:
date=tmpTab[0][:10]
tmpTab=common.parseDOM(url[i], "video:duration")
if len(tmpTab)>0:
duration=float(tmpTab[0])/60
tmpTab=common.parseDOM(url[i], "video:view_count")
if len(tmpTab)>0:
views=tmpTab[0]
tmpTab=common.parseDOM(url[i], "video:rating")
if len(tmpTab)>0:
rating=tmpTab[0]
descriptionTab=common.parseDOM(url[i], "video:description")
if len(descriptionTab)>0:
name=fix_text(descriptionTab[0])
desc=fix_text(descriptionTab[0])
tmpTab=common.parseDOM(url[i],"video:player_loc")
if len(tmpTab)>0:
if tmpTab[0]=="1":
tmpTab=common.parseDOM(url[i], "video:id")
if len(tmpTab)>0:
video_id=tmpTab[0][28:28+10] + "_PLUS7-F"
else:
start=tmpTab[0].find("%2Fplayer%2FF%2F")
end=tmpTab[0].find("%2F", start+16)
video_id=tmpTab[0][start+16:end]
if video_id.find("EXTRAIT")>0 :
name="Extrait-" + name
videoTag=common.parseDOM(url[i], "video:tag")[0]
picTab=common.parseDOM(url[i], "video:thumbnail_loc")
if len(picTab)>0:
image_url=picTab[0]
infoLabels={ "Title": name,"Plot":desc,"Aired":date,"Duration": duration, "Year":date[:4]}
if not(globalvar.ADDON.getSetting('arteFull')=='true' and videoTag!='ARTE+7'):
videos.append( [channel, video_id, name, image_url,infoLabels,'play'] )
return videos
|
julfla/plugin.video.freplay
|
resources/lib/channels/arte.py
|
Python
|
gpl-2.0
| 5,259 | 0.034417 |
"""Test cases for the switcher_kis component."""
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Generator
from pytest import raises
from homeassistant.components.switcher_kis import (
CONF_AUTO_OFF,
DATA_DEVICE,
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
SERVICE_SET_AUTO_OFF_SCHEMA,
SIGNAL_SWITCHER_DEVICE_UPDATE,
)
from homeassistant.const import CONF_ENTITY_ID
from homeassistant.core import Context, callback
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from .consts import (
DUMMY_AUTO_OFF_SET,
DUMMY_DEVICE_ID,
DUMMY_DEVICE_NAME,
DUMMY_DEVICE_STATE,
DUMMY_ELECTRIC_CURRENT,
DUMMY_IP_ADDRESS,
DUMMY_MAC_ADDRESS,
DUMMY_PHONE_ID,
DUMMY_POWER_CONSUMPTION,
DUMMY_REMAINING_TIME,
MANDATORY_CONFIGURATION,
SWITCH_ENTITY_ID,
)
from tests.common import async_fire_time_changed, async_mock_service
if TYPE_CHECKING:
from aioswitcher.devices import SwitcherV2Device
from tests.common import MockUser
async def test_failed_config(
hass: HomeAssistantType, mock_failed_bridge: Generator[None, Any, None]
) -> None:
"""Test failed configuration."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION) is False
async def test_minimal_config(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test setup with configuration minimal entries."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
async def test_discovery_data_bucket(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test the event send with the updated device."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
device = hass.data[DOMAIN].get(DATA_DEVICE)
assert device.device_id == DUMMY_DEVICE_ID
assert device.ip_addr == DUMMY_IP_ADDRESS
assert device.mac_addr == DUMMY_MAC_ADDRESS
assert device.name == DUMMY_DEVICE_NAME
assert device.state == DUMMY_DEVICE_STATE
assert device.remaining_time == DUMMY_REMAINING_TIME
assert device.auto_off_set == DUMMY_AUTO_OFF_SET
assert device.power_consumption == DUMMY_POWER_CONSUMPTION
assert device.electric_current == DUMMY_ELECTRIC_CURRENT
assert device.phone_id == DUMMY_PHONE_ID
async def test_set_auto_off_service(
hass: HomeAssistantType,
mock_bridge: Generator[None, Any, None],
mock_api: Generator[None, Any, None],
hass_owner_user: "MockUser",
hass_read_only_user: "MockUser",
) -> None:
"""Test the set_auto_off service."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
assert hass.services.has_service(DOMAIN, SERVICE_SET_AUTO_OFF_NAME)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id=hass_owner_user.id),
)
with raises(Unauthorized) as unauthorized_read_only_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
assert unauthorized_read_only_exc.type is Unauthorized
with raises(Unauthorized) as unauthorized_wrong_entity_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{
CONF_ENTITY_ID: "light.not_related_entity",
CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET,
},
blocking=True,
context=Context(user_id=hass_owner_user.id),
)
assert unauthorized_wrong_entity_exc.type is Unauthorized
with raises(UnknownUser) as unknown_user_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id="not_real_user"),
)
assert unknown_user_exc.type is UnknownUser
service_calls = async_mock_service(
hass, DOMAIN, SERVICE_SET_AUTO_OFF_NAME, SERVICE_SET_AUTO_OFF_SCHEMA
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
)
await hass.async_block_till_done()
assert len(service_calls) == 1
assert str(service_calls[0].data[CONF_AUTO_OFF]) == DUMMY_AUTO_OFF_SET.lstrip("0")
async def test_signal_dispatcher(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test signal dispatcher dispatching device updates every 4 seconds."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
@callback
def verify_update_data(device: "SwitcherV2Device") -> None:
"""Use as callback for signal dispatcher."""
pass
async_dispatcher_connect(hass, SIGNAL_SWITCHER_DEVICE_UPDATE, verify_update_data)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=5))
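# Illustrative sketch (not part of the original test): the signal this test
# subscribes to would be emitted from the component side roughly like the
# snippet below; the `maybe_updated_device` name is only an assumption here.
#
#     from homeassistant.helpers.dispatcher import async_dispatcher_send
#     async_dispatcher_send(hass, SIGNAL_SWITCHER_DEVICE_UPDATE, maybe_updated_device)
#
# async_fire_time_changed() above merely advances Home Assistant's clock so the
# bridge's periodic update task gets a chance to fire that signal.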
|
tchellomello/home-assistant
|
tests/components/switcher_kis/test_init.py
|
Python
|
apache-2.0
| 5,602 | 0.000893 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='bloom',
version='0.4.4',
packages=find_packages(exclude=['test']),
package_data={
'bloom.generators.debian': [
'bloom/generators/debian/templates/*',
'bloom/generators/debian/templates/source/*'
]
},
include_package_data=True,
install_requires=[
'argparse',
'catkin-pkg >= 0.1.14',
'distribute',
'empy',
'python-dateutil',
'PyYAML',
'rosdep >= 0.10.3',
'rosdistro >= 0.2.12',
'vcstools >= 0.1.22',
],
author='Tully Foote, William Woodall',
author_email='tfoote@willowgarage.com, william@osrfoundation.org',
maintainer='William Woodall',
maintainer_email='william@osrfoundation.org',
url='http://www.ros.org/wiki/bloom',
download_url='http://pr.willowgarage.com/downloads/bloom/',
keywords=['ROS'],
classifiers=['Programming Language :: Python',
'License :: OSI Approved :: BSD License'],
description="Bloom is a release automation tool.",
long_description="""\
Bloom provides tools for releasing software on top of a git repository \
and leverages tools and patterns from git-buildpackage. Additionally, \
bloom leverages meta and build information from catkin \
(https://github.com/ros/catkin) to automate release branching and the \
generation of platform specific source packages, like debian's src-debs.""",
license='BSD',
test_suite='test',
entry_points={
'console_scripts': [
'git-bloom-config = bloom.commands.git.config:main',
'git-bloom-import-upstream = bloom.commands.git.import_upstream:main',
'git-bloom-branch = bloom.commands.git.branch:main',
'git-bloom-patch = bloom.commands.git.patch.patch_main:main',
'git-bloom-generate = bloom.commands.git.generate:main',
'git-bloom-release = bloom.commands.git.release:main',
'bloom-export-upstream = bloom.commands.export_upstream:main',
'bloom-update = bloom.commands.update:main',
'bloom-release = bloom.commands.release:main',
'bloom-generate = bloom.commands.generate:main'
],
'bloom.generators': [
'release = bloom.generators.release:ReleaseGenerator',
'rosrelease = bloom.generators.rosrelease:RosReleaseGenerator',
'debian = bloom.generators.debian:DebianGenerator',
'rosdebian = bloom.generators.rosdebian:RosDebianGenerator'
],
'bloom.generate_cmds': [
'debian = bloom.generators.debian.generate_cmd:description',
'rosdebian = bloom.generators.rosdebian:description'
]
}
)
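# Illustrative note (not part of the original setup.py): each console_scripts
# entry above becomes an executable wrapping the named callable, and the custom
# 'bloom.generators' / 'bloom.generate_cmds' groups can be discovered at
# runtime with something like the sketch below (the variable names are only an
# assumption):
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points('bloom.generators'):
#         generator_cls = ep.load()  # e.g. DebianGenerator for the 'debian' entry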
|
130s/bloom
|
setup.py
|
Python
|
bsd-3-clause
| 2,778 | 0.00036 |
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
from __future__ import print_function
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print('ERROR: need string and list of locales')
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print(' '.join(["'%s'" % x for x in results]))
if __name__ == '__main__':
sys.exit(main(sys.argv))
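# Illustrative usage (not part of the original script), assuming it is invoked
# from a GYP rule:
#
#     python apply_locales.py -d 'ZZLOCALE.lproj' en-US fr pt-BR
#
# would print:
#
#     'en.lproj' 'fr.lproj' 'pt_BR.lproj'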
|
endlessm/chromium-browser
|
build/apply_locales.py
|
Python
|
bsd-3-clause
| 1,497 | 0.011356 |
###############################################################################
#
# Copyright 2010 Locomatix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
__all__ = ['create_feed', 'delete_feed', 'list_feeds', \
'create_object','delete_object', 'delete_all_objects', \
'list_objects', 'query_objects', \
'update_attributes','get_attributes', 'update_location','get_location', \
'search_nearby', 'query_search_nearby', \
'search_region', 'query_search_region', \
'create_zone', 'activate_zone', 'get_zone', 'deactivate_zone', \
'delete_zone', 'delete_all_zones', 'list_zones', \
'create_fence', 'activate_fence','get_fence','deactivate_fence', \
'delete_fence', 'delete_all_fences', 'list_fences' \
'get_location_history', 'query_location_history', \
'get_space_activity', 'query_space_activity', \
'get_histogram', 'query_histogram'
]
from create_feed import create_feed
from delete_feed import delete_feed
from list_feeds import list_feeds
from create_object import create_object
from delete_object import delete_object
from delete_all_objects import delete_all_objects
from list_objects import list_objects
from query_objects import query_objects
from update_attributes import update_attributes
from get_attributes import get_attributes
from update_location import update_location
from get_location import get_location
from create_zone import create_zone
from activate_zone import activate_zone
from get_zone import get_zone
from delete_zone import delete_zone
from delete_all_zones import delete_all_zones
from deactivate_zone import deactivate_zone
from list_zones import list_zones
from create_fence import create_fence
from activate_fence import activate_fence
from get_fence import get_fence
from deactivate_fence import deactivate_fence
from delete_fence import delete_fence
from delete_all_fences import delete_all_fences
from list_fences import list_fences
from search_region import search_region
from query_search_region import query_search_region
from search_nearby import search_nearby
from query_search_nearby import query_search_nearby
from get_location_history import get_location_history
from query_location_history import query_location_history
from get_space_activity import get_space_activity
from query_space_activity import query_space_activity
from get_histogram import get_histogram
from query_histogram import query_histogram
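# Illustrative note (not part of the original module): re-exporting every
# command into this package namespace lets a dispatcher resolve handlers
# generically, e.g. (hypothetical):
#
#     import locomatix.cli
#     handler = getattr(locomatix.cli, 'list_feeds')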
|
locomatix/locomatix-python
|
locomatix/cli/__init__.py
|
Python
|
apache-2.0
| 3,074 | 0.006181 |
from cStringIO import StringIO
import os
import tarfile
import unittest
import tempfile
import shutil
import errno
import mock
from pulp.devel.unit.util import touch
from pulp.plugins.conduits.repo_publish import RepoPublishConduit
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.model import Repository, AssociatedUnit, PublishReport
from pulp_puppet.common import constants
from pulp_puppet.plugins.distributors import installdistributor
class TestEntryPoint(unittest.TestCase):
def test_everything(self):
"""everything isn't much"""
plugin_class, config = installdistributor.entry_point()
self.assertTrue(plugin_class is installdistributor.PuppetModuleInstallDistributor)
# there is never a global config for this distributor
self.assertEqual(config, {})
class TestValidateConfig(unittest.TestCase):
def setUp(self):
self.distributor = installdistributor.PuppetModuleInstallDistributor()
self.repo = Repository('repo1', '', {})
def test_not_present(self):
config = PluginCallConfiguration({}, {})
result, message = self.distributor.validate_config(self.repo, config, [])
self.assertTrue(result)
def test_relative_path(self):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: 'a/b/c'})
result, message = self.distributor.validate_config(self.repo, config, [])
self.assertFalse(result)
self.assertTrue(len(message) > 0)
def test_with_permission(self):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: '/tmp'})
result, message = self.distributor.validate_config(self.repo, config, [])
self.assertTrue(result)
class TestPublishRepo(unittest.TestCase):
def setUp(self):
self.distributor = installdistributor.PuppetModuleInstallDistributor()
self.working_directory = tempfile.mkdtemp()
self.puppet_dir = os.path.join(self.working_directory, 'puppet')
os.makedirs(self.puppet_dir)
self.repo = Repository('repo1', '', {})
self.conduit = RepoPublishConduit('repo1', self.distributor.metadata()['id'])
self.uk1 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
self.uk2 = {'author': 'puppetlabs', 'name': 'java', 'version': '1.3.1'}
self.units = [
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk1, {}, '/a/b/x', '', '', '', ''),
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk2, {}, '/a/b/y', '', '', '', ''),
]
self.conduit.get_units = mock.MagicMock(return_value=self.units, spec_set=self.conduit.get_units)
def tearDown(self):
shutil.rmtree(self.working_directory)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_move_to_destination_directory',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_rename_directory',
return_value=None)
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_create_temporary_destination_directory',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
def test_workflow(self, mock_check_paths, mock_mkdir, mock_clear, mock_open, mock_rename, mock_move):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
mock_open.return_value.getnames.return_value = ['a/b', 'a/c']
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertTrue(isinstance(report, PublishReport))
self.assertTrue(report.success_flag)
self.assertEqual(len(report.details['errors']), 0)
self.assertEqual(len(report.details['success_unit_keys']), 2)
self.assertTrue(self.uk1 in report.details['success_unit_keys'])
self.assertTrue(self.uk2 in report.details['success_unit_keys'])
self.assertEqual(mock_open.call_count, 2)
mock_open.assert_any_call(self.units[0].storage_path)
mock_open.assert_any_call(self.units[1].storage_path)
self.assertEqual(mock_rename.call_count, 2)
mock_mkdir.assert_called_once_with(self.puppet_dir)
mock_clear.assert_called_once_with(self.puppet_dir)
mock_check_paths.assert_called_once_with(self.units, self.puppet_dir)
self.assertEqual(mock_move.call_count, 1)
def test_no_destination(self):
"""this one should fail very early since the destination is missing"""
config = PluginCallConfiguration({}, {})
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 0)
self.assertEqual(len(report.details['success_unit_keys']), 0)
def test_duplicate_unit_names(self):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
uk3 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.3.1'}
unit3 = AssociatedUnit(constants.TYPE_PUPPET_MODULE, uk3, {}, '/a/b/z', '', '', '', '')
self.units.append(unit3)
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.summary.find('duplicate') >= 0)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
def test_unsafe_paths(self, mock_check):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
mock_check.side_effect = self._add_error
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertTrue(len(report.summary) > 0)
self.assertEqual(len(report.details['errors']), 1)
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
side_effect=OSError)
def test_cannot_remove_destination(self, mock_clear, mock_check):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
def test_cannot_open_tarballs(self, mock_clear, mock_check):
"""
This is easy to simulate, because we can let the real tarfile module try
to open the fake paths.
"""
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
def test_cannot_extract_tarballs(self, mock_clear, mock_check, mock_open):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
mock_open.return_value.extractall.side_effect = OSError
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch('shutil.move', side_effect=IOError)
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
def test_cannot_move(self, mock_clear, mock_check, mock_open, mock_move):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
mock_open.return_value.getnames.return_value = ['a/b', 'a/c']
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_check_for_unsafe_archive_paths',
return_value=None)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
def test_multiple_extraction_dirs(self, mock_clear, mock_check, mock_open):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
mock_open.return_value.getnames.return_value = ['a/b', 'c/b']
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertFalse(report.success_flag)
self.assertTrue(isinstance(report.summary, basestring))
self.assertEqual(len(report.details['errors']), 2)
self.assertTrue(report.details['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(report.details['errors'][1][0] in [self.uk1, self.uk2])
self.assertEqual(len(report.details['success_unit_keys']), 0)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor,
'_clear_destination_directory',
return_value=None)
def test_no_units(self, mock_clear):
config = PluginCallConfiguration({}, {constants.CONFIG_INSTALL_PATH: self.puppet_dir})
self.conduit.get_units.return_value = []
report = self.distributor.publish_repo(self.repo, self.conduit, config)
self.assertTrue(report.success_flag)
self.assertEqual(len(report.details['errors']), 0)
self.assertEqual(len(report.details['success_unit_keys']), 0)
# we still need to clear the destination
mock_clear.assert_called_once_with(self.puppet_dir)
def _add_error(self, *args, **kwargs):
"""
add an error to the detail report. This gives us a chance to add an error
during a particular step in the workflow.
"""
if not self.distributor.detail_report.report['errors']:
self.distributor.detail_report.error(self.uk1, 'failed')
class TestFindDuplicateNames(unittest.TestCase):
def setUp(self):
self.uk1 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
self.uk2 = {'author': 'puppetlabs', 'name': 'java', 'version': '1.3.1'}
self.uk3 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.3.1'}
self.unit3 = AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk3, {}, '/a/b/z', '', '', '', '')
self.units = [
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk1, {}, '/a/b/x', '', '', '', ''),
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk2, {}, '/a/b/y', '', '', '', ''),
]
self.method = installdistributor.PuppetModuleInstallDistributor._find_duplicate_names
def test_no_dups(self):
ret = self.method(self.units)
self.assertEqual(ret, [])
def test_with_dups(self):
self.units.append(self.unit3)
ret = self.method(self.units)
self.assertTrue(self.units[0] in ret)
self.assertTrue(self.units[2] in ret)
class TestMoveToDestinationDirectory(unittest.TestCase):
def setUp(self):
self.working_dir = tempfile.mkdtemp()
self.destination_dir = os.path.join(self.working_dir, 'target')
os.makedirs(self.destination_dir)
self.source_dir = os.path.join(self.working_dir, 'source')
os.makedirs(self.source_dir)
def tearDown(self):
shutil.rmtree(self.working_dir)
    def test_existing_files_saved(self):
existing_file = os.path.join(self.destination_dir, 'foo.txt')
touch(existing_file)
new_dir = os.path.join(self.source_dir, 'bar')
os.makedirs(new_dir)
installdistributor.PuppetModuleInstallDistributor.\
_move_to_destination_directory(self.source_dir, self.destination_dir)
self.assertTrue(os.path.exists(existing_file))
def test_source_dir_removed(self):
installdistributor.PuppetModuleInstallDistributor.\
_move_to_destination_directory(self.source_dir, self.destination_dir)
self.assertFalse(os.path.exists(self.source_dir))
def test_move_dirs(self):
new_dir = os.path.join(self.source_dir, 'bar')
os.makedirs(new_dir)
installdistributor.PuppetModuleInstallDistributor.\
_move_to_destination_directory(self.source_dir, self.destination_dir)
self.assertTrue(os.path.exists(os.path.join(self.destination_dir, 'bar')))
class TestRenameDirectory(unittest.TestCase):
def setUp(self):
uk = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
self.unit = AssociatedUnit(constants.TYPE_PUPPET_MODULE, uk, {}, '/a/b/x', '', '', '', '')
self.method = installdistributor.PuppetModuleInstallDistributor._rename_directory
@mock.patch('shutil.move', autospec=True)
def test_trailing_slash(self, mock_move):
self.method(self.unit, '/tmp/', ['a/b', 'a/c'])
mock_move.assert_called_once_with('/tmp/a', '/tmp/stdlib')
@mock.patch('shutil.move', autospec=True)
def test_no_trailing_slash(self, mock_move):
self.method(self.unit, '/tmp', ['a/b', 'a/c'])
mock_move.assert_called_once_with('/tmp/a', '/tmp/stdlib')
@mock.patch('shutil.move', autospec=True)
def test_too_many_dirs(self, mock_move):
self.assertRaises(ValueError, self.method, self.unit, '/tmp', ['a/b', 'c/b'])
@mock.patch('shutil.move', autospec=True)
def test_no_dirs(self, mock_move):
self.assertRaises(ValueError, self.method, self.unit, '/tmp', [])
@mock.patch('shutil.move', autospec=True)
def test_absolute_paths(self, mock_move):
self.method(self.unit, '/tmp', ['/tmp/a/b', '/tmp/a/c'])
mock_move.assert_called_once_with('/tmp/a', '/tmp/stdlib')
@mock.patch('shutil.move', autospec=True)
def test_empty_dir(self, mock_move):
"""weird scenario, but you never know..."""
self.method(self.unit, '/tmp', ['a'])
mock_move.assert_called_once_with('/tmp/a', '/tmp/stdlib')
class TestCheckForUnsafeArchivePaths(unittest.TestCase):
def setUp(self):
self.distributor = installdistributor.PuppetModuleInstallDistributor()
self.uk1 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
self.uk2 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.1'}
self.units = [
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk1, {}, '/a/b/x', '', '', '', ''),
AssociatedUnit(constants.TYPE_PUPPET_MODULE, self.uk2, {}, '/a/b/y', '', '', '', ''),
]
def test_does_not_exist(self):
self.distributor._check_for_unsafe_archive_paths(self.units, '/foo/bar')
self.assertEqual(len(self.distributor.detail_report.report['errors']), 2)
self.assertTrue(self.distributor.detail_report.report['errors'][0][0] in [self.uk1, self.uk2])
self.assertTrue(isinstance(self.distributor.detail_report.report['errors'][0][1], basestring))
self.assertTrue(self.distributor.detail_report.report['errors'][1][0] in [self.uk1, self.uk2])
self.assertTrue(isinstance(self.distributor.detail_report.report['errors'][1][1], basestring))
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor, '_archive_paths_are_safe')
def test_safe(self, mock_archive_paths_are_safe, mock_open):
mock_archive_paths_are_safe.return_value = True
self.distributor._check_for_unsafe_archive_paths(self.units, '/foo/bar')
mock_archive_paths_are_safe.assert_any_call('/foo/bar', mock_open.return_value)
self.assertEqual(mock_archive_paths_are_safe.call_count, 2)
self.assertEqual(len(self.distributor.detail_report.report['errors']), 0)
@mock.patch('tarfile.open', autospec=True)
@mock.patch.object(installdistributor.PuppetModuleInstallDistributor, '_archive_paths_are_safe')
def test_unsafe(self, mock_archive_paths_are_safe, mock_open):
mock_archive_paths_are_safe.return_value = False
self.distributor._check_for_unsafe_archive_paths(self.units, '/foo/bar')
mock_archive_paths_are_safe.assert_any_call('/foo/bar', mock_open.return_value)
self.assertEqual(mock_archive_paths_are_safe.call_count, 2)
self.assertEqual(len(self.distributor.detail_report.report['errors']), 2)
self.assertEqual(mock_open.call_count, 2)
mock_open.assert_any_call('/a/b/x')
mock_open.assert_any_call('/a/b/y')
class TestArchivePathsAreSafe(unittest.TestCase):
def setUp(self):
self.tarball = tarfile.TarFile(fileobj=StringIO(), mode='w')
self.tarball.getnames = mock.MagicMock(spec_set=self.tarball.getnames)
def test_safe_names(self):
self.tarball.getnames.return_value = [
'a/b/c',
'd/e/f',
'g/h/../i',
'/foo/a/b/', # this is a terrible thing to have in a tarball, but just in case...
]
ret = installdistributor.PuppetModuleInstallDistributor._archive_paths_are_safe(
'/foo', self.tarball)
self.assertTrue(ret)
def test_unsafe_relative_name(self):
self.tarball.getnames.return_value = [
'a/b/c',
'd/e/f',
'../i',
]
ret = installdistributor.PuppetModuleInstallDistributor._archive_paths_are_safe(
'/foo', self.tarball)
self.assertFalse(ret)
def test_unsafe_absolute_name(self):
"""
I'm not actually sure if this is possible with a tarball
"""
self.tarball.getnames.return_value = [
'a/b/c',
'd/e/f',
'/i',
]
ret = installdistributor.PuppetModuleInstallDistributor._archive_paths_are_safe(
'/foo', self.tarball)
self.assertFalse(ret)
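# Illustrative sketch (not part of the original tests): these cases exercise a
# path-containment check along the lines of the hypothetical helper below,
# which rejects any archive member whose normalized path would escape the
# destination directory:
#
#     import os
#
#     def archive_paths_are_safe_sketch(destination, tarball):
#         dest = os.path.normpath(os.path.abspath(destination))
#         for name in tarball.getnames():
#             resolved = os.path.normpath(os.path.join(dest, name))
#             if not resolved.startswith(dest):
#                 return False
#         return True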
class TestClearDestinationDirectory(unittest.TestCase):
def setUp(self):
self.distributor = installdistributor.PuppetModuleInstallDistributor()
@mock.patch('shutil.rmtree', autospec=True)
def test_real_dir(self, mock_rmtree):
destination = os.path.dirname(os.path.dirname(__file__))
self.distributor._clear_destination_directory(destination)
# makes sure it only tries to remove the directories, and not any of the
# regular files that appear within "destination"
self.assertEqual(mock_rmtree.call_count, 3)
mock_rmtree.assert_any_call(os.path.join(destination, 'data'))
mock_rmtree.assert_any_call(os.path.join(destination, 'integration'))
mock_rmtree.assert_any_call(os.path.join(destination, 'unit'))
class TestCreateTemporaryDestinationDirectory(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_no_dir(self):
destination = os.path.join(self.tmp_dir, 'puppet')
distributor = installdistributor.PuppetModuleInstallDistributor()
destination_dir = distributor._create_temporary_destination_directory(destination)
self.assertTrue(os.path.isdir(destination_dir))
def test_dir_already_exists(self):
destination = os.path.join(self.tmp_dir, 'puppet')
os.makedirs(destination)
distributor = installdistributor.PuppetModuleInstallDistributor()
destination_dir = distributor._create_temporary_destination_directory(destination)
self.assertTrue(os.path.isdir(destination_dir))
self.assertTrue(os.path.isdir(destination))
@mock.patch('os.makedirs', side_effect=OSError(errno.EPERM))
def test_dir_permission_denied(self, *unused):
destination = os.path.join(self.tmp_dir, 'puppet')
distributor = installdistributor.PuppetModuleInstallDistributor()
self.assertRaises(OSError, distributor._create_temporary_destination_directory, destination)
class TestDetailReport(unittest.TestCase):
def setUp(self):
self.report = installdistributor.DetailReport()
self.uk1 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.0'}
self.uk2 = {'author': 'puppetlabs', 'name': 'stdlib', 'version': '1.2.1'}
def test_success(self):
self.report.success(self.uk1)
self.assertTrue(self.uk1 in self.report.report['success_unit_keys'])
def test_error(self):
self.report.error(self.uk1, 'failed')
self.assertTrue((self.uk1, 'failed') in self.report.report['errors'])
def test_has_errors_true(self):
self.report.error(self.uk1, 'failed')
self.assertTrue(self.report.has_errors)
def test_has_errors_false_success(self):
self.report.success(self.uk1)
self.assertFalse(self.report.has_errors)
def test_has_errors_false_empty(self):
self.report.success(self.uk1)
self.assertFalse(self.report.has_errors)
def test_report_is_dict(self):
self.assertTrue(isinstance(self.report.report, dict))
|
ipanova/pulp_puppet
|
pulp_puppet_plugins/test/unit/test_install_distributor.py
|
Python
|
gpl-2.0
| 24,167 | 0.002814 |
"""Redo the `...` (representation) but with limits on most sizes."""
__all__ = ["Repr","repr"]
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxdict = 4
self.maxstring = 30
self.maxlong = 40
self.maxother = 20
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
s = `x`
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_tuple(self, x, level):
n = len(x)
if n == 0: return '()'
if level <= 0: return '(...)'
s = ''
for i in range(min(n, self.maxtuple)):
if s: s = s + ', '
s = s + self.repr1(x[i], level-1)
if n > self.maxtuple: s = s + ', ...'
elif n == 1: s = s + ','
return '(' + s + ')'
def repr_list(self, x, level):
n = len(x)
if n == 0: return '[]'
if level <= 0: return '[...]'
s = ''
for i in range(min(n, self.maxlist)):
if s: s = s + ', '
s = s + self.repr1(x[i], level-1)
if n > self.maxlist: s = s + ', ...'
return '[' + s + ']'
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
s = ''
keys = x.keys()
keys.sort()
for i in range(min(n, self.maxdict)):
if s: s = s + ', '
key = keys[i]
s = s + self.repr1(key, level-1)
s = s + ': ' + self.repr1(x[key], level-1)
if n > self.maxdict: s = s + ', ...'
return '{' + s + '}'
def repr_str(self, x, level):
s = `x[:self.maxstring]`
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = `x[:i] + x[len(x)-j:]`
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_long(self, x, level):
s = `x` # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = `x`
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except:
return '<' + x.__class__.__name__ + ' instance at ' + \
hex(id(x))[2:] + '>'
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
aRepr = Repr()
repr = aRepr.repr
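# Illustrative usage (not part of the original module), under Python 2:
#
#     >>> from repr import repr
#     >>> repr(range(20))
#     '[0, 1, 2, 3, 4, 5, ...]'
#
# i.e. only the first maxlist (6) elements are shown before the ellipsis.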
|
remybaranx/qtaste
|
tools/jython/lib/Lib/repr.py
|
Python
|
gpl-3.0
| 3,151 | 0.009838 |
import urllib,urllib2,re,cookielib,string,os,sys
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
pyamfpath = xbmc.translatePath(os.path.join('special://home/addons', 'script.module.pyamf'))
try:
if not os.path.exists(pyamfpath):
url = 'https://github.com/mash2k3/MashUpFixes/raw/master/FIXES/script.module.pyamf.zip'
path = xbmc.translatePath(os.path.join('special://home/addons','packages'))
lib=os.path.join(path, 'script.module.pyamf.zip')
if main.downloadFile(url,lib):
addonfolder = xbmc.translatePath(os.path.join('special://home/addons',''))
xbmc.executebuiltin("XBMC.Extract(%s,%s)"%(lib,addonfolder))
except: pass
def SKYSPORTS():
main.addDir('All Videos','http://www1.skysports.com/watch/more/5/27452/200/1',173,art+'/skysports.png')
main.addDir('Sports','http://www1.skysports.com/watch/tv-shows',178,art+'/skysports.png')
main.addDir('TV Shows','http://www1.skysports.com/watch/tv-shows',175,art+'/skysports.png')
def SKYSPORTSCAT():
main.addDir('Sports [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/28461/200/1',173,art+'/skysports.png')
main.addDir('Football','football',179,art+'/skysports.png')
main.addDir('Formula 1','formula-1',179,art+'/skysports.png')
main.addDir('Cricket','http://www1.skysports.com//watch/video/sports/cricket',176,art+'/skysports.png')
main.addDir('Rugby Union','rugby-union',179,art+'/skysports.png')
main.addDir('Rugby League','http://www1.skysports.com//watch/video/sports/rugby-league',176,art+'/skysports.png')
main.addDir('Golf','http://www1.skysports.com//watch/video/sports/golf',176,art+'/skysports.png')
main.addDir('Tennis','http://www1.skysports.com//watch/video/sports/tennis',176,art+'/skysports.png')
main.addDir('Boxing','http://www1.skysports.com//watch/video/sports/boxing',176,art+'/skysports.png')
main.addDir('NFL','http://www1.skysports.com//watch/video/sports/nfl',176,art+'/skysports.png')
main.addDir('Racing','http://www1.skysports.com//watch/video/sports/racing',176,art+'/skysports.png')
main.addDir('Darts','http://www1.skysports.com//watch/video/sports/darts',176,art+'/skysports.png')
main.addDir('Basketball','http://www1.skysports.com//watch/video/sports/basketball',176,art+'/skysports.png')
main.addDir('Cycling','http://www1.skysports.com//watch/video/sports/cycling',176,art+'/skysports.png')
main.addDir('Speedway','http://www1.skysports.com//watch/video/sports/speedway',176,art+'/skysports.png')
main.addDir('Ice Hockey','http://www1.skysports.com//watch/video/sports/ice-hockey',176,art+'/skysports.png')
main.addDir('UFC','http://www1.skysports.com//watch/video/sports/ufc',176,art+'/skysports.png')
main.addDir('WWE','http://www1.skysports.com//watch/video/sports/wwe',176,art+'/skysports.png')
def SKYSPORTSCAT2(murl):
if murl=='football':
main.addDir('Football [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/12606/200/1',173,art+'/skysports.png')
main.addDir('Premier League','premier-league',180,art+'/skysports.png')
main.addDir('Championship','championship',180,art+'/skysports.png')
main.addDir('League One','league-one',180,art+'/skysports.png')
main.addDir('League Two','league-two',180,art+'/skysports.png')
main.addDir('Scottish Football','scottish-football',180,art+'/skysports.png')
main.addDir('Primera Liga','primera-liga',180,art+'/skysports.png')
main.addDir('Champions League','http://www1.skysports.com/watch/video/sports/football/competitions/champions-league',176,art+'/skysports.png')
main.addDir('Capital One Cup','http://www1.skysports.com/watch/video/sports/football/competitions/capital-one-cup',176,art+'/skysports.png')
if murl=='formula-1':
main.addDir('Formula 1 [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/12870/200/1',173,art+'/skysports.png')
main.addDir('Grand Prix','grand-prix',180,art+'/skysports.png')
main.addDir('Teams','f1Teams',180,art+'/skysports.png')
if murl=='rugby-union':
main.addDir('Rugby Union [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/12610/200/1',173,art+'/skysports.png')
main.addDir('Aviva Premiership','http://www1.skysports.com/watch/video/sports/rugby-union/competitions/aviva-premiership',176,art+'/skysports.png')
main.addDir('Super Rugby','http://www1.skysports.com/watch/video/sports/rugby-union/competitions/super-rugby',176,art+'/skysports.png')
main.addDir('Heineken Cup','http://www1.skysports.com/watch/video/sports/rugby-union/competitions/heineken-cup',176,art+'/skysports.png')
def SKYSPORTSTEAMS(murl):
if murl=='premier-league':
main.addDir('Premier League [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16426/100/1',173,art+'/skysports.png')
main.addDir('Arsenal','http://www1.skysports.com/watch/video/sports/football/teams/arsenal',176,art+'/skysports.png')
main.addDir('Aston Villa','http://www1.skysports.com/watch/video/sports/football/teams/aston-villa',176,art+'/skysports.png')
main.addDir('Chelsea','http://www1.skysports.com/watch/video/sports/football/teams/chelsea',176,art+'/skysports.png')
main.addDir('Everton','http://www1.skysports.com/watch/video/sports/football/teams/everton',176,art+'/skysports.png')
main.addDir('Fulham','http://www1.skysports.com/watch/video/sports/football/teams/fulham',176,art+'/skysports.png')
main.addDir('Liverpool','http://www1.skysports.com/watch/video/sports/football/teams/liverpool',176,art+'/skysports.png')
main.addDir('Manchester City','http://www1.skysports.com/watch/video/sports/football/teams/manchester-city',176,art+'/skysports.png')
main.addDir('Manchester United','http://www1.skysports.com/watch/video/sports/football/teams/manchester-united',176,art+'/skysports.png')
main.addDir('Newcastle United','http://www1.skysports.com/watch/video/sports/football/teams/newcastle-united',176,art+'/skysports.png')
main.addDir('Norwich City','http://www1.skysports.com/watch/video/sports/football/teams/norwich-city',176,art+'/skysports.png')
main.addDir('Queens Park Rangers','http://www1.skysports.com/watch/video/sports/football/teams/queens-park-rangers',176,art+'/skysports.png')
main.addDir('Reading','http://www1.skysports.com/watch/video/sports/football/teams/reading',176,art+'/skysports.png')
main.addDir('Southampton','http://www1.skysports.com/watch/video/sports/football/teams/southampton',176,art+'/skysports.png')
main.addDir('Stoke City','http://www1.skysports.com/watch/video/sports/football/teams/stoke-city',176,art+'/skysports.png')
main.addDir('Sunderland','http://www1.skysports.com/watch/video/sports/football/teams/sunderland',176,art+'/skysports.png')
main.addDir('Swansea City','http://www1.skysports.com/watch/video/sports/football/teams/swansea-city',176,art+'/skysports.png')
main.addDir('Tottenham Hotspur','http://www1.skysports.com/watch/video/sports/football/teams/tottenham-hotspur',176,art+'/skysports.png')
main.addDir('West Bromwich Albion','http://www1.skysports.com/watch/video/sports/football/teams/west-bromwich-albion',176,art+'/skysports.png')
main.addDir('West Ham United','http://www1.skysports.com/watch/video/sports/football/teams/west-ham-united',176,art+'/skysports.png')
main.addDir('Wigan Athletic','http://www1.skysports.com/watch/video/sports/football/teams/wigan-athletic',176,art+'/skysports.png')
if murl=='championship':
main.addDir('Championship [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16428/100/1',173,art+'/skysports.png')
main.addDir('Barnsley','http://www1.skysports.com/watch/video/sports/football/teams/barnsley',176,art+'/skysports.png')
main.addDir('Birmingham City','http://www1.skysports.com/watch/video/sports/football/teams/birmingham-city',176,art+'/skysports.png')
main.addDir('Blackburn Rovers','http://www1.skysports.com/watch/video/sports/football/teams/blackburn-rovers',176,art+'/skysports.png')
main.addDir('Blackpool','http://www1.skysports.com/watch/video/sports/football/teams/blackpool',176,art+'/skysports.png')
main.addDir('Bolton Wanderers','http://www1.skysports.com/watch/video/sports/football/teams/bolton-wanderers',176,art+'/skysports.png')
main.addDir('Brighton','http://www1.skysports.com/watch/video/sports/football/teams/brighton',176,art+'/skysports.png')
main.addDir('Bristol City','http://www1.skysports.com/watch/video/sports/football/teams/bristol-city',176,art+'/skysports.png')
main.addDir('Burnley','http://www1.skysports.com/watch/video/sports/football/teams/burnley',176,art+'/skysports.png')
main.addDir('Cardiff City','http://www1.skysports.com/watch/video/sports/football/teams/cardiff-city',176,art+'/skysports.png')
main.addDir('Charlton Athletic','http://www1.skysports.com/watch/video/sports/football/teams/charlton-athletic',176,art+'/skysports.png')
main.addDir('Crystal Palace','http://www1.skysports.com/watch/video/sports/football/teams/crystal-palace',176,art+'/skysports.png')
main.addDir('Derby County','http://www1.skysports.com/watch/video/sports/football/teams/derby-county',176,art+'/skysports.png')
main.addDir('Huddersfield Town','http://www1.skysports.com/watch/video/sports/football/teams/huddersfield-town',176,art+'/skysports.png')
main.addDir('Hull City','http://www1.skysports.com/watch/video/sports/football/teams/hull-city',176,art+'/skysports.png')
main.addDir('Ipswich Town','http://www1.skysports.com/watch/video/sports/football/teams/ipswich-town',176,art+'/skysports.png')
main.addDir('Leeds United','http://www1.skysports.com/watch/video/sports/football/teams/leeds-united',176,art+'/skysports.png')
main.addDir('Leicester City','http://www1.skysports.com/watch/video/sports/football/teams/leicester-city',176,art+'/skysports.png')
main.addDir('Middlesbrough','http://www1.skysports.com/watch/video/sports/football/teams/middlesbrough',176,art+'/skysports.png')
main.addDir('Millwall','http://www1.skysports.com/watch/video/sports/football/teams/millwall',176,art+'/skysports.png')
main.addDir('Nottingham Forest','http://www1.skysports.com/watch/video/sports/football/teams/nottingham-forest',176,art+'/skysports.png')
main.addDir('Peterborough United','http://www1.skysports.com/watch/video/sports/football/teams/peterborough-united',176,art+'/skysports.png')
main.addDir('Sheffield Wednesday','http://www1.skysports.com/watch/video/sports/football/teams/sheffield-wednesday',176,art+'/skysports.png')
main.addDir('Watford','http://www1.skysports.com/watch/video/sports/football/teams/watford',176,art+'/skysports.png')
main.addDir('Wolverhampton','http://www1.skysports.com/watch/video/sports/football/teams/wolverhampton',176,art+'/skysports.png')
if murl=='league-one':
main.addDir('League One [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16478/100/1',173,art+'/skysports.png')
main.addDir('Bournemouth','http://www1.skysports.com/watch/video/sports/football/teams/bournemouth',176,art+'/skysports.png')
main.addDir('Brentford','http://www1.skysports.com/watch/video/sports/football/teams/brentford',176,art+'/skysports.png')
main.addDir('Bury','http://www1.skysports.com/watch/video/sports/football/teams/bury',176,art+'/skysports.png')
main.addDir('Carlisle United','http://www1.skysports.com/watch/video/sports/football/teams/carlisle-united',176,art+'/skysports.png')
main.addDir('Colchester United','http://www1.skysports.com/watch/video/sports/football/teams/colchester-united',176,art+'/skysports.png')
main.addDir('Coventry City','http://www1.skysports.com/watch/video/sports/football/teams/coventry-city',176,art+'/skysports.png')
main.addDir('Crawley Town','http://www1.skysports.com/watch/video/sports/football/teams/crawley-town',176,art+'/skysports.png')
main.addDir('Crewe Alexandra','http://www1.skysports.com/watch/video/sports/football/teams/crewe-alexandra',176,art+'/skysports.png')
main.addDir('Doncaster','http://www1.skysports.com/watch/video/sports/football/teams/doncaster',176,art+'/skysports.png')
main.addDir('Hartlepool United','http://www1.skysports.com/watch/video/sports/football/teams/hartlepool-united',176,art+'/skysports.png')
main.addDir('Leyton Orient','http://www1.skysports.com/watch/video/sports/football/teams/leyton-orient',176,art+'/skysports.png')
main.addDir('Milton Keynes Dons','http://www1.skysports.com/watch/video/sports/football/teams/milton-keynes-dons',176,art+'/skysports.png')
main.addDir('Notts County','http://www1.skysports.com/watch/video/sports/football/teams/notts-county',176,art+'/skysports.png')
main.addDir('Oldham Athletic','http://www1.skysports.com/watch/video/sports/football/teams/oldham-athletic',176,art+'/skysports.png')
main.addDir('Portsmouth','http://www1.skysports.com/watch/video/sports/football/teams/portsmouth',176,art+'/skysports.png')
main.addDir('Preston North End','http://www1.skysports.com/watch/video/sports/football/teams/preston-north-end',176,art+'/skysports.png')
main.addDir('Scunthorpe United','http://www1.skysports.com/watch/video/sports/football/teams/scunthorpe-united',176,art+'/skysports.png')
main.addDir('Sheffield United','http://www1.skysports.com/watch/video/sports/football/teams/sheffield-united',176,art+'/skysports.png')
main.addDir('Shrewsbury Town','http://www1.skysports.com/watch/video/sports/football/teams/shrewsbury-town',176,art+'/skysports.png')
main.addDir('Stevenage','http://www1.skysports.com/watch/video/sports/football/teams/stevenage',176,art+'/skysports.png')
main.addDir('Swindon Town','http://www1.skysports.com/watch/video/sports/football/teams/swindon-town',176,art+'/skysports.png')
main.addDir('Tranmere Rovers','http://www1.skysports.com/watch/video/sports/football/teams/tranmere-rovers',176,art+'/skysports.png')
main.addDir('Walsall','http://www1.skysports.com/watch/video/sports/football/teams/walsall',176,art+'/skysports.png')
main.addDir('Yeovil Town','http://www1.skysports.com/watch/video/sports/football/teams/yeovil-town',176,art+'/skysports.png')
if murl=='league-two':
main.addDir('League Two [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16478/100/1',173,art+'/skysports.png')
main.addDir('AFC Wimbledon','http://www1.skysports.com/watch/video/sports/football/teams/afc-wimbledon',176,art+'/skysports.png')
main.addDir('Accrington Stanley','http://www1.skysports.com/watch/video/sports/football/teams/accrington-stanley',176,art+'/skysports.png')
main.addDir('Aldershot','http://www1.skysports.com/watch/video/sports/football/teams/aldershot',176,art+'/skysports.png')
main.addDir('Barnet FC','http://www1.skysports.com/watch/video/sports/football/teams/barnet-fc',176,art+'/skysports.png')
main.addDir('Bradford City','http://www1.skysports.com/watch/video/sports/football/teams/bradford-city',176,art+'/skysports.png')
main.addDir('Bristol Rovers','http://www1.skysports.com/watch/video/sports/football/teams/bristol-rovers',176,art+'/skysports.png')
main.addDir('Burton Albion','http://www1.skysports.com/watch/video/sports/football/teams/burton-albion',176,art+'/skysports.png')
main.addDir('Cheltenham Town','http://www1.skysports.com/watch/video/sports/football/teams/cheltenham-town',176,art+'/skysports.png')
main.addDir('Chesterfield','http://www1.skysports.com/watch/video/sports/football/teams/chesterfield',176,art+'/skysports.png')
main.addDir('Dagenham and Redbridge','http://www1.skysports.com/watch/video/sports/football/teams/dagenham-and-redbridge',176,art+'/skysports.png')
main.addDir('Exeter City','http://www1.skysports.com/watch/video/sports/football/teams/exeter-city',176,art+'/skysports.png')
main.addDir('Fleetwood Town','http://www1.skysports.com/watch/video/sports/football/teams/fleetwood-town',176,art+'/skysports.png')
main.addDir('Gillingham','http://www1.skysports.com/watch/video/sports/football/teams/gillingham',176,art+'/skysports.png')
main.addDir('Hereford','http://www1.skysports.com/watch/video/sports/football/teams/hereford',176,art+'/skysports.png')
main.addDir('Macclesfield Town','http://www1.skysports.com/watch/video/sports/football/teams/macclesfield-town',176,art+'/skysports.png')
main.addDir('Morecambe','http://www1.skysports.com/watch/video/sports/football/teams/morecambe',176,art+'/skysports.png')
main.addDir('Northampton Town','http://www1.skysports.com/watch/video/sports/football/teams/northampton-town',176,art+'/skysports.png')
main.addDir('Oxford Utd','http://www1.skysports.com/watch/video/sports/football/teams/oxford-utd',176,art+'/skysports.png')
main.addDir('Plymouth Argyle','http://www1.skysports.com/watch/video/sports/football/teams/plymouth-argyle',176,art+'/skysports.png')
main.addDir('Port Vale','http://www1.skysports.com/watch/video/sports/football/teams/port-vale',176,art+'/skysports.png')
main.addDir('Rochdale','http://www1.skysports.com/watch/video/sports/football/teams/rochdale',176,art+'/skysports.png')
main.addDir('Rotherham United','http://www1.skysports.com/watch/video/sports/football/teams/rotherham-united',176,art+'/skysports.png')
main.addDir('Southend United','http://www1.skysports.com/watch/video/sports/football/teams/southend-united',176,art+'/skysports.png')
main.addDir('Torquay United','http://www1.skysports.com/watch/video/sports/football/teams/torquay-united',176,art+'/skysports.png')
main.addDir('Wycombe Wanderers','http://www1.skysports.com/watch/video/sports/football/teams/wycombe-wanderers',176,art+'/skysports.png')
main.addDir('York City','http://www1.skysports.com/watch/video/sports/football/teams/york-city',176,art+'/skysports.png')
if murl=='scottish-football':
main.addDir('Scottish Football [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16480/100/1',173,art+'/skysports.png')
main.addDir('Aberdeen','http://www1.skysports.com/watch/video/sports/football/teams/aberdeen',176,art+'/skysports.png')
main.addDir('Celtic','http://www1.skysports.com/watch/video/sports/football/teams/celtic',176,art+'/skysports.png')
main.addDir('Rangers','http://www1.skysports.com/watch/video/sports/football/teams/rangers',176,art+'/skysports.png')
if murl=='primera-liga':
main.addDir('Primera Liga [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/16995/100/1',173,art+'/skysports.png')
main.addDir('Athletic Bilbao','http://www1.skysports.com/watch/video/sports/football/teams/athletic-bilbao',176,art+'/skysports.png')
main.addDir('Atletico Madrid','http://www1.skysports.com/watch/video/sports/football/teams/atletico-madrid',176,art+'/skysports.png')
main.addDir('Barcelona','http://www1.skysports.com/watch/video/sports/football/teams/barcelona',176,art+'/skysports.png')
main.addDir('Celta Vigo','http://www1.skysports.com/watch/video/sports/football/teams/celta-vigo',176,art+'/skysports.png')
main.addDir('Deportivo La Coruna','http://www1.skysports.com/watch/video/sports/football/teams/deportivo-la-coruna',176,art+'/skysports.png')
main.addDir('Espanyol','http://www1.skysports.com/watch/video/sports/football/teams/espanyol',176,art+'/skysports.png')
main.addDir('Getafe','http://www1.skysports.com/watch/video/sports/football/teams/getafe',176,art+'/skysports.png')
main.addDir('Granada','http://www1.skysports.com/watch/video/sports/football/teams/granada',176,art+'/skysports.png')
main.addDir('Levante','http://www1.skysports.com/watch/video/sports/football/teams/levante',176,art+'/skysports.png')
main.addDir('Malaga','http://www1.skysports.com/watch/video/sports/football/teams/malaga',176,art+'/skysports.png')
main.addDir('Osasuna','http://www1.skysports.com/watch/video/sports/football/teams/osasuna',176,art+'/skysports.png')
main.addDir('Racing Santander','http://www1.skysports.com/watch/video/sports/football/teams/racing-santander',176,art+'/skysports.png')
main.addDir('Rayo Vallecano','http://www1.skysports.com/watch/video/sports/football/teams/rayo-vallecano',176,art+'/skysports.png')
main.addDir('Real Betis','http://www1.skysports.com/watch/video/sports/football/teams/real-betis',176,art+'/skysports.png')
main.addDir('Real Madrid','http://www1.skysports.com/watch/video/sports/football/teams/real-madrid',176,art+'/skysports.png')
main.addDir('Real Mallorca','http://www1.skysports.com/watch/video/sports/football/teams/real-mallorca',176,art+'/skysports.png')
main.addDir('Real Sociedad','http://www1.skysports.com/watch/video/sports/football/teams/real-sociedad',176,art+'/skysports.png')
main.addDir('Real Valladolid','http://www1.skysports.com/watch/video/sports/football/teams/real-valladolid',176,art+'/skysports.png')
main.addDir('Real Zaragoza','http://www1.skysports.com/watch/video/sports/football/teams/real-zaragoza',176,art+'/skysports.png')
main.addDir('Sevilla','http://www1.skysports.com/watch/video/sports/football/teams/sevilla',176,art+'/skysports.png')
main.addDir('Sporting Gijon','http://www1.skysports.com/watch/video/sports/football/teams/sporting-gijon',176,art+'/skysports.png')
main.addDir('Tenerife','http://www1.skysports.com/watch/video/sports/football/teams/tenerife',176,art+'/skysports.png')
main.addDir('UD Almeria','http://www1.skysports.com/watch/video/sports/football/teams/ud-almeria',176,art+'/skysports.png')
main.addDir('Valencia','http://www1.skysports.com/watch/video/sports/football/teams/valencia',176,art+'/skysports.png')
main.addDir('Villarreal','http://www1.skysports.com/watch/video/sports/football/teams/villarreal',176,art+'/skysports.png')
main.addDir('Xerez','http://www1.skysports.com/watch/video/sports/football/teams/xerez',176,art+'/skysports.png')
if murl=='grand-prix':
main.addDir('Grand Prix [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/27438/100/1',173,art+'/skysports.png')
main.addDir('Australia','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/australia',176,art+'/skysports.png')
main.addDir('Malaysia','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/malaysia',176,art+'/skysports.png')
main.addDir('China','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/china',176,art+'/skysports.png')
main.addDir('Bahrain','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/bahrain',176,art+'/skysports.png')
main.addDir('Spain','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/spain',176,art+'/skysports.png')
main.addDir('Monaco','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/monaco',176,art+'/skysports.png')
main.addDir('Canada','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/canada',176,art+'/skysports.png')
main.addDir('Great Britain','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/great-britain',176,art+'/skysports.png')
main.addDir('Germany','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/germany',176,art+'/skysports.png')
main.addDir('Hungary','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/hungary',176,art+'/skysports.png')
main.addDir('Belgium','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/belgium',176,art+'/skysports.png')
main.addDir('Italy','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/italy',176,art+'/skysports.png')
main.addDir('Singapore','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/singapore',176,art+'/skysports.png')
main.addDir('Korea','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/korea',176,art+'/skysports.png')
main.addDir('Japan','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/japan',176,art+'/skysports.png')
main.addDir('India','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/india',176,art+'/skysports.png')
main.addDir('Abu Dhabi','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/abu-dhabi',176,art+'/skysports.png')
main.addDir('United States','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/united-states',176,art+'/skysports.png')
main.addDir('Brazil','http://www1.skysports.com/watch/video/sports/formula-1/grandprix/brazil',176,art+'/skysports.png')
if murl=='f1Teams':
main.addDir('Teams [COLOR red]All Videos[/COLOR]','http://www1.skysports.com/watch/more/5/28292/100/1',173,art+'/skysports.png')
main.addDir('Caterham','http://www1.skysports.com/watch/video/sports/formula-1/teams/caterham',176,art+'/skysports.png')
main.addDir('Ferrari','http://www1.skysports.com/watch/video/sports/formula-1/teams/ferrari',176,art+'/skysports.png')
main.addDir('Force India','http://www1.skysports.com/watch/video/sports/formula-1/teams/force-india',176,art+'/skysports.png')
main.addDir('Lotus','http://www1.skysports.com/watch/video/sports/formula-1/teams/lotus',176,art+'/skysports.png')
main.addDir('Marussia','http://www1.skysports.com/watch/video/sports/formula-1/teams/marussia',176,art+'/skysports.png')
main.addDir('McLaren','http://www1.skysports.com/watch/video/sports/formula-1/teams/mclaren',176,art+'/skysports.png')
main.addDir('Mercedes GP','http://www1.skysports.com/watch/video/sports/formula-1/teams/mercedes-gp',176,art+'/skysports.png')
main.addDir('Red Bull','http://www1.skysports.com/watch/video/sports/formula-1/teams/red-bull',176,art+'/skysports.png')
main.addDir('Sauber','http://www1.skysports.com/watch/video/sports/formula-1/teams/sauber',176,art+'/skysports.png')
main.addDir('Toro Rosso','http://www1.skysports.com/watch/video/sports/formula-1/teams/toro-rosso',176,art+'/skysports.png')
main.addDir('Williams','http://www1.skysports.com/watch/video/sports/formula-1/teams/williams',176,art+'/skysports.png')
def SKYSPORTSTV(murl):
main.GA("SkySportsTV","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match = re.compile('<img src=".+?" data-src="(.+?)" class=".+?"> </a> </div> <div class=".+?"> <a href="(.+?)" class=".+?"> <h4 class=".+?">(.+?)</h4>').findall(link)
for thumb,url, name in match:
thumb=thumb.replace('16-9/#{30}','384x216')
url=url.replace('watch/tv-shows','watch/video/tv-shows').replace('/fantasyFC','/watch/video/tv-shows/fantasyFC')
main.addDir(name,'http://www1.skysports.com'+url,176,thumb)
main.VIEWSB()
def SKYSPORTSList(murl):
main.GA("SkySports","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<a href="(.+?)" class=".+?"> <img src=".+?" data-src="(.+?)" class=".+?"> <div class=".+?"><span class=".+?"></span></div> </a> </div> <div class=".+?"> <a href=".+?" class="-a-block"> <h4 class=".+?">(.+?)</h4> <p class=".+?">(.+?)</p> <button class=".+?">(.+?)</button>').findall(link)
for url,thumb,name,date,typ in match:
thumb=thumb.replace('16-9/#{30}','384x216')
if name!='Sky Sports News Report':
if typ=='Watch Now':
main.addPlayMs(name+' [COLOR red]'+date+'[/COLOR]',url,174,thumb,'','','','','')
else:
main.addPlayMs('[COLOR red]'+name+'[/COLOR]'+' '+date,url,177,thumb,'','','','','')
def SKYSPORTSList2(murl):
main.GA("SkySports","List")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
page=re.compile('data-current-page=".+?" data-pattern="(.+?)">').findall(link)
if len(page)>0:
for durl in page:
durl=durl.replace('{currentPage}','1').replace('/12/','/75/')
link2=main.OPENURL('http://www1.skysports.com'+durl)
link2=link2.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('<a href="(.+?)" class=".+?"> <img src=".+?" data-src="(.+?)" class=".+?"> <div class=".+?"><span class=".+?"></span></div> </a> </div> <div class=".+?"> <a href=".+?" class="-a-block"> <h4 class=".+?">(.+?)</h4> <p class=".+?">(.+?)</p> <button class=".+?">(.+?)</button>').findall(link2)
for url,thumb,name,date,typ in match:
thumb=thumb.replace('16-9/#{30}','384x216')
if name!='Sky Sports News Report':
if typ=='Watch Now':
main.addPlayMs(name+' [COLOR red]'+date+'[/COLOR]',url,174,thumb,'','','','','')
else:
main.addPlayMs('[COLOR red]'+name+'[/COLOR]'+' '+date,url,177,thumb,'','','','','')
else:
xbmc.executebuiltin("XBMC.Notification(Sorry!,No Video's to list,3000)")
def playBrightCoveStream(bc_videoID):
from pyamf import remoting
import httplib
bc_playerID = 813474149001
bc_publisherID = 165012893
bc_const = "cf760beae3fbdde270b76f2109537e13144e6fbd"
conn = httplib.HTTPConnection("c.brightcove.com")
envelope = remoting.Envelope(amfVersion=3)
envelope.bodies.append(("/1", remoting.Request(target="com.brightcove.player.runtime.PlayerMediaFacade.findMediaById", body=[bc_const, bc_playerID, bc_videoID, bc_publisherID], envelope=envelope)))
conn.request("POST", "/services/messagebroker/amf?playerId=" + str(bc_playerID), str(remoting.encode(envelope).read()), {'content-type': 'application/x-amf'})
response = conn.getresponse().read()
response = remoting.decode(response).bodies[0][1].body
streamUrl = ""
for item in sorted(response['renditions'], key=lambda item: item['encodingRate'], reverse=False):
encRate = item['encodingRate']
# Settings come back as strings; assume "ss-qua" stores a numeric encoding-rate cap.
if encRate <= int(selfAddon.getSetting("ss-qua")):
streamUrl = item['defaultURL']
if streamUrl.find("http://") == 0:
return streamUrl+"?videoId="+bc_videoID+"&lineUpId=&pubId="+str(bc_publisherID)+"&playerId="+str(bc_playerID)+"&affiliateId=&v=&fp=&r=&g="
else:
url = streamUrl[0:streamUrl.find("&")]
playpath = streamUrl[streamUrl.find("&")+1:]
return url+' playpath='+playpath
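# Illustrative sketch (not part of the original add-on): the selection loop above keeps
# the highest rendition whose encodingRate does not exceed the configured quality cap.
# The helper below isolates that logic with a hypothetical renditions list; the names
# and URLs are made up for illustration.
def _pick_rendition_example(renditions, max_rate):
    # Walk renditions from lowest to highest bitrate, remembering the last one
    # that still fits under the cap.
    stream_url = ""
    for item in sorted(renditions, key=lambda r: r['encodingRate']):
        if item['encodingRate'] <= max_rate:
            stream_url = item['defaultURL']
    return stream_url
# Example: with renditions at 400k/900k/1700k and a cap of 1000000, this returns
# the 900k rendition's defaultURL.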
def SKYSPORTSLink(mname,murl):
main.GA("SkySports","Watched")
xbmc.executebuiltin("XBMC.Notification(Please Wait!,Playing Video,1500)")
ok= True
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','')
match=re.compile('data-video-id="([^"]+?)"').findall(link)
stream_url=playBrightCoveStream(match[0])
desc=re.compile('<meta name="description" content="(.+?)"/>').findall(link)
thumb=re.compile("<link rel='image_src' href='(.+?)' />").findall(link)
infoL={ "Title": mname, "Plot": desc[0]}
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb[0],infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]SkySports[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb[0], fanart='', is_folder=False)
player.KeepAlive()
return ok
|
marduk191/plugin.video.movie25
|
resources/libs/sports/skysports.py
|
Python
|
gpl-3.0
| 35,627 | 0.028012 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM; instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
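# A minimal usage sketch (assumes an existing SparkContext ``sc``): repeated calls
# return the same shared instance instead of constructing a new JVM context.
#
#   ctx1 = SQLContext.getOrCreate(sc)
#   ctx2 = SQLContext.getOrCreate(sc)
#   assert ctx1 is ctx2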
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
:return: a wrapped :class:`UserDefinedFunction`
>>> strlen = sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> sqlContext.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
return self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
@ignore_unicode_prefix
@since(2.3)
def registerJavaUDAF(self, name, javaClassName):
"""Register a java UDAF so it can be used in SQL statements.
:param name: name of the UDAF
:param javaClassName: fully qualified name of java class
>>> sqlContext.registerJavaUDAF("javaUDAF",
... "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = sqlContext.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.registerTempTable("df")
>>> sqlContext.sql("SELECT name, javaUDAF(id) as avg from df group by name").collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to a row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
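# Hedged example (the path and table name are illustrative, not from this repo):
#
#   df = sqlContext.createExternalTable("people", path="/tmp/people.parquet",
#                                       source="parquet")
#   sqlContext.sql("SELECT count(*) FROM people").collect()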
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Evolving.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM; instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
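# As the deprecation note above says, new code would typically build a Hive-enabled
# session directly instead of instantiating HiveContext (sketch, assuming Spark >= 2.0):
#
#   spark = SparkSession.builder.enableHiveSupport().getOrCreate()
#   spark.sql("SHOW TABLES").show()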
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encountering incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
def registerJavaFunction(self, name, javaClassName, returnType=None):
self.sqlContext.registerJavaFunction(name, javaClassName, returnType)
def registerJavaUDAF(self, name, javaClassName):
self.sqlContext.registerJavaUDAF(name, javaClassName)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
aokolnychyi/spark
|
python/pyspark/sql/context.py
|
Python
|
apache-2.0
| 24,880 | 0.002854 |
# Copyright 2011 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Speedway iptables generator. This is a subclass of Iptables lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = 'watson@google.com (Tony Watson)'
from string import Template
from lib import iptables
class Error(Exception):
pass
class Term(iptables.Term):
"""Generate Iptables policy terms."""
_PLATFORM = 'speedway'
_PREJUMP_FORMAT = None
_POSTJUMP_FORMAT = Template('-A $filter -j $term')
class Speedway(iptables.Iptables):
"""Generates filters and terms from provided policy object."""
_PLATFORM = 'speedway'
_DEFAULT_PROTOCOL = 'all'
SUFFIX = '.ipt'
_RENDER_PREFIX = '*filter'
_RENDER_SUFFIX = 'COMMIT'
_DEFAULTACTION_FORMAT = ':%s %s'
_TERM = Term
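# Illustrative only: with the prefix/suffix attributes above, a rendered Speedway
# policy is plain iptables-restore input of roughly this shape (filter and term
# names are placeholders):
#
#   *filter
#   :example-filter ACCEPT
#   -A example-filter -j example-term
#   COMMIT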
|
ryantierney513/capirca
|
lib/speedway.py
|
Python
|
apache-2.0
| 1,410 | 0.00922 |
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common import rest_client
from tempest.common import xml_utils
from tempest import config
from tempest import exceptions
CONF = config.CONF
class AggregatesClientXML(rest_client.RestClient):
TYPE = "xml"
def __init__(self, auth_provider):
super(AggregatesClientXML, self).__init__(auth_provider)
self.service = CONF.compute.catalog_type
def _format_aggregate(self, g):
agg = xml_utils.xml_to_json(g)
aggregate = {}
for key, value in agg.items():
if key == 'hosts':
aggregate['hosts'] = []
for k, v in value.items():
aggregate['hosts'].append(v)
elif key == 'availability_zone':
aggregate[key] = None if value == 'None' else value
else:
aggregate[key] = value
return aggregate
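# Sketch of the transformation above (field values are illustrative): an XML
# aggregate whose xml_to_json form is
#   {'name': 'agg1', 'availability_zone': 'None', 'hosts': {'host': 'compute1'}}
# comes back as
#   {'name': 'agg1', 'availability_zone': None, 'hosts': ['compute1']}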
def _parse_array(self, node):
return [self._format_aggregate(x) for x in node]
def list_aggregates(self):
"""Get aggregate list."""
resp, body = self.get("os-aggregates")
aggregates = self._parse_array(etree.fromstring(body))
return resp, aggregates
def get_aggregate(self, aggregate_id):
"""Get details of the given aggregate."""
resp, body = self.get("os-aggregates/%s" % str(aggregate_id))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def create_aggregate(self, name, availability_zone=None):
"""Creates a new aggregate."""
if availability_zone is not None:
post_body = xml_utils.Element("aggregate", name=name,
availability_zone=availability_zone)
else:
post_body = xml_utils.Element("aggregate", name=name)
resp, body = self.post('os-aggregates',
str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def update_aggregate(self, aggregate_id, name, availability_zone=None):
"""Update a aggregate."""
if availability_zone is not None:
put_body = xml_utils.Element("aggregate", name=name,
availability_zone=availability_zone)
else:
put_body = xml_utils.Element("aggregate", name=name)
resp, body = self.put('os-aggregates/%s' % str(aggregate_id),
str(xml_utils.Document(put_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def delete_aggregate(self, aggregate_id):
"""Deletes the given aggregate."""
return self.delete("os-aggregates/%s" % str(aggregate_id))
def is_resource_deleted(self, id):
try:
self.get_aggregate(id)
except exceptions.NotFound:
return True
return False
def add_host(self, aggregate_id, host):
"""Adds a host to the given aggregate."""
post_body = xml_utils.Element("add_host", host=host)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def remove_host(self, aggregate_id, host):
"""Removes a host from the given aggregate."""
post_body = xml_utils.Element("remove_host", host=host)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
def set_metadata(self, aggregate_id, meta):
"""Replaces the aggregate's existing metadata with new metadata."""
post_body = xml_utils.Element("set_metadata")
metadata = xml_utils.Element("metadata")
post_body.append(metadata)
for k, v in meta.items():
meta = xml_utils.Element(k)
meta.append(xml_utils.Text(v))
metadata.append(meta)
resp, body = self.post('os-aggregates/%s/action' % aggregate_id,
str(xml_utils.Document(post_body)))
aggregate = self._format_aggregate(etree.fromstring(body))
return resp, aggregate
|
queria/my-tempest
|
tempest/services/compute/xml/aggregates_client.py
|
Python
|
apache-2.0
| 5,059 | 0 |
"""luigi target for writing data into an HP Vertica database"""
import logging
import luigi
logger = logging.getLogger('luigi-interface') # pylint: disable-msg=C0103
try:
import vertica_python
except ImportError:
logger.warning("Attempted to load Vertica interface tools without the vertica_python package; will crash if \
Vertica functionality is used.")
class VerticaTarget(luigi.Target):
"""
Target for a resource in HP Vertica
"""
marker_table = 'table_updates'
def __init__(self, host, user, password, schema, table, update_id):
"""
Initializes a VerticaTarget instance.
:param host: Vertica server address. Possibly a host:port string.
:type host: str
:param user: database user.
:type user: str
:param password: password for the specified user.
:type password: str
:param schema: the schema being written to.
:type schema: str
:param table: the table within schema being written to.
:type table: str
:param update_id: an identifier for this data set.
:type update_id: str
"""
if ':' in host:
self.host, self.port = host.split(':')
self.port = int(self.port)
else:
self.host = host
self.port = 5433
self.user = user
self.password = password
self.schema = schema
self.table = table
self.update_id = update_id
# Default to using the schema data is being inserted into as the schema for the marker table.
self.marker_schema = schema
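# A minimal usage sketch (hosts, credentials and table names are placeholders): a
# luigi task's output() would typically return such a target and call touch() once
# the load into schema.table has committed.
#
#   target = VerticaTarget('vertica.example.com:5433', 'loader', 'secret',
#                          'analytics', 'events', update_id='events-2014-01-01')
#   if not target.exists():
#       ...  # insert the data, then:
#       target.touch()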
def touch(self, connection=None):
"""
Mark this update as complete.
IMPORTANT: If the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
connection.cursor().execute(
"""INSERT INTO {marker_schema}.{marker_table} (update_id, target_table)
VALUES (%s, %s)""".format(marker_schema=self.marker_schema, marker_table=self.marker_table),
(self.update_id, "{schema}.{table}".format(schema=self.schema, table=self.table))
)
# make sure update is properly marked
assert self.exists(connection)
def exists(self, connection=None): # pylint: disable-msg=W0221
if connection is None:
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
try:
cursor.execute("""SELECT 1 FROM {marker_schema}.{marker_table}
WHERE update_id = %s
LIMIT 1""".format(marker_schema=self.marker_schema, marker_table=self.marker_table),
(self.update_id,)
)
row = cursor.fetchone()
except vertica_python.errors.Error as err:
if (type(err) is vertica_python.errors.MissingRelation) or ('Sqlstate: 42V01' in err.args[0]):
# If so, then our query error failed because the table doesn't exist.
row = None
else:
raise
return row is not None
def connect(self, autocommit=False):
"""
Creates a connection to a Vertica database using the supplied credentials.
:param autocommit: whether the connection should automatically commit.
:type autocommit: bool
"""
# vertica-python 0.5.0 changes the code for connecting to databases to use kwargs instead of a dictionary.
# The 'database' parameter is included for DBAPI reasons and does not actually affect the session.
connection = vertica_python.connect(user=self.user, password=self.password, host=self.host, port=self.port,
database="", autocommit=autocommit)
return connection
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect(autocommit=True)
cursor = connection.cursor()
try:
cursor.execute(
""" CREATE TABLE {marker_schema}.{marker_table} (
id AUTO_INCREMENT,
update_id VARCHAR(4096) NOT NULL,
target_table VARCHAR(128),
inserted TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (update_id, id)
)
""".format(marker_schema=self.marker_schema, marker_table=self.marker_table)
)
except vertica_python.errors.QueryError as err:
if 'Sqlstate: 42710' in err.args[0]: # This Sqlstate will appear if the marker table already exists.
pass
else:
raise
connection.close()
|
sssllliang/edx-analytics-pipeline
|
edx/analytics/tasks/util/vertica_target.py
|
Python
|
agpl-3.0
| 5,187 | 0.003085 |
from django import forms
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm
from crispy_forms.bootstrap import FormActions, AppendedText, StrictButton, InlineField
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Button, Field, Hidden, HTML, Div
class MyLoginForm(AuthenticationForm):
def __init__(self, *args, **kwargs):
super(MyLoginForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.label_class = 'col-lg-3'
self.helper.field_class = 'col-lg-6'
self.helper.layout = Layout(
'username',
Field('password'),
FormActions(Submit('login', 'Login', css_class='btn btn_success')),
)
class MyPasswordChangeForm(PasswordChangeForm):
def __init__(self, *args, **kwargs):
super(MyPasswordChangeForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.form_method = 'post'
self.helper.form_action = ''
self.helper.label_class = 'col-lg-3'
self.helper.field_class = 'col-lg-6'
self.helper.layout = Layout(
'old_password',
'new_password1',
'new_password2',
FormActions(Submit('save', 'Save', css_class='btn btn_success')),
)
|
blacksph3re/alastair
|
alastair_cookie/forms.py
|
Python
|
gpl-2.0
| 1,349 | 0.029652 |
"""
WSGI config for PythonAnywhere test project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
hjwp/cookiecutter-example-project
|
config/wsgi.py
|
Python
|
mit
| 1,632 | 0 |
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = r'''
---
module: nxos_hsrp
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages HSRP configuration on NX-OS switches.
description:
- Manages HSRP configuration on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- HSRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- HSRP cannot be configured on loopback interfaces.
- MD5 authentication is only possible with HSRPv2; if HSRPv1 is used instead it
is silently ignored and no error is raised. Here we allow
MD5 authentication only with HSRPv2 in order to enforce better practice.
options:
group:
description:
- HSRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for HSRP.
required: true
version:
description:
- HSRP version.
default: 1
choices: ['1','2']
priority:
description:
- HSRP priority or keyword 'default'.
preempt:
description:
- Enable/Disable preempt.
choices: ['enabled', 'disabled']
vip:
description:
- HSRP virtual IP address or keyword 'default'
auth_string:
description:
- Authentication string. If this needs to be hidden(for md5 type), the string
should be 7 followed by the key string. Otherwise, it can be 0 followed by
key string or just key string (for backward compatibility). For text type,
this should be just be a key string. if this is 'default', authentication
is removed.
auth_type:
description:
- Authentication type.
choices: ['text','md5']
state:
description:
- Specify desired state of the resource.
choices: ['present','absent']
default: 'present'
'''
EXAMPLES = r'''
- name: Ensure HSRP is configured with the following params on an SVI
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
- name: Ensure HSRP is configured with the following params on an SVI
with clear text authentication
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
auth_type: text
auth_string: CISCO
- name: Ensure HSRP is configured with md5 authentication and clear
authentication string
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
auth_type: md5
auth_string: "0 1234"
- name: Ensure HSRP is configured with md5 authentication and hidden
authentication string
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
auth_type: md5
auth_string: "7 1234"
- name: Remove HSRP config for given interface, group, and VIP
nxos_hsrp:
group: 10
interface: vlan10
vip: 10.1.1.1
host: 68.170.147.165
state: absent
'''
RETURN = r'''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "hsrp version 2", "hsrp 30", "ip 10.30.1.1"]
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import get_interface_type
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {
'vip': None,
'priority': '100',
'auth_type': 'text',
'auth_string': 'cisco',
}
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
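# Sketch: apply_key_map renames only the keys present in the map and stringifies
# truthy values, e.g. {'sh_group_num': 10, 'sh_vip': '10.1.1.1', 'other': 'x'}
# run through the hsrp_key map defined below becomes {'group': '10', 'vip': '10.1.1.1'}.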
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0} | json'.format(interface)
interface = {}
mode = 'unknown'
try:
body = run_commands(module, [command])[0]
except IndexError:
return None
if intf_type in ['ethernet', 'portchannel']:
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
return mode
def get_hsrp_group(group, interface, module):
command = 'show hsrp group {0} all | json'.format(group)
hsrp = {}
hsrp_key = {
'sh_if_index': 'interface',
'sh_group_num': 'group',
'sh_group_version': 'version',
'sh_cfg_prio': 'priority',
'sh_preempt': 'preempt',
'sh_vip': 'vip',
'sh_authentication_type': 'auth_type',
'sh_keystring_attr': 'auth_enc',
'sh_authentication_data': 'auth_string'
}
try:
body = run_commands(module, [command])[0]
hsrp_table = body['TABLE_grp_detail']['ROW_grp_detail']
if 'unknown enum:' in str(hsrp_table):
hsrp_table = get_hsrp_group_unknown_enum(module, command, hsrp_table)
except (AttributeError, IndexError, TypeError, KeyError):
return {}
if isinstance(hsrp_table, dict):
hsrp_table = [hsrp_table]
for hsrp_group in hsrp_table:
parsed_hsrp = apply_key_map(hsrp_key, hsrp_group)
parsed_hsrp['interface'] = parsed_hsrp['interface'].lower()
if parsed_hsrp['version'] == 'v1':
parsed_hsrp['version'] = '1'
elif parsed_hsrp['version'] == 'v2':
parsed_hsrp['version'] = '2'
if parsed_hsrp['auth_type'] == 'md5':
if parsed_hsrp['auth_enc'] == 'hidden':
parsed_hsrp['auth_enc'] = '7'
else:
parsed_hsrp['auth_enc'] = '0'
if parsed_hsrp['interface'] == interface:
return parsed_hsrp
return hsrp
def get_hsrp_group_unknown_enum(module, command, hsrp_table):
'''Some older NXOS images fail to set the attr values when using structured output and
instead set the values to <unknown enum>. This fallback method is a workaround that
uses an unstructured (text) request to query the device a second time.
'sh_preempt' is currently the only attr affected. Add checks for other attrs as needed.
'''
if 'unknown enum:' in hsrp_table['sh_preempt']:
cmd = {'output': 'text', 'command': command.split('|')[0]}
out = run_commands(module, cmd)[0]
hsrp_table['sh_preempt'] = 'enabled' if ('may preempt' in out) else 'disabled'
return hsrp_table
def get_commands_remove_hsrp(group, interface):
commands = ['interface {0}'.format(interface), 'no hsrp {0}'.format(group)]
return commands
def get_commands_config_hsrp(delta, interface, args, existing):
commands = []
config_args = {
'group': 'hsrp {group}',
'priority': '{priority}',
'preempt': '{preempt}',
'vip': '{vip}'
}
preempt = delta.get('preempt', None)
group = delta.get('group', None)
vip = delta.get('vip', None)
priority = delta.get('priority', None)
if preempt:
if preempt == 'enabled':
delta['preempt'] = 'preempt'
elif preempt == 'disabled':
delta['preempt'] = 'no preempt'
if priority:
if priority == 'default':
if existing and existing.get('priority') != PARAM_TO_DEFAULT_KEYMAP.get('priority'):
delta['priority'] = 'no priority'
else:
del(delta['priority'])
else:
delta['priority'] = 'priority {0}'.format(delta['priority'])
if vip:
if vip == 'default':
if existing and existing.get('vip') != PARAM_TO_DEFAULT_KEYMAP.get('vip'):
delta['vip'] = 'no ip'
else:
del(delta['vip'])
else:
delta['vip'] = 'ip {0}'.format(delta['vip'])
for key in delta:
command = config_args.get(key, 'DNE').format(**delta)
if command and command != 'DNE':
if key == 'group':
commands.insert(0, command)
else:
commands.append(command)
command = None
auth_type = delta.get('auth_type', None)
auth_string = delta.get('auth_string', None)
auth_enc = delta.get('auth_enc', None)
if auth_type or auth_string:
if not auth_type:
auth_type = args['auth_type']
elif not auth_string:
auth_string = args['auth_string']
if auth_string != 'default':
if auth_type == 'md5':
command = 'authentication md5 key-string {0} {1}'.format(auth_enc, auth_string)
commands.append(command)
elif auth_type == 'text':
command = 'authentication text {0}'.format(auth_string)
commands.append(command)
else:
if existing and existing.get('auth_string') != PARAM_TO_DEFAULT_KEYMAP.get('auth_string'):
commands.append('no authentication')
if commands and not group:
commands.insert(0, 'hsrp {0}'.format(args['group']))
version = delta.get('version', None)
if version:
if version == '2':
command = 'hsrp version 2'
elif version == '1':
command = 'hsrp version 1'
commands.insert(0, command)
commands.insert(0, 'interface {0}'.format(interface))
if commands:
if not commands[0].startswith('interface'):
commands.insert(0, 'interface {0}'.format(interface))
return commands
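# Illustrative only (ordering of the per-group lines may vary): a delta of
# {'group': '10', 'vip': '10.1.1.1', 'priority': '150', 'preempt': 'enabled'}
# on interface vlan10 produces commands along the lines of
#   ['interface vlan10', 'hsrp 10', 'priority 150', 'preempt', 'ip 10.1.1.1']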
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = run_commands(module, [command], check_rc=False)[0]
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError):
return 'DNE'
def validate_config(body, vip, module):
new_body = ''.join(body)
if "invalid ip address" in new_body.lower():
module.fail_json(msg="Invalid VIP. Possible duplicate IP address.",
vip=vip)
def main():
argument_spec = dict(
group=dict(required=True, type='str'),
interface=dict(required=True),
version=dict(choices=['1', '2'], default='1', required=False),
priority=dict(type='str', required=False),
preempt=dict(type='str', choices=['disabled', 'enabled'], required=False),
vip=dict(type='str', required=False),
auth_type=dict(choices=['text', 'md5'], required=False),
auth_string=dict(type='str', required=False),
state=dict(choices=['absent', 'present'], required=False, default='present')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
results = dict(changed=False, warnings=warnings)
interface = module.params['interface'].lower()
group = module.params['group']
version = module.params['version']
state = module.params['state']
priority = module.params['priority']
preempt = module.params['preempt']
vip = module.params['vip']
auth_type = module.params['auth_type']
auth_full_string = module.params['auth_string']
auth_enc = '0'
auth_string = None
if auth_full_string:
kstr = auth_full_string.split()
if len(kstr) == 2:
auth_enc = kstr[0]
auth_string = kstr[1]
elif len(kstr) == 1:
auth_string = kstr[0]
else:
module.fail_json(msg='Invalid auth_string')
if auth_enc != '0' and auth_enc != '7':
module.fail_json(msg='Invalid auth_string, only 0 or 7 allowed')
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' and network_api == 'cliconf'):
if is_default(interface, module) == 'DNE':
module.fail_json(msg='That interface does not exist yet. Create '
'it first.', interface=interface)
if intf_type == 'loopback':
module.fail_json(msg="Loopback interfaces don't support HSRP.",
interface=interface)
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
if auth_type or auth_string:
if not (auth_type and auth_string):
module.fail_json(msg='When using auth parameters, you need BOTH '
'auth_type AND auth_string.')
args = dict(group=group, version=version, priority=priority,
preempt=preempt, vip=vip, auth_type=auth_type,
auth_string=auth_string, auth_enc=auth_enc)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_hsrp_group(group, interface, module)
# This will enforce better practice with md5 and hsrp version.
if proposed.get('auth_type', None) == 'md5':
if proposed['version'] == '1':
module.fail_json(msg="It's recommended to use HSRP v2 "
"when auth_type=md5")
elif not proposed.get('auth_type', None) and existing:
if (proposed['version'] == '1' and
existing['auth_type'] == 'md5') and state == 'present':
module.fail_json(msg="Existing auth_type is md5. It's recommended "
"to use HSRP v2 when using md5")
commands = []
if state == 'present':
delta = dict(
set(proposed.items()).difference(existing.items()))
if delta:
command = get_commands_config_hsrp(delta, interface, args, existing)
commands.extend(command)
elif state == 'absent':
if existing:
command = get_commands_remove_hsrp(group, interface)
commands.extend(command)
if commands:
if module.check_mode:
module.exit_json(**results)
else:
load_config(module, commands)
# validate IP
if network_api == 'cliconf' and state == 'present':
commands.insert(0, 'config t')
body = run_commands(module, commands)
validate_config(body, vip, module)
results['changed'] = True
if 'configure' in commands:
commands.pop(0)
results['commands'] = commands
module.exit_json(**results)
if __name__ == '__main__':
main()
|
rosmo/ansible
|
lib/ansible/modules/network/nxos/nxos_hsrp.py
|
Python
|
gpl-3.0
| 15,408 | 0.001493 |
from elasticsearch import helpers
from c2corg_api.scripts.migration.batch import Batch
from elasticsearch.helpers import BulkIndexError
import logging
log = logging.getLogger(__name__)
class ElasticBatch(Batch):
"""A batch implementation to do bulk inserts for ElasticSearch.
Example usage:
batch = ElasticBatch(client, 1000)
with batch:
...
batch.add({
'_op_type': 'index',
'_index': index_name,
'_type': SearchDocument._doc_type.name,
'_id': document_id,
'title': 'Abc'
})
"""
def __init__(self, client, batch_size):
super(ElasticBatch, self).__init__(client, batch_size)
self.client = client
self.actions = []
def add(self, action):
self.actions.append(action)
self.flush_or_not()
def should_flush(self):
return len(self.actions) > self.batch_size
def flush(self):
if self.actions:
try:
helpers.bulk(self.client, self.actions)
except BulkIndexError:
# when trying to delete a document that does not exist, an
# error is raised, and other documents are not inserted
log.warning(
'error sending bulk update to ElasticSearch',
exc_info=True)
self.actions = []
|
c2corg/v6_api
|
c2corg_api/scripts/es/es_batch.py
|
Python
|
agpl-3.0
| 1,426 | 0 |
from importlib import import_module
from django.apps import AppConfig as BaseAppConfig
class AppConfig(BaseAppConfig):
name = "gestioneide"
def ready(self):
import_module("gestioneide.receivers")
|
Etxea/gestioneide
|
gestioneide/apps.py
|
Python
|
gpl-3.0
| 217 | 0 |
from timeit import timeit
import pytest
from cfme import test_requirements
from cfme.base.ui import navigate_to
from cfme.services.myservice import MyService
from cfme.tests.test_db_migrate import download_and_migrate_db
from cfme.utils.conf import cfme_data
@pytest.fixture
def appliance_with_performance_db(temp_appliance_extended_db):
app = temp_appliance_extended_db
try:
db_backups = cfme_data['db_backups']
performance_db = db_backups['performance_510']
except KeyError as e:
pytest.skip(f"Couldn't find the performance DB in the cfme_data: {e}")
download_and_migrate_db(app, performance_db.url)
yield app
@test_requirements.service
@pytest.mark.meta(automates=[1688937, 1686433])
def test_services_performance(appliance_with_performance_db):
"""
Polarion:
assignee: jhenner
initialEstimate: 1/4h
casecomponent: Services
Bugzilla:
1688937
1686433
"""
app = appliance_with_performance_db
assert 50000 == app.rest_api.collections.services.count
my_service = MyService(app)
# Timeit seems to accept callable as well as string of Python code on cPython.
assert timeit(lambda: navigate_to(my_service, 'All', use_resetter=False), number=1) < 180
|
nachandr/cfme_tests
|
cfme/tests/services/test_service_performance.py
|
Python
|
gpl-2.0
| 1,275 | 0.001569 |
import pandas as pd
class Status:
Instance = None
@classmethod
def add(cls, message, red = False, verbosity = 1):
cls.get().add_message(message, red, verbosity)
@classmethod
def initialize_status(cls, status_method, verbosity = 1):
# Note: verbosity must be passed in (and not read directly from preferences)
# to avoid a circular reference
status = cls.get()
status.status_method = status_method
status.verbosity = verbosity
@classmethod
def get(cls):
if cls.Instance == None:
cls.Instance = Status()
return cls.Instance
def __init__(self):
self.verbosity = 1
def add_message(self, message, red, verbosity):
if verbosity <= self.verbosity:
if isinstance(message, pd.DataFrame) or isinstance(message, pd.core.frame.DataFrame):
text = str(message.head())
else:
text = str(message)
lines = text.split("\n")
for line in lines:
self.status_method(line, red)
def status_method(self, message, red):
print(message)
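# A minimal usage sketch (the sink below is illustrative): route status lines to a
# custom handler and emit messages at different verbosity levels.
def _example_status_usage():
    lines = []
    # Collect output instead of printing; the second argument flags "red" (error-style) lines.
    Status.initialize_status(lambda message, red: lines.append(message), verbosity=2)
    Status.add("loading dataset", verbosity=1)   # recorded
    Status.add("row-level detail", verbosity=3)  # suppressed at verbosity 2
    return lines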
|
clembou/PCWG
|
pcwg/core/status.py
|
Python
|
mit
| 1,242 | 0.017713 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import receiver
from assessments.business import scores_encodings_deadline
from base.signals import publisher
@receiver(publisher.compute_scores_encodings_deadlines)
def compute_scores_encodings_deadlines(sender, **kwargs):
scores_encodings_deadline.compute_deadline(kwargs['offer_year_calendar'])
@receiver(publisher.compute_student_score_encoding_deadline)
def compute_student_score_encoding_deadline(sender, **kwargs):
scores_encodings_deadline.compute_deadline_by_student(kwargs['session_exam_deadline'])
@receiver(publisher.compute_all_scores_encodings_deadlines)
def compute_all_scores_encodings_deadlines(sender, **kwargs):
scores_encodings_deadline.recompute_all_deadlines(kwargs['academic_calendar'])
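# --- Added illustration (not in the original file) ---
# The receivers above run when the corresponding signals are sent. Assuming the
# publisher signals are plain django.dispatch.Signal instances, a caller would
# dispatch one roughly like this (the calendar object below is hypothetical):
#
#     from base.signals import publisher
#     publisher.compute_scores_encodings_deadlines.send(
#         sender=None, offer_year_calendar=offer_year_calendar)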
|
uclouvain/osis_louvain
|
assessments/signals/subscribers.py
|
Python
|
agpl-3.0
| 2,029 | 0.001479 |
from server import app
app.run()
|
billyoverton/demerit-manager
|
run_server.py
|
Python
|
gpl-2.0
| 33 | 0 |
"""An event loop.
This event loop should handle both asynchronous App Engine RPC objects
(specifically urlfetch, memcache and datastore RPC objects) and arbitrary
callback functions with an optional time delay.
Normally, event loops are singleton objects, though there is no
enforcement of this requirement.
The API here is inspired by Monocle.
"""
import collections
import logging
import os
import threading
import time
from google.appengine.api.apiproxy_rpc import RPC
from google.appengine.datastore import datastore_rpc
from . import utils
logging_debug = utils.logging_debug
IDLE = RPC.IDLE
RUNNING = RPC.RUNNING
FINISHING = RPC.FINISHING
class EventLoop(object):
"""An event loop."""
def __init__(self):
"""Constructor."""
self.current = collections.deque() # FIFO list of (callback, args, kwds)
self.idlers = collections.deque() # Cyclic list of (callback, args, kwds)
self.inactive = 0 # How many idlers in a row were no-ops
self.queue = [] # Sorted list of (time, callback, args, kwds)
self.rpcs = {} # Map of rpc -> (callback, args, kwds)
def insort_event_right(self, event, lo=0, hi=None):
"""Insert event in queue, and keep it sorted assuming queue is sorted.
If event is already in queue, insert it to the right of the rightmost
event (to keep FIFO order).
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(self.queue)
while lo < hi:
mid = (lo + hi) // 2
if event[0] < self.queue[mid][0]: hi = mid
else: lo = mid + 1
self.queue.insert(lo, event)
# TODO: Rename to queue_callback?
def queue_call(self, delay, callback, *args, **kwds):
"""Schedule a function call at a specific time in the future."""
if delay is None:
self.current.append((callback, args, kwds))
return
if delay < 1e9:
when = delay + time.time()
else:
# Times over a billion seconds are assumed to be absolute.
when = delay
self.insort_event_right((when, callback, args, kwds))
def queue_rpc(self, rpc, callback=None, *args, **kwds):
"""Schedule an RPC with an optional callback.
The caller must have previously sent the call to the service.
The optional callback is called with the remaining arguments.
NOTE: If the rpc is a MultiRpc, the callback will be called once
for each sub-RPC. TODO: Is this a good idea?
"""
if rpc is None:
return
if rpc.state not in (RUNNING, FINISHING):
raise RuntimeError('rpc must be sent to service before queueing')
if isinstance(rpc, datastore_rpc.MultiRpc):
rpcs = rpc.rpcs
if len(rpcs) > 1:
# Don't call the callback until all sub-rpcs have completed.
rpc.__done = False
def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds):
if r.state == FINISHING and not r.__done:
r.__done = True
c(*a, **k)
# TODO: And again, what about exceptions?
callback = help_multi_rpc_along
args = ()
kwds = {}
else:
rpcs = [rpc]
for rpc in rpcs:
self.rpcs[rpc] = (callback, args, kwds)
def add_idle(self, callback, *args, **kwds):
"""Add an idle callback.
An idle callback can return True, False or None. These mean:
- None: remove the callback (don't reschedule)
- False: the callback did no work; reschedule later
- True: the callback did some work; reschedule soon
If the callback raises an exception, the traceback is logged and
the callback is removed.
"""
self.idlers.append((callback, args, kwds))
def run_idle(self):
"""Run one of the idle callbacks.
Returns:
True if one was called, False if no idle callback was called.
"""
if not self.idlers or self.inactive >= len(self.idlers):
return False
idler = self.idlers.popleft()
callback, args, kwds = idler
logging_debug('idler: %s', callback.__name__)
res = callback(*args, **kwds)
# See add_idle() for the meaning of the callback return value.
if res is not None:
if res:
self.inactive = 0
else:
self.inactive += 1
self.idlers.append(idler)
else:
logging_debug('idler %s removed', callback.__name__)
return True
def run0(self):
"""Run one item (a callback or an RPC wait_any).
Returns:
A time to sleep if something happened (may be 0);
None if all queues are empty.
"""
if self.current:
self.inactive = 0
callback, args, kwds = self.current.popleft()
logging_debug('nowevent: %s', callback.__name__)
callback(*args, **kwds)
return 0
if self.run_idle():
return 0
delay = None
if self.queue:
delay = self.queue[0][0] - time.time()
if delay <= 0:
self.inactive = 0
_, callback, args, kwds = self.queue.pop(0)
logging_debug('event: %s', callback.__name__)
callback(*args, **kwds)
# TODO: What if it raises an exception?
return 0
if self.rpcs:
self.inactive = 0
rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
if rpc is not None:
logging_debug('rpc: %s.%s', rpc.service, rpc.method)
# Yes, wait_any() may return None even for a non-empty argument.
# But no, it won't ever return an RPC not in its argument.
if rpc not in self.rpcs:
raise RuntimeError('rpc %r was not given to wait_any as a choice %r' %
(rpc, self.rpcs))
callback, args, kwds = self.rpcs[rpc]
del self.rpcs[rpc]
if callback is not None:
callback(*args, **kwds)
# TODO: Again, what about exceptions?
return 0
return delay
def run1(self):
"""Run one item (a callback or an RPC wait_any) or sleep.
Returns:
True if something happened; False if all queues are empty.
"""
delay = self.run0()
if delay is None:
return False
if delay > 0:
time.sleep(delay)
return True
def run(self):
"""Run until there's nothing left to do."""
# TODO: A way to stop running before the queue is empty.
self.inactive = 0
while True:
if not self.run1():
break
class _State(threading.local):
event_loop = None
_EVENT_LOOP_KEY = '__EVENT_LOOP__'
_state = _State()
def get_event_loop():
"""Return a EventLoop instance.
A new instance is created for each new HTTP request. We determine
that we're in a new request by inspecting os.environ, which is reset
at the start of each request. Also, each thread gets its own loop.
"""
# TODO: Make sure this works with the multithreaded Python 2.7 runtime.
ev = None
if os.getenv(_EVENT_LOOP_KEY):
ev = _state.event_loop
if ev is None:
ev = EventLoop()
_state.event_loop = ev
os.environ[_EVENT_LOOP_KEY] = '1'
return ev
def queue_call(*args, **kwds):
ev = get_event_loop()
ev.queue_call(*args, **kwds)
def queue_rpc(rpc, callback=None, *args, **kwds):
ev = get_event_loop()
ev.queue_rpc(rpc, callback, *args, **kwds)
def add_idle(callback, *args, **kwds):
ev = get_event_loop()
ev.add_idle(callback, *args, **kwds)
def run():
ev = get_event_loop()
ev.run()
def run1():
ev = get_event_loop()
return ev.run1()
def run0():
ev = get_event_loop()
return ev.run0()
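# --- Added usage sketch (not part of the original module) ---
# Scheduling plain callbacks on the request-local loop and draining it; the
# callback below is hypothetical and no RPCs are involved.
#
#   def greet(name):
#     logging.info('hello %s', name)
#
#   queue_call(None, greet, 'now')    # runs on the next run0() pass
#   queue_call(0.5, greet, 'later')   # runs roughly 0.5 seconds from now
#   run()                             # loops until callbacks, idlers and RPCs are done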
|
adviti/melange
|
thirdparty/google_appengine/google/appengine/ext/ndb/eventloop.py
|
Python
|
apache-2.0
| 7,479 | 0.009226 |
# See http://www.python.org/dev/peps/pep-0386/ for version numbering, especially NormalizedVersion
from distutils import version
version = version.LooseVersion('0.7.1-dev')
|
foundit/Piped
|
contrib/zmq/piped_zmq/__init__.py
|
Python
|
mit
| 173 | 0.00578 |
from stage import *
import os
use_gpu = os.environ.get('GNUMPY_USE_GPU', 'yes') == 'yes'
if use_gpu:
import gnumpy as gpu
import gnumpy as gnp
class Map(Stage):
def __init__(self,
outputDim,
activeFn,
inputNames=None,
initRange=1.0,
bias=True,
biasInitConst=-1.0,
initSeed=2,
needInit=True,
initWeights=0,
initType='zeroMean',
learningRate=0.0,
learningRateAnnealConst=0.0,
momentum=0.0,
deltaMomentum=0.0,
weightClip=0.0,
gradientClip=0.0,
weightRegConst=0.0,
outputdEdX=True,
defaultValue=0.0,
gpu=use_gpu,
name=None):
Stage.__init__(self,
name=name,
inputNames=inputNames,
outputDim=outputDim,
defaultValue=defaultValue,
learningRate=learningRate,
learningRateAnnealConst=learningRateAnnealConst,
momentum=momentum,
deltaMomentum=deltaMomentum,
weightClip=weightClip,
gradientClip=gradientClip,
weightRegConst=weightRegConst,
gpu=gpu,
outputdEdX=outputdEdX)
self.bias = bias
self.activeFn = activeFn
self.inputDim = None
self.random = np.random.RandomState(initSeed)
if not needInit:
if self.gpu:
self.W = gnp.as_garray(initWeights)
else:
self.W = initWeights
else:
# Lazy initialize the weights until the first data arrives
self.W = None
self.initRange = initRange
self.biasInitConst = biasInitConst
self.initType = initType
self.X = 0
self.Y = 0
pass
def initWeights(self):
if self.initType == 'zeroMean':
r0 = -self.initRange/2.0
r1 = self.initRange/2.0
elif self.initType == 'positive':
r0 = 0.0
r1 = self.initRange
else:
raise Exception('Unknown initialization type: ' + self.initType)
if self.bias:
if self.biasInitConst >= 0.0:
self.W = np.concatenate((self.random.uniform(
r0, r1, (self.inputDim, self.outputDim)),
np.ones((1, self.outputDim)) * self.biasInitConst), axis=0)
else:
self.W = self.random.uniform(
r0, r1, (self.inputDim + 1, self.outputDim))
else:
self.W = self.random.uniform(
-self.initRange/2.0, self.initRange/2.0, (self.inputDim, self.outputDim))
if self.gpu:
self.W = gpu.as_garray(self.W.astype('float32'))
def forward(self, X):
if self.inputDim is None: self.inputDim = X.shape[-1]
if self.W is None: self.initWeights()
if self.bias:
self.X = np.concatenate((X, np.ones((X.shape[0], 1), dtype=X.dtype)), axis=-1)
else:
self.X = X
if self.gpu:
self.X = gpu.as_garray(self.X.astype('float32'))
Z = gpu.dot(self.X, self.W)
Z = Z.as_numpy_array(dtype='float32')
self.Y = self.activeFn.forward(Z)
else:
Z = np.dot(self.X, self.W)
self.Y = self.activeFn.forward(Z)
return self.Y
def backward(self, dEdY):
dEdZ = self.activeFn.backward(dEdY, self.Y, 0)
if self.gpu:
gdEdZ = gpu.as_garray(dEdZ.astype('float32'))
self.dEdW = gpu.dot(self.X.transpose(), gdEdZ)
if self.bias:
dEdX = gpu.dot(gdEdZ, self.W[:-1, :].transpose())
else:
dEdX = gpu.dot(gdEdZ, self.W.transpose())
dEdX = gpu.as_numpy_array(dEdX)
else:
self.dEdW = np.dot(self.X.transpose(), dEdZ)
if self.bias:
dEdX = np.dot(dEdZ, self.W[:-1, :].transpose())
else:
dEdX = np.dot(dEdZ, self.W.transpose())
return dEdX if self.outputdEdX else None
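# --- Added commentary (not in the original file) ---
# Map is a fully connected layer: forward() computes Y = f(X.W), appending a
# constant-1 column to X when bias is enabled so the last row of W acts as the
# bias. backward() maps dE/dY through the activation to dE/dZ, accumulates
# dE/dW = X^T.dE/dZ for the weight update, and returns dE/dX = dE/dZ.W^T
# (dropping W's bias row when bias is enabled).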
|
renmengye/imageqa-public
|
src/nn/map.py
|
Python
|
mit
| 4,455 | 0.004265 |
# -*- coding: utf-8 -*-
"""
Runs functions in the pipeline to get query results and does some caching.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import six # NOQA
from os.path import exists
#from ibeis.algo.hots import query_request
#from ibeis.algo.hots import hots_query_result
#from ibeis.algo.hots import exceptions as hsexcept
from ibeis.algo.hots import chip_match
from ibeis.algo.hots import pipeline
from ibeis.algo.hots import _pipeline_helpers as plh # NOQA
(print, rrr, profile) = ut.inject2(__name__, '[mc4]')
# TODO: Move to params
USE_HOTSPOTTER_CACHE = pipeline.USE_HOTSPOTTER_CACHE
USE_CACHE = not ut.get_argflag(('--nocache-query', '--noqcache')) and USE_HOTSPOTTER_CACHE
USE_BIGCACHE = not ut.get_argflag(('--nocache-big', '--no-bigcache-query', '--noqcache', '--nobigcache')) and ut.USE_CACHE
SAVE_CACHE = not ut.get_argflag('--nocache-save')
#MIN_BIGCACHE_BUNDLE = 20
#MIN_BIGCACHE_BUNDLE = 150
MIN_BIGCACHE_BUNDLE = 64
HOTS_BATCH_SIZE = ut.get_argval('--hots-batch-size', type_=int, default=None)
#----------------------
# Main Query Logic
#----------------------
def empty_query(ibs, qaids):
r"""
Hack to give an empty query a query result object
Args:
ibs (ibeis.IBEISController): ibeis controller object
qaids (list):
Returns:
tuple: (qaid2_cm, qreq_)
CommandLine:
python -m ibeis.algo.hots.match_chips4 --test-empty_query
python -m ibeis.algo.hots.match_chips4 --test-empty_query --show
Example:
>>> # ENABLE_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb('testdb1')
>>> qaids = ibs.get_valid_aids(species=ibeis.const.TEST_SPECIES.ZEB_PLAIN)
>>> # execute function
>>> (qaid2_cm, qreq_) = empty_query(ibs, qaids)
>>> # verify results
>>> result = str((qaid2_cm, qreq_))
>>> print(result)
>>> cm = qaid2_cm[qaids[0]]
>>> ut.assert_eq(len(cm.get_top_aids()), 0)
>>> ut.quit_if_noshow()
>>> cm.ishow_top(ibs, update=True, make_figtitle=True, show_query=True, sidebyside=False)
>>> from matplotlib import pyplot as plt
>>> plt.show()
"""
daids = []
qreq_ = ibs.new_query_request(qaids, daids)
cm = qreq_.make_empty_chip_matches()
qaid2_cm = dict(zip(qaids, cm))
return qaid2_cm, qreq_
def submit_query_request_nocache(ibs, qreq_, verbose=pipeline.VERB_PIPELINE):
""" depricate """
assert len(qreq_.qaids) > 0, ' no current query aids'
if len(qreq_.daids) == 0:
print('[mc4] WARNING no daids... returning empty query')
qaid2_cm, qreq_ = empty_query(ibs, qreq_.qaids)
return qaid2_cm
save_qcache = False
qaid2_cm = execute_query2(ibs, qreq_, verbose, save_qcache)
return qaid2_cm
@profile
def submit_query_request(ibs, qaid_list, daid_list, use_cache=None,
use_bigcache=None, cfgdict=None, qreq_=None,
verbose=None, save_qcache=None,
prog_hook=None):
"""
The standard query interface.
TODO: rename use_cache to use_qcache
Checks a big cache for qaid2_cm. If cache miss, tries to load each cm
    individually. On an individual cache miss, it performs the query.
Args:
ibs (ibeis.IBEISController) : ibeis control object
qaid_list (list): query annotation ids
daid_list (list): database annotation ids
use_cache (bool):
use_bigcache (bool):
Returns:
qaid2_cm (dict): dict of QueryResult objects
CommandLine:
python -m ibeis.algo.hots.match_chips4 --test-submit_query_request
Examples:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> qaid_list = [1]
>>> daid_list = [1, 2, 3, 4, 5]
>>> use_bigcache = True
>>> use_cache = True
>>> ibs = ibeis.opendb(db='testdb1')
>>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict={}, verbose=True)
>>> qaid2_cm = submit_query_request(ibs, qaid_list, daid_list, use_cache, use_bigcache, qreq_=qreq_)
"""
# Get flag defaults if necessary
if verbose is None:
verbose = pipeline.VERB_PIPELINE
if use_cache is None:
use_cache = USE_CACHE
if save_qcache is None:
save_qcache = SAVE_CACHE
if use_bigcache is None:
use_bigcache = USE_BIGCACHE
# Create new query request object to store temporary state
if verbose:
#print('[mc4] --- Submit QueryRequest_ --- ')
ut.colorprint('[mc4] --- Submit QueryRequest_ --- ', 'darkyellow')
assert qreq_ is not None, 'query request must be prebuilt'
qreq_.prog_hook = prog_hook
# --- BIG CACHE ---
# Do not use bigcache single queries
use_bigcache_ = (use_bigcache and use_cache and
len(qaid_list) > MIN_BIGCACHE_BUNDLE)
if (use_bigcache_ or save_qcache) and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
bc_dpath, bc_fname, bc_cfgstr = qreq_.get_bigcache_info()
if use_bigcache_:
# Try and load directly from a big cache
try:
qaid2_cm = ut.load_cache(bc_dpath, bc_fname, bc_cfgstr)
cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
except (IOError, AttributeError):
pass
else:
return cm_list
# ------------
# Execute query request
qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=verbose)
# ------------
if save_qcache and len(qaid_list) > MIN_BIGCACHE_BUNDLE:
ut.save_cache(bc_dpath, bc_fname, bc_cfgstr, qaid2_cm)
cm_list = [qaid2_cm[qaid] for qaid in qaid_list]
return cm_list
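# Added commentary (not in the original file): caching works at two levels above.
# For bundles larger than MIN_BIGCACHE_BUNDLE the whole {qaid: ChipMatch} dict is
# saved/loaded as a single "big cache" entry; otherwise, or on a big-cache miss,
# execute_query_and_save_L1 falls back to per-ChipMatch files and recomputes only
# the queries whose individual cache files are missing or unreadable.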
@profile
def execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose=True, batch_size=None):
"""
Args:
ibs (ibeis.IBEISController):
qreq_ (ibeis.QueryRequest):
use_cache (bool):
Returns:
qaid2_cm
CommandLine:
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:0
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:1
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:2
python -m ibeis.algo.hots.match_chips4 --test-execute_query_and_save_L1:3
Example0:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, False, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example1:
>>> # SLOW_DOCTEST
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> cfgdict1 = dict(codename='vsone', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, False, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example1:
>>> # SLOW_DOCTEST
>>> # TEST SAVE
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, True, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example2:
>>> # SLOW_DOCTEST
>>> # TEST LOAD
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=True)
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3, 4])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = True, True, True
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache, save_qcache, verbose)
>>> print(qaid2_cm)
Example2:
>>> # ENABLE_DOCTEST
>>> # TEST PARTIAL HIT
>>> from ibeis.algo.hots.match_chips4 import * # NOQA
>>> import ibeis
>>> cfgdict1 = dict(codename='vsmany', sv_on=False, prescore_method='csum')
>>> p = 'default' + ut.get_cfg_lbl(cfgdict1)
>>> qreq_ = ibeis.main_helpers.testdata_qreq_(p=p, qaid_override=[1, 2, 3,
>>> 4, 5, 6,
>>> 7, 8, 9])
>>> ibs = qreq_.ibs
>>> use_cache, save_qcache, verbose = False, True, False
>>> qaid2_cm = execute_query_and_save_L1(ibs, qreq_, use_cache,
>>> save_qcache, verbose,
>>> batch_size=3)
>>> cm = qaid2_cm[1]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[4]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[5]
>>> ut.delete(cm.get_fpath(qreq_))
>>> cm = qaid2_cm[6]
>>> ut.delete(cm.get_fpath(qreq_))
>>> print('Re-execute')
>>> qaid2_cm_ = execute_query_and_save_L1(ibs, qreq_, use_cache,
>>> save_qcache, verbose,
>>> batch_size=3)
>>> assert all([qaid2_cm_[qaid] == qaid2_cm[qaid] for qaid in qreq_.qaids])
>>> [ut.delete(fpath) for fpath in qreq_.get_chipmatch_fpaths(qreq_.qaids)]
Ignore:
other = cm_ = qaid2_cm_[qaid]
cm = qaid2_cm[qaid]
"""
if use_cache:
if ut.VERBOSE:
print('[mc4] cache-query is on')
if ut.DEBUG2:
# sanity check
qreq_.assert_self(ibs)
# Try loading as many cached results as possible
qaid2_cm_hit = {}
external_qaids = qreq_.qaids
fpath_list = qreq_.get_chipmatch_fpaths(external_qaids)
exists_flags = [exists(fpath) for fpath in fpath_list]
qaids_hit = ut.compress(external_qaids, exists_flags)
fpaths_hit = ut.compress(fpath_list, exists_flags)
fpath_iter = ut.ProgressIter(
fpaths_hit, nTotal=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
lbl='loading cache hits', adjust=True, freq=1)
try:
cm_hit_list = [
chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
for fpath in fpath_iter
]
assert all([qaid == cm.qaid for qaid, cm in zip(qaids_hit, cm_hit_list)]), (
'inconsistent')
qaid2_cm_hit = {cm.qaid: cm for cm in cm_hit_list}
except chip_match.NeedRecomputeError:
print('NeedRecomputeError: Some cached chips need to recompute')
fpath_iter = ut.ProgressIter(
fpaths_hit, nTotal=len(fpaths_hit), enabled=len(fpaths_hit) > 1,
lbl='checking chipmatch cache', adjust=True, freq=1)
# Recompute those that fail loading
qaid2_cm_hit = {}
for fpath in fpath_iter:
try:
cm = chip_match.ChipMatch.load_from_fpath(fpath, verbose=False)
except chip_match.NeedRecomputeError:
pass
else:
qaid2_cm_hit[cm.qaid] = cm
print('%d / %d cached matches need to be recomputed' % (
len(qaids_hit) - len(qaid2_cm_hit), len(qaids_hit)))
if len(qaid2_cm_hit) == len(external_qaids):
return qaid2_cm_hit
else:
if len(qaid2_cm_hit) > 0 and not ut.QUIET:
print('... partial cm cache hit %d/%d' % (
len(qaid2_cm_hit), len(external_qaids)))
cachehit_qaids = list(qaid2_cm_hit.keys())
# mask queries that have already been executed
qreq_.set_external_qaid_mask(cachehit_qaids)
else:
if ut.VERBOSE:
print('[mc4] cache-query is off')
qaid2_cm_hit = {}
qaid2_cm = execute_query2(ibs, qreq_, verbose, save_qcache, batch_size)
if ut.DEBUG2:
# sanity check
qreq_.assert_self(ibs)
# Merge cache hits with computed misses
if len(qaid2_cm_hit) > 0:
qaid2_cm.update(qaid2_cm_hit)
qreq_.set_external_qaid_mask(None) # undo state changes
return qaid2_cm
@profile
def execute_query2(ibs, qreq_, verbose, save_qcache, batch_size=None):
"""
Breaks up query request into several subrequests
to process "more efficiently" and safer as well.
"""
with ut.Timer('Timing Query'):
if qreq_.prog_hook is not None:
preload_hook, query_hook = qreq_.prog_hook.subdivide(spacing=[0, .15, .8])
preload_hook(0, lbl='preloading')
qreq_.prog_hook = query_hook
else:
preload_hook = None
# Load features / weights for all annotations
qreq_.lazy_preload(prog_hook=preload_hook, verbose=verbose and ut.NOT_QUIET)
all_qaids = qreq_.qaids
print('len(missed_qaids) = %r' % (len(all_qaids),))
qaid2_cm = {}
# vsone must have a chunksize of 1
if batch_size is None:
if HOTS_BATCH_SIZE is None:
hots_batch_size = ibs.cfg.other_cfg.hots_batch_size
else:
hots_batch_size = HOTS_BATCH_SIZE
else:
hots_batch_size = batch_size
chunksize = 1 if qreq_.qparams.vsone else hots_batch_size
# Iterate over vsone queries in chunks.
nTotalChunks = ut.get_nTotalChunks(len(all_qaids), chunksize)
qaid_chunk_iter = ut.ichunks(all_qaids, chunksize)
_qreq_iter = (qreq_.shallowcopy(qaids=qaids) for qaids in qaid_chunk_iter)
sub_qreq_iter = ut.ProgressIter(_qreq_iter, nTotal=nTotalChunks, freq=1,
lbl='[mc4] query chunk: ',
prog_hook=qreq_.prog_hook)
for sub_qreq_ in sub_qreq_iter:
if ut.VERBOSE:
print('Generating vsmany chunk')
sub_cm_list = pipeline.request_ibeis_query_L0(ibs, sub_qreq_,
verbose=verbose)
assert len(sub_qreq_.qaids) == len(sub_cm_list), 'not aligned'
assert all([qaid == cm.qaid for qaid, cm in
                        zip(sub_qreq_.qaids, sub_cm_list)]), 'not corresponding'
if save_qcache:
fpath_list = qreq_.get_chipmatch_fpaths(sub_qreq_.qaids)
_iter = zip(sub_cm_list, fpath_list)
_iter = ut.ProgressIter(_iter, nTotal=len(sub_cm_list),
lbl='saving chip matches', adjust=True, freq=1)
for cm, fpath in _iter:
cm.save_to_fpath(fpath, verbose=False)
else:
if ut.VERBOSE:
print('[mc4] not saving vsmany chunk')
qaid2_cm.update({cm.qaid: cm for cm in sub_cm_list})
return qaid2_cm
if __name__ == '__main__':
"""
python -m ibeis.algo.hots.match_chips4
python -m ibeis.algo.hots.match_chips4 --allexamples --testslow
"""
import multiprocessing
multiprocessing.freeze_support()
ut.doctest_funcs()
|
SU-ECE-17-7/ibeis
|
ibeis/algo/hots/match_chips4.py
|
Python
|
apache-2.0
| 15,985 | 0.001564 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-12 11:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ossuo', '0021_merge'),
('ossuo', '0022_signupformpage_signupformpagebullet_signupformpagelogo_signupformpagequote_signupformpageresponse'),
]
operations = [
]
|
spketoundi/CamODI
|
waespk/core/migrations/0023_merge.py
|
Python
|
mit
| 399 | 0.002506 |
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Ben Swartzlander. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Andrew Kerr. All rights reserved.
# Copyright (c) 2014 Jeff Applewhite. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver library for NetApp 7/C-mode block storage systems.
"""
import math
import sys
import uuid
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class NetAppLun(object):
"""Represents a LUN on NetApp storage."""
def __init__(self, handle, name, size, metadata_dict):
self.handle = handle
self.name = name
self.size = size
self.metadata = metadata_dict or {}
def get_metadata_property(self, prop):
"""Get the metadata property of a LUN."""
if prop in self.metadata:
return self.metadata[prop]
name = self.name
LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s",
{'prop': prop, 'name': name})
def __str__(self, *args, **kwargs):
return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % (
self.handle, self.name, self.size, self.metadata)
class NetAppBlockStorageLibrary(object):
"""NetApp block storage library for Data ONTAP."""
# do not increment this as it may be used in volume type definitions
VERSION = "1.0.0"
REQUIRED_FLAGS = ['netapp_login', 'netapp_password',
'netapp_server_hostname']
ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows',
'windows_2008', 'windows_gpt', 'solaris',
'solaris_efi', 'netware', 'openvms', 'hyper_v']
ALLOWED_IGROUP_HOST_TYPES = ['linux', 'aix', 'hpux', 'windows', 'solaris',
'netware', 'default', 'vmware', 'openvms',
'xen', 'hyper_v']
DEFAULT_LUN_OS = 'linux'
DEFAULT_HOST_TYPE = 'linux'
def __init__(self, driver_name, driver_protocol, **kwargs):
na_utils.validate_instantiation(**kwargs)
self.driver_name = driver_name
self.driver_protocol = driver_protocol
self.zapi_client = None
self._stats = {}
self.lun_table = {}
self.lun_ostype = None
self.host_type = None
self.lookup_service = fczm_utils.create_lookup_service()
self.app_version = kwargs.get("app_version", "unknown")
self.configuration = kwargs['configuration']
self.configuration.append_config_values(na_opts.netapp_connection_opts)
self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
self.configuration.append_config_values(na_opts.netapp_transport_opts)
self.configuration.append_config_values(
na_opts.netapp_provisioning_opts)
self.configuration.append_config_values(na_opts.netapp_san_opts)
def do_setup(self, context):
na_utils.check_flags(self.REQUIRED_FLAGS, self.configuration)
self.lun_ostype = (self.configuration.netapp_lun_ostype
or self.DEFAULT_LUN_OS)
self.host_type = (self.configuration.netapp_host_type
or self.DEFAULT_HOST_TYPE)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate.
Discovers the LUNs on the NetApp server.
"""
if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES:
msg = _("Invalid value for NetApp configuration"
" option netapp_lun_ostype.")
LOG.error(msg)
raise exception.NetAppDriverException(msg)
if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES:
msg = _("Invalid value for NetApp configuration"
" option netapp_host_type.")
LOG.error(msg)
raise exception.NetAppDriverException(msg)
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
LOG.debug("Success getting list of LUNs from server.")
def get_pool(self, volume):
"""Return pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata') or dict()
return metadata.get('Volume', None)
def create_volume(self, volume):
"""Driver entry point for creating a new volume (Data ONTAP LUN)."""
LOG.debug('create_volume on %s', volume['host'])
# get Data ONTAP volume name as pool name
pool_name = volume_utils.extract_host(volume['host'], level='pool')
if pool_name is None:
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
extra_specs = na_utils.get_volume_extra_specs(volume)
lun_name = volume['name']
size = int(volume['size']) * units.Gi
metadata = {'OsType': self.lun_ostype,
'SpaceReserved': 'true',
'Path': '/vol/%s/%s' % (pool_name, lun_name)}
qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
try:
self._create_lun(pool_name, lun_name, size, metadata,
qos_policy_group_name)
except Exception:
LOG.exception(_LE("Exception creating LUN %(name)s in pool "
"%(pool)s."),
{'name': lun_name, 'pool': pool_name})
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
msg = _("Volume %s could not be created.")
raise exception.VolumeBackendAPIException(data=msg % (
volume['name']))
LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s',
{'name': lun_name, 'qos': qos_policy_group_info})
metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name)
metadata['Volume'] = pool_name
metadata['Qtree'] = None
handle = self._create_lun_handle(metadata)
self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata))
def _setup_qos_for_volume(self, volume, extra_specs):
return None
def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info):
return
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
if not metadata:
LOG.warning(_LW("No entry in LUN table for volume/snapshot"
" %(name)s."), {'name': name})
return
self.zapi_client.destroy_lun(metadata['Path'])
self.lun_table.pop(name)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def create_export(self, context, volume):
"""Driver entry point to get the export info for a new volume."""
handle = self._get_lun_attr(volume['name'], 'handle')
return {'provider_location': handle}
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume.
Since exporting is idempotent in this driver, we have nothing
to do for unexporting.
"""
pass
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot.
This driver implements snapshots by using efficient single-file
(LUN) cloning.
"""
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
lun = self._get_lun_from_table(vol_name)
self._clone_lun(lun.name, snapshot_name, space_reserved='false')
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
self.delete_volume(snapshot)
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
source = {'name': snapshot['name'], 'size': snapshot['volume_size']}
return self._clone_source_to_destination(source, volume)
def create_cloned_volume(self, volume, src_vref):
src_lun = self._get_lun_from_table(src_vref['name'])
source = {'name': src_lun.name, 'size': src_vref['size']}
return self._clone_source_to_destination(source, volume)
def _clone_source_to_destination(self, source, destination_volume):
source_size = source['size']
destination_size = destination_volume['size']
source_name = source['name']
destination_name = destination_volume['name']
extra_specs = na_utils.get_volume_extra_specs(destination_volume)
qos_policy_group_info = self._setup_qos_for_volume(
destination_volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
try:
self._clone_lun(source_name, destination_name,
space_reserved='true',
qos_policy_group_name=qos_policy_group_name)
if destination_size != source_size:
try:
self.extend_volume(
destination_volume, destination_size,
qos_policy_group_name=qos_policy_group_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(
_LE("Resizing %s failed. Cleaning volume."),
destination_volume['id'])
self.delete_volume(destination_volume)
except Exception:
LOG.exception(_LE("Exception cloning volume %(name)s from source "
"volume %(source)s."),
{'name': destination_name, 'source': source_name})
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
msg = _("Volume %s could not be created from source volume.")
raise exception.VolumeBackendAPIException(
data=msg % destination_name)
def _create_lun(self, volume_name, lun_name, size,
metadata, qos_policy_group_name=None):
"""Creates a LUN, handling Data ONTAP differences as needed."""
raise NotImplementedError()
def _create_lun_handle(self, metadata):
"""Returns LUN handle based on filer type."""
raise NotImplementedError()
def _extract_lun_info(self, lun):
"""Extracts the LUNs from API and populates the LUN table."""
meta_dict = self._create_lun_meta(lun)
path = lun.get_child_content('path')
(_rest, _splitter, name) = path.rpartition('/')
handle = self._create_lun_handle(meta_dict)
size = lun.get_child_content('size')
return NetAppLun(handle, name, size, meta_dict)
def _extract_and_populate_luns(self, api_luns):
"""Extracts the LUNs from API and populates the LUN table."""
for lun in api_luns:
discovered_lun = self._extract_lun_info(lun)
self._add_lun_to_table(discovered_lun)
def _map_lun(self, name, initiator_list, initiator_type, lun_id=None):
"""Maps LUN to the initiator(s) and returns LUN ID assigned."""
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
igroup_name, ig_host_os, ig_type = self._get_or_create_igroup(
initiator_list, initiator_type, self.host_type)
if ig_host_os != self.host_type:
LOG.warning(_LW("LUN misalignment may occur for current"
" initiator group %(ig_nm)s) with host OS type"
" %(ig_os)s. Please configure initiator group"
" manually according to the type of the"
" host OS."),
{'ig_nm': igroup_name, 'ig_os': ig_host_os})
try:
return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
except na_api.NaApiError:
exc_info = sys.exc_info()
(_igroup, lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
if lun_id is not None:
return lun_id
else:
raise exc_info[0], exc_info[1], exc_info[2]
def _unmap_lun(self, path, initiator_list):
"""Unmaps a LUN from given initiator."""
(igroup_name, _lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
self.zapi_client.unmap_lun(path, igroup_name)
def _find_mapped_lun_igroup(self, path, initiator_list):
"""Find an igroup for a LUN mapped to the given initiator(s)."""
raise NotImplementedError()
def _has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
return self.zapi_client.has_luns_mapped_to_initiators(initiator_list)
def _get_or_create_igroup(self, initiator_list, initiator_group_type,
host_os_type):
"""Checks for an igroup for a set of one or more initiators.
Creates igroup if not already present with given host os type,
igroup type and adds initiators.
"""
igroups = self.zapi_client.get_igroup_by_initiators(initiator_list)
igroup_name = None
if igroups:
igroup = igroups[0]
igroup_name = igroup['initiator-group-name']
host_os_type = igroup['initiator-group-os-type']
initiator_group_type = igroup['initiator-group-type']
if not igroup_name:
igroup_name = self._create_igroup_add_initiators(
initiator_group_type, host_os_type, initiator_list)
return igroup_name, host_os_type, initiator_group_type
def _create_igroup_add_initiators(self, initiator_group_type,
host_os_type, initiator_list):
"""Creates igroup and adds initiators."""
igroup_name = na_utils.OPENSTACK_PREFIX + six.text_type(uuid.uuid4())
self.zapi_client.create_igroup(igroup_name, initiator_group_type,
host_os_type)
for initiator in initiator_list:
self.zapi_client.add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):
msg = _("Object is not a NetApp LUN.")
raise exception.VolumeBackendAPIException(data=msg)
self.lun_table[lun.name] = lun
def _get_lun_from_table(self, name):
"""Gets LUN from cache table.
Refreshes cache if LUN not found in cache.
"""
lun = self.lun_table.get(name)
if lun is None:
lun_list = self.zapi_client.get_lun_list()
self._extract_and_populate_luns(lun_list)
lun = self.lun_table.get(name)
if lun is None:
raise exception.VolumeNotFound(volume_id=name)
return lun
def _clone_lun(self, name, new_name, space_reserved='true',
qos_policy_group_name=None, src_block=0, dest_block=0,
block_count=0):
"""Clone LUN with the given name to the new name."""
raise NotImplementedError()
def _get_lun_attr(self, name, attr):
"""Get the LUN attribute if found else None."""
try:
attr = getattr(self._get_lun_from_table(name), attr)
return attr
except exception.VolumeNotFound as e:
LOG.error(_LE("Message: %s"), e.msg)
except Exception as e:
LOG.error(_LE("Error getting LUN attribute. Exception: %s"), e)
return None
def _create_lun_meta(self, lun):
raise NotImplementedError()
def _get_fc_target_wwpns(self, include_partner=True):
raise NotImplementedError()
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
raise NotImplementedError()
def extend_volume(self, volume, new_size, qos_policy_group_name=None):
"""Extend an existing volume to the new size."""
name = volume['name']
lun = self._get_lun_from_table(name)
path = lun.metadata['Path']
curr_size_bytes = six.text_type(lun.size)
new_size_bytes = six.text_type(int(new_size) * units.Gi)
# Reused by clone scenarios.
# Hence comparing the stored size.
if curr_size_bytes != new_size_bytes:
lun_geometry = self.zapi_client.get_lun_geometry(path)
if (lun_geometry and lun_geometry.get("max_resize")
and int(lun_geometry.get("max_resize")) >=
int(new_size_bytes)):
self.zapi_client.do_direct_resize(path, new_size_bytes)
else:
self._do_sub_clone_resize(
path, new_size_bytes,
qos_policy_group_name=qos_policy_group_name)
self.lun_table[name].size = new_size_bytes
else:
LOG.info(_LI("No need to extend volume %s"
" as it is already the requested new size."), name)
def _get_vol_option(self, volume_name, option_name):
"""Get the value for the volume option."""
value = None
options = self.zapi_client.get_volume_options(volume_name)
for opt in options:
if opt.get_child_content('name') == option_name:
value = opt.get_child_content('value')
break
return value
def _do_sub_clone_resize(self, path, new_size_bytes,
qos_policy_group_name=None):
"""Does sub LUN clone after verification.
Clones the block ranges and swaps
the LUNs also deletes older LUN
after a successful clone.
"""
seg = path.split("/")
LOG.info(_LI("Resizing LUN %s to new size using clone operation."),
seg[-1])
name = seg[-1]
vol_name = seg[2]
lun = self._get_lun_from_table(name)
metadata = lun.metadata
compression = self._get_vol_option(vol_name, 'compression')
if compression == "on":
msg = _('%s cannot be resized using clone operation'
' as it is hosted on compressed volume')
raise exception.VolumeBackendAPIException(data=msg % name)
else:
block_count = self._get_lun_block_count(path)
if block_count == 0:
msg = _('%s cannot be resized using clone operation'
' as it contains no blocks.')
raise exception.VolumeBackendAPIException(data=msg % name)
new_lun = 'new-%s' % name
self.zapi_client.create_lun(
vol_name, new_lun, new_size_bytes, metadata,
qos_policy_group_name=qos_policy_group_name)
try:
self._clone_lun(name, new_lun, block_count=block_count,
qos_policy_group_name=qos_policy_group_name)
self._post_sub_clone_resize(path)
except Exception:
with excutils.save_and_reraise_exception():
new_path = '/vol/%s/%s' % (vol_name, new_lun)
self.zapi_client.destroy_lun(new_path)
def _post_sub_clone_resize(self, path):
"""Try post sub clone resize in a transactional manner."""
st_tm_mv, st_nw_mv, st_del_old = None, None, None
seg = path.split("/")
LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
new_lun = 'new-%s' % (seg[-1])
tmp_lun = 'tmp-%s' % (seg[-1])
tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
new_path = "/vol/%s/%s" % (seg[2], new_lun)
try:
st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
st_nw_mv = self.zapi_client.move_lun(new_path, path)
st_del_old = self.zapi_client.destroy_lun(tmp_path)
except Exception as e:
if st_tm_mv is None:
msg = _("Failure staging LUN %s to tmp.")
raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
else:
if st_nw_mv is None:
self.zapi_client.move_lun(tmp_path, path)
msg = _("Failure moving new cloned LUN to %s.")
raise exception.VolumeBackendAPIException(
data=msg % (seg[-1]))
elif st_del_old is None:
LOG.error(_LE("Failure deleting staged tmp LUN %s."),
tmp_lun)
else:
LOG.error(_LE("Unknown exception in"
" post clone resize LUN %s."), seg[-1])
LOG.error(_LE("Exception details: %s"), e)
def _get_lun_block_count(self, path):
"""Gets block counts for the LUN."""
LOG.debug("Getting LUN block count.")
lun_infos = self.zapi_client.get_lun_by_args(path=path)
if not lun_infos:
seg = path.split('/')
msg = _('Failure getting LUN info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
bs = int(lun_info.get_child_content('block-size'))
ls = int(lun_info.get_child_content('size'))
block_count = ls / bs
return block_count
def _check_volume_type_for_lun(self, volume, lun, existing_ref,
extra_specs):
"""Checks if lun satifies the volume type."""
raise NotImplementedError()
def manage_existing(self, volume, existing_ref):
"""Brings an existing storage object under Cinder management.
existing_ref can contain source-id or source-name or both.
source-id: lun uuid.
source-name: complete lun path eg. /vol/vol0/lun.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
extra_specs = na_utils.get_volume_extra_specs(volume)
self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs)
qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs)
qos_policy_group_name = (
na_utils.get_qos_policy_group_name_from_info(
qos_policy_group_info))
path = lun.get_metadata_property('Path')
if lun.name == volume['name']:
new_path = path
LOG.info(_LI("LUN with given ref %s need not be renamed "
"during manage operation."), existing_ref)
else:
(rest, splitter, name) = path.rpartition('/')
new_path = '%s/%s' % (rest, volume['name'])
self.zapi_client.move_lun(path, new_path)
lun = self._get_existing_vol_with_manage_ref(
{'source-name': new_path})
if qos_policy_group_name is not None:
self.zapi_client.set_lun_qos_policy_group(new_path,
qos_policy_group_name)
self._add_lun_to_table(lun)
LOG.info(_LI("Manage operation completed for LUN with new path"
" %(path)s and uuid %(uuid)s."),
{'path': lun.get_metadata_property('Path'),
'uuid': lun.get_metadata_property('UUID')})
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
"""
lun = self._get_existing_vol_with_manage_ref(existing_ref)
return int(math.ceil(float(lun.size) / units.Gi))
def _get_existing_vol_with_manage_ref(self, existing_ref):
"""Get the corresponding LUN from the storage server."""
uuid = existing_ref.get('source-id')
path = existing_ref.get('source-name')
if not (uuid or path):
reason = _('Reference must contain either source-id'
' or source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
lun_info = {}
lun_info.setdefault('path', path if path else None)
if hasattr(self, 'vserver') and uuid:
lun_info['uuid'] = uuid
luns = self.zapi_client.get_lun_by_args(**lun_info)
if luns:
for lun in luns:
netapp_lun = self._extract_lun_info(lun)
storage_valid = self._is_lun_valid_on_storage(netapp_lun)
uuid_valid = True
if uuid:
if netapp_lun.get_metadata_property('UUID') == uuid:
uuid_valid = True
else:
uuid_valid = False
if storage_valid and uuid_valid:
return netapp_lun
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=(_('LUN not found with given ref %s.') % existing_ref))
def _is_lun_valid_on_storage(self, lun):
"""Validate lun specific to storage system."""
return True
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
"""
managed_lun = self._get_lun_from_table(volume['name'])
LOG.info(_LI("Unmanaged LUN with current path %(path)s and uuid "
"%(uuid)s."),
{'path': managed_lun.get_metadata_property('Path'),
'uuid': managed_lun.get_metadata_property('UUID')
or 'unknown'})
def initialize_connection_iscsi(self, volume, connector):
"""Driver entry point to attach a volume to an instance.
Do the LUN masking on the storage system so the initiator can access
the LUN on the target. Also return the iSCSI properties so the
initiator can find the LUN. This implementation does not call
_get_iscsi_properties() to get the properties because cannot store the
LUN number in the database. We only find out what the LUN number will
be during this method call so we construct the properties dictionary
ourselves.
"""
initiator_name = connector['initiator']
name = volume['name']
lun_id = self._map_lun(name, [initiator_name], 'iscsi', None)
LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
target_list = self.zapi_client.get_iscsi_target_details()
if not target_list:
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target list for the LUN %s') % name)
LOG.debug("Successfully fetched target list for LUN %(name)s and "
"initiator %(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
preferred_target = self._get_preferred_target_from_list(
target_list)
if preferred_target is None:
msg = _('Failed to get target portal for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
(address, port) = (preferred_target['address'],
preferred_target['port'])
iqn = self.zapi_client.get_iscsi_service_details()
if not iqn:
msg = _('Failed to get target IQN for the LUN %s')
raise exception.VolumeBackendAPIException(data=msg % name)
properties = na_utils.get_iscsi_connection_properties(lun_id, volume,
iqn, address,
port)
return properties
def _get_preferred_target_from_list(self, target_details_list,
filter=None):
preferred_target = None
for target in target_details_list:
if filter and target['address'] not in filter:
continue
if target.get('interface-enabled', 'true') == 'true':
preferred_target = target
break
if preferred_target is None and len(target_details_list) > 0:
preferred_target = target_details_list[0]
return preferred_target
def terminate_connection_iscsi(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, [initiator_name])
LOG.debug("Unmapped LUN %(name)s from the initiator "
"%(initiator_name)s",
{'name': name, 'initiator_name': initiator_name})
def initialize_connection_fc(self, volume, connector):
"""Initializes the connection and returns connection info.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '500a098280feeba5',
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5'],
'21000024ff406cc2': ['500a098280feeba5']
}
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['500a098280feeba5', '500a098290feeba5',
'500a098190feeba5', '500a098180feeba5'],
'access_mode': 'rw',
'initiator_target_map': {
'21000024ff406cc3': ['500a098280feeba5',
'500a098290feeba5'],
'21000024ff406cc2': ['500a098190feeba5',
'500a098180feeba5']
}
}
}
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
volume_name = volume['name']
lun_id = self._map_lun(volume_name, initiators, 'fcp', None)
LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
target_wwpns, initiator_target_map, num_paths = (
self._build_initiator_target_map(connector))
if target_wwpns:
LOG.debug("Successfully fetched target details for LUN %(name)s "
"and initiator(s) %(initiators)s",
{'name': volume_name, 'initiators': initiators})
else:
raise exception.VolumeBackendAPIException(
data=_('Failed to get LUN target details for '
'the LUN %s') % volume_name)
target_info = {'driver_volume_type': 'fibre_channel',
'data': {'target_discovered': True,
'target_lun': int(lun_id),
'target_wwn': target_wwpns,
'access_mode': 'rw',
'initiator_target_map': initiator_target_map}}
return target_info
def terminate_connection_fc(self, volume, connector, **kwargs):
"""Disallow connection from connector.
Return empty data if other volumes are in the same zone.
The FibreChannel ZoneManager doesn't remove zones
if there isn't an initiator_target_map in the
return of terminate_connection.
:returns: data - the target_wwns and initiator_target_map if the
zone is to be removed, otherwise the same map with
an empty dict for the 'data' key
"""
initiators = [fczm_utils.get_formatted_wwn(wwpn)
for wwpn in connector['wwpns']]
name = volume['name']
metadata = self._get_lun_attr(name, 'metadata')
path = metadata['Path']
self._unmap_lun(path, initiators)
LOG.debug("Unmapped LUN %(name)s from the initiator %(initiators)s",
{'name': name, 'initiators': initiators})
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if not self._has_luns_mapped_to_initiators(initiators):
# No more exports for this host, so tear down zone.
LOG.info(_LI("Need to remove FC Zone, building initiator "
"target map"))
target_wwpns, initiator_target_map, num_paths = (
self._build_initiator_target_map(connector))
info['data'] = {'target_wwn': target_wwpns,
'initiator_target_map': initiator_target_map}
return info
def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
# get WWPNs from controller and strip colons
all_target_wwpns = self._get_fc_target_wwpns()
all_target_wwpns = [six.text_type(wwpn).replace(':', '')
for wwpn in all_target_wwpns]
target_wwpns = []
init_targ_map = {}
num_paths = 0
if self.lookup_service is not None:
# Use FC SAN lookup to determine which ports are visible.
dev_map = self.lookup_service.get_device_mapping_from_network(
connector['wwpns'],
all_target_wwpns)
for fabric_name in dev_map:
fabric = dev_map[fabric_name]
target_wwpns += fabric['target_port_wwn_list']
for initiator in fabric['initiator_port_wwn_list']:
if initiator not in init_targ_map:
init_targ_map[initiator] = []
init_targ_map[initiator] += fabric['target_port_wwn_list']
init_targ_map[initiator] = list(set(
init_targ_map[initiator]))
for target in init_targ_map[initiator]:
num_paths += 1
target_wwpns = list(set(target_wwpns))
else:
initiator_wwns = connector['wwpns']
target_wwpns = all_target_wwpns
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwpns
return target_wwpns, init_targ_map, num_paths
|
julianwang/cinder
|
cinder/volume/drivers/netapp/dataontap/block_base.py
|
Python
|
apache-2.0
| 37,442 | 0 |
from datetime import datetime
# pokemon lottery
global_lotto_last_run = datetime(1969, 12, 31, 23, 59, 59, 999999)
lotto_new_run = None
print_speed_base = 0.03 # delay between printed characters
""" Graphics Notes:
The screen is 15x11 squares in dimension. Each square is 16x16 pixels. Total
screen is 240x176. Since I want to at least double the scale, a good starting
size would be 480x352, with each square being 32x32.
The background image needs to be called at scale, then moved instead of the
Player. Then obstacles and objects will be rendered as clear (or not) tiles
above the background layer.
"""
"""
General notes:
When slower than an opposing wild pokemon, there is approximately a 50% chance
you'll escape.
The only reason that the start button fly away Trainer works is because
enemy trainers face south for one frame before turning back and starting the
fight sequence. Obviously, the trick does not work with trainers that are
already facing south.
Three Steps: After a battle, wild or Trainer, a wild battle cannot be triggered
until the third step from their original location.
"""
|
itsthejoker/Pokemon-Homage
|
lemonyellow/core/__init__.py
|
Python
|
mit
| 1,110 | 0 |
"""Support for ESPHome binary sensors."""
import logging
from typing import TYPE_CHECKING, Optional
from homeassistant.components.binary_sensor import BinarySensorDevice
from . import EsphomeEntity, platform_async_setup_entry
if TYPE_CHECKING:
# pylint: disable=unused-import
from aioesphomeapi import BinarySensorInfo, BinarySensorState # noqa
DEPENDENCIES = ['esphome']
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up ESPHome binary sensors based on a config entry."""
# pylint: disable=redefined-outer-name
from aioesphomeapi import BinarySensorInfo, BinarySensorState # noqa
await platform_async_setup_entry(
hass, entry, async_add_entities,
component_key='binary_sensor',
info_type=BinarySensorInfo, entity_type=EsphomeBinarySensor,
state_type=BinarySensorState
)
class EsphomeBinarySensor(EsphomeEntity, BinarySensorDevice):
"""A binary sensor implementation for ESPHome."""
@property
def _static_info(self) -> 'BinarySensorInfo':
return super()._static_info
@property
def _state(self) -> Optional['BinarySensorState']:
return super()._state
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self._static_info.is_status_binary_sensor:
# Status binary sensors indicated connected state.
# So in their case what's usually _availability_ is now state
return self._entry_data.available
if self._state is None:
return None
return self._state.state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._static_info.device_class
@property
def available(self):
"""Return True if entity is available."""
if self._static_info.is_status_binary_sensor:
return True
return super().available
|
jamespcole/home-assistant
|
homeassistant/components/esphome/binary_sensor.py
|
Python
|
apache-2.0
| 2,003 | 0 |
# -*- coding: utf-8 -*-
import json
import pytest
from keyard_client import KeyardClient, testutils
@pytest.mark.skipif(not testutils.keyard_is_available(), reason="keyard is missing")
class TestKeyardClient(object):
def setup_method(self, method):
self.client = KeyardClient('http://127.0.0.1:8000')
def test_register(self):
response = self.client.register('app', '0.1', 'localhost:8002')
assert response is True
def test_health_check(self):
response = self.client.health_check('app', '0.1', 'localhost:8002')
assert response is True
def test_unregister(self):
response = self.client.unregister('app', '0.1', 'localhost:8002')
assert response is True
def test_get_service(self):
result = {"result": ['localhost:8080']}
value = self.client.get_service('app')
assert result == value
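# --- Added usage sketch (not part of the original tests) ---
# The same client API, outside pytest; assumes a keyard server on localhost:8000
# and uses example service values.
#
#     client = KeyardClient('http://127.0.0.1:8000')
#     client.register('app', '0.1', 'localhost:8002')
#     print(client.get_service('app'))   # e.g. {"result": ["localhost:8002"]}
#     client.unregister('app', '0.1', 'localhost:8002')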
|
rzanluchi/keyard-client
|
tests/test_client_integration.py
|
Python
|
mit
| 890 | 0.001124 |
# -*- mode: python; fill-column: 100; comment-column: 100; -*-
import unittest
import sys
import os
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import base_test
class ForwardTest(base_test.WebDriverBaseTest):
# Get a static page that must be the same upon refresh
def test_forward(self):
self.driver.get(
self.webserver.where_is('navigation/res/forwardStart.html'))
self.driver.get(
self.webserver.where_is('navigation/res/forwardNext.html'))
nextbody = self.driver.find_element_by_css("body").get_text()
self.driver.go_back()
currbody = self.driver.find_element_by_css("body").get_text()
self.assertNotEqual(nextbody, currbody)
self.driver.go_forward()
currbody = self.driver.find_element_by_css("body").get_text()
self.assertEqual(nextbody, currbody)
if __name__ == '__main__':
unittest.main()
|
qiuzhong/crosswalk-test-suite
|
misc/webdriver-w3c-tests/navigation/forward.py
|
Python
|
bsd-3-clause
| 962 | 0.002079 |
import os
from cpenv import api, paths
from cpenv.cli import core
from cpenv.module import parse_module_path
class Create(core.CLI):
'''Create a new Module.'''
def setup_parser(self, parser):
parser.add_argument(
'where',
help='Path to new module',
)
def run(self, args):
where = paths.normalize(args.where)
if os.path.isdir(where):
core.echo()
core.echo('Error: Can not create module in existing directory.')
core.exit(1)
default_name, default_version = parse_module_path(where)
core.echo()
core.echo('This command will guide you through creating a new module.')
core.echo()
name = core.prompt(' Module Name [%s]: ' % default_name)
version = core.prompt(' Version [%s]: ' % default_version.string)
description = core.prompt(' Description []: ')
author = core.prompt(' Author []: ')
email = core.prompt(' Email []: ')
core.echo()
core.echo('- Creating your new Module...', end='')
module = api.create(
where=where,
name=name or default_name,
version=version or default_version.string,
description=description,
author=author,
email=email,
)
core.echo('OK!')
core.echo()
core.echo(' ' + module.path)
core.echo()
core.echo('Steps you might take before publishing...')
core.echo()
core.echo(' - Include binaries your module depends on')
core.echo(' - Edit the module.yml file')
core.echo(' - Add variables to the environment section')
core.echo(' - Add other modules to the requires section')
core.echo(' - Add python hooks like post_activate')
core.echo()
|
cpenv/cpenv
|
cpenv/cli/create.py
|
Python
|
mit
| 1,854 | 0 |
from typing import List, Optional, Sequence, Union
from decksite.data import achievements, deck, preaggregation, query
from decksite.data.models.person import Person
from decksite.database import db
from shared import dtutil, guarantee, logger
from shared.container import Container
from shared.database import sqlescape
from shared.decorators import retry_after_calling
from shared.pd_exception import AlreadyExistsException, DoesNotExistException
def load_person_by_id(person_id: int, season_id: Optional[int] = None) -> Person:
return load_person(f'p.id = {person_id}', season_id=season_id)
def load_person_by_mtgo_username(username: str, season_id: Optional[int] = None) -> Person:
return load_person('p.mtgo_username = {username}'.format(username=sqlescape(username, force_string=True)), season_id=season_id)
def load_person_by_discord_id(discord_id: int, season_id: Optional[int] = None) -> Person:
return load_person(f'p.discord_id = {discord_id}', season_id=season_id)
# pylint: disable=invalid-name
def load_person_by_discord_id_or_username(person: str, season_id: int = 0) -> Person:
# It would probably be better if this method did not exist but for now it's required by the API.
# The problem is that Magic Online usernames can be integers so we cannot be completely unambiguous here.
# We can make a really good guess, though.
# See https://discordapp.com/developers/docs/reference#snowflakes
# Unix timestamp (ms) for 2015-01-01T00:00:00.0000 = 1420070400000
# Unix timestamp (ms) for 2015-01-01T00:00:00.0001 = 1420070400001
# Unix timestamp (ms) for 2015-02-01T00:00:00.0000 = 1422748800000
# Unix timestamp (ms) for 2100-01-01T00:00:00.0000 = 4102444800000
# Discord timestamp (ms) for 2015-01-01T00:00:00.0000 = 0
# Discord timestamp (ms) for 2015-01-01T00:00:00.0001 = 1
# Discord timestamp (ms) for 2015-02-01T00:00:00.0000 = 2678400000
# Min Discord snowflake for 2015-01-01T00:00:00.0000 = 0 ( 00000000000000000000000 in binary)
# Min Discord snowflake for 2015-01-01T00:00:00.0001 = 4194304 ( 10000000000000000000000 in binary)
# Min Discord snowflake for 2015-02-01T00:00:00.0000 = 11234023833600000 ( 100111111010010100100100000000000000000000000000000000 in binary)
# Min Discord snowflake for 2100-01-01T00:00:00.0000 = 5625346837708800000 (100111000010001001111110010010100000000000000000000000000000000 in binary)
# Discord snowflakes created between 2015-01-01T00:00:00.001Z and 2100-01-01T00:00:00.000Z will therefore fall in the range 2097152-5625346837708800000 if created before the year 2100.
# We use 2015-02-01T00:00:00.000Z (11234023833600000) as the start of the range instead because it greatly reduces the range and we have seen no evidence of Discord snowflakes from before December 28th 2015.
# This function will fail or (very unlikely) return incorrect results if we ever have a player with a Magic Online username that falls numerically between MIN_DISCORD_ID and MAX_DISCORD_ID.
MIN_DISCORD_ID = 11234023833600000
MAX_DISCORD_ID = 5625346837708800000
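    # Illustrative examples (hypothetical values): '226783918262583297' is all digits and falls
    # inside [MIN_DISCORD_ID, MAX_DISCORD_ID], so it is treated as a Discord id, while '8093' or
    # 'Username2015' are below the range or not numeric and are looked up as MTGO usernames.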
if person.isdigit() and int(person) >= MIN_DISCORD_ID and int(person) <= MAX_DISCORD_ID:
return load_person_by_discord_id(int(person), season_id=season_id)
return load_person_by_mtgo_username(person, season_id=season_id)
# pylint: disable=invalid-name
def maybe_load_person_by_discord_id(discord_id: Optional[int]) -> Optional[Person]:
if discord_id is None:
return None
return guarantee.at_most_one(load_people(f'p.discord_id = {discord_id}'))
# pylint: disable=invalid-name
def maybe_load_person_by_tappedout_name(username: str) -> Optional[Person]:
return guarantee.at_most_one(load_people('p.tappedout_username = {username}'.format(username=sqlescape(username))))
# pylint: disable=invalid-name
def maybe_load_person_by_mtggoldfish_name(username: str) -> Optional[Person]:
return guarantee.at_most_one(load_people('p.mtggoldfish_username = {username}'.format(username=sqlescape(username))))
def load_person(where: str, season_id: Optional[int] = None) -> Person:
people = load_people(where, season_id=season_id)
if len(people) == 0: # We didn't find an entry for that person with decks, what about without?
person = load_person_statless(where, season_id)
else:
person = guarantee.exactly_one(people)
set_achievements([person], season_id)
return person
# Sometimes (person detail page) we want to load what we know about a person even though they had no decks in the specified season.
def load_person_statless(where: str = 'TRUE', season_id: Optional[int] = None) -> Person:
person_query = query.person_query()
sql = f"""
SELECT
p.id,
{person_query} AS name,
p.mtgo_username,
p.tappedout_username,
p.mtggoldfish_username,
p.discord_id,
p.elo,
p.locale
FROM
person AS p
WHERE
{where}
"""
people = [Person(r) for r in db().select(sql)]
for p in people:
p.season_id = season_id
return guarantee.exactly_one(people)
def load_people_count(where: str = 'TRUE', season_id: Optional[Union[str, int]] = None) -> int:
season_join = query.season_join() if season_id else ''
season_query = query.season_query(season_id, 'season.id')
sql = f"""
SELECT
COUNT(DISTINCT p.id)
FROM
person AS p
LEFT JOIN
deck AS d ON d.person_id = p.id
LEFT JOIN
deck_cache AS dc ON d.id = dc.deck_id
{season_join}
WHERE
({where}) AND ({season_query})
"""
return db().value(sql) or 0
# Note: This only loads people who have decks in the specified season.
def load_people(where: str = 'TRUE',
order_by: str = 'num_decks DESC, p.name',
limit: str = '',
season_id: Optional[Union[str, int]] = None) -> Sequence[Person]:
person_query = query.person_query()
season_join = query.season_join() if season_id else ''
season_query = query.season_query(season_id, 'season.id')
sql = f"""
SELECT
p.id,
{person_query} AS name,
p.mtgo_username,
p.tappedout_username,
p.mtggoldfish_username,
p.discord_id,
p.elo,
p.locale,
SUM(1) AS num_decks,
SUM(dc.wins) AS wins,
SUM(dc.losses) AS losses,
SUM(dc.draws) AS draws,
SUM(wins - losses) AS record,
SUM(CASE WHEN dc.wins >= 5 AND dc.losses = 0 AND d.source_id IN (SELECT id FROM source WHERE name = 'League') THEN 1 ELSE 0 END) AS perfect_runs,
SUM(CASE WHEN d.finish = 1 THEN 1 ELSE 0 END) AS tournament_wins,
SUM(CASE WHEN d.finish <= 8 THEN 1 ELSE 0 END) AS tournament_top8s,
IFNULL(ROUND((SUM(dc.wins) / NULLIF(SUM(dc.wins + dc.losses), 0)) * 100, 1), '') AS win_percent,
SUM(DISTINCT CASE WHEN d.competition_id IS NOT NULL THEN 1 ELSE 0 END) AS num_competitions
FROM
person AS p
LEFT JOIN
deck AS d ON d.person_id = p.id
LEFT JOIN
deck_cache AS dc ON d.id = dc.deck_id
{season_join}
WHERE
({where}) AND ({season_query})
GROUP BY
p.id
ORDER BY
{order_by}
{limit}
"""
people = [Person(r) for r in db().select(sql)]
for p in people:
p.season_id = season_id
return people
def seasons_active(person_id: int) -> List[int]:
sql = f"""
SELECT
DISTINCT season.id
FROM
deck AS d
{query.season_join()}
WHERE
d.person_id = {person_id}
ORDER BY
season.id
"""
return db().values(sql)
def preaggregate() -> None:
achievements.preaggregate_achievements()
preaggregate_head_to_head()
def preaggregate_head_to_head() -> None:
table = '_head_to_head_stats'
sql = """
CREATE TABLE IF NOT EXISTS _new{table} (
person_id INT NOT NULL,
opponent_id INT NOT NULL,
season_id INT NOT NULL,
num_matches INT NOT NULL,
wins INT NOT NULL,
losses INT NOT NULL,
draws INT NOT NULL,
PRIMARY KEY (season_id, person_id, opponent_id),
FOREIGN KEY (season_id) REFERENCES season (id) ON UPDATE CASCADE ON DELETE CASCADE,
FOREIGN KEY (person_id) REFERENCES person (id) ON UPDATE CASCADE ON DELETE CASCADE,
FOREIGN KEY (opponent_id) REFERENCES person (id) ON UPDATE CASCADE ON DELETE CASCADE
) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci AS
SELECT
p.id AS person_id,
opp.id AS opponent_id,
season.id AS season_id,
COUNT(p.id) AS num_matches,
SUM(CASE WHEN dm.games > odm.games THEN 1 ELSE 0 END) AS wins,
SUM(CASE WHEN dm.games < odm.games THEN 1 ELSE 0 END) AS losses,
SUM(CASE WHEN dm.games = odm.games THEN 1 ELSE 0 END) AS draws
FROM
person AS p
INNER JOIN
deck AS d ON p.id = d.person_id
INNER JOIN
deck_match AS dm ON dm.deck_id = d.id
INNER JOIN
deck_match AS odm ON dm.match_id = odm.match_id AND dm.deck_id <> IFNULL(odm.deck_id, 0)
INNER JOIN
deck AS od ON odm.deck_id = od.id
INNER JOIN
person AS opp ON od.person_id = opp.id
{season_join}
GROUP BY
p.id,
opp.id,
season.id
""".format(table=table, season_join=query.season_join())
preaggregation.preaggregate(table, sql)
@retry_after_calling(achievements.preaggregate_achievements)
def set_achievements(people: List[Person], season_id: Optional[int] = None) -> None:
people_by_id = {person.id: person for person in people}
sql = achievements.load_query(people_by_id, season_id)
results = [Container(r) for r in db().select(sql)]
for result in results:
people_by_id[result['id']].num_achievements = len([k for k, v in result.items() if k != 'id' and v > 0])
people_by_id[result['id']].achievements = result
people_by_id[result['id']].achievements.pop('id')
@retry_after_calling(preaggregate_head_to_head)
def load_head_to_head_count(person_id: int, where: str = 'TRUE', season_id: Optional[int] = None) -> int:
season_query = query.season_query(season_id)
sql = f'SELECT COUNT(*) FROM _head_to_head_stats AS hths INNER JOIN person AS opp ON hths.opponent_id = opp.id WHERE ({where}) AND (hths.person_id = {person_id}) AND ({season_query})'
return db().value(sql)
@retry_after_calling(preaggregate_head_to_head)
def load_head_to_head(person_id: int, where: str = 'TRUE', order_by: str = 'num_matches DESC, record DESC, win_percent DESC, wins DESC, opp_mtgo_username', limit: str = '', season_id: Optional[int] = None) -> Sequence[Container]:
season_query = query.season_query(season_id)
sql = f"""
SELECT
hths.person_id AS id,
LOWER(opp.mtgo_username) AS opp_mtgo_username,
SUM(num_matches) AS num_matches,
SUM(wins) - SUM(losses) AS record,
SUM(wins) AS wins,
SUM(losses) AS losses,
SUM(draws) AS draws,
IFNULL(ROUND((SUM(wins) / NULLIF(SUM(wins + losses), 0)) * 100, 1), '') AS win_percent
FROM
_head_to_head_stats AS hths
INNER JOIN
person AS opp ON hths.opponent_id = opp.id
WHERE
({where}) AND (hths.person_id = {person_id}) AND ({season_query})
GROUP BY
hths.person_id,
hths.opponent_id
ORDER BY
{order_by}
{limit}
"""
return [Container(r) for r in db().select(sql)]
def associate(d: deck.Deck, discord_id: int) -> int:
person_id = db().value('SELECT person_id FROM deck WHERE id = %s', [d.id], fail_on_missing=True)
sql = 'UPDATE person SET discord_id = %s WHERE id = %s'
return db().execute(sql, [discord_id, person_id])
def is_allowed_to_retire(deck_id: Optional[int], discord_id: Optional[int]) -> bool:
if not deck_id:
return False
if not discord_id:
return True
person = maybe_load_person_by_discord_id(discord_id)
if person is None:
return True
return any(int(deck_id) == deck.id for deck in person.decks)
def get_or_insert_person_id(mtgo_username: Optional[str], tappedout_username: Optional[str], mtggoldfish_username: Optional[str]) -> int:
sql = 'SELECT id FROM person WHERE LOWER(mtgo_username) = LOWER(%s) OR LOWER(tappedout_username) = LOWER(%s) OR LOWER(mtggoldfish_username) = LOWER(%s)'
person_id = db().value(sql, [mtgo_username, tappedout_username, mtggoldfish_username])
if person_id:
return person_id
sql = 'INSERT INTO person (mtgo_username, tappedout_username, mtggoldfish_username) VALUES (%s, %s, %s)'
return db().insert(sql, [mtgo_username, tappedout_username, mtggoldfish_username])
def load_aliases() -> List[Container]:
sql = """
SELECT
pa.person_id,
pa.alias,
p.mtgo_username
FROM
person_alias AS pa
INNER JOIN
person AS p ON p.id = pa.person_id
"""
return [Container(r) for r in db().select(sql)]
def add_alias(person_id: int, alias: str) -> None:
db().begin('add_alias')
try:
p = load_person_by_mtgo_username(alias)
db().execute('UPDATE deck SET person_id = %s WHERE person_id = %s', [person_id, p.id])
db().execute('DELETE FROM person WHERE id = %s', [p.id])
except DoesNotExistException:
pass
db().execute('INSERT INTO person_alias (person_id, alias) VALUES (%s, %s)', [person_id, alias])
db().commit('add_alias')
def load_notes(person_id: Optional[int] = None) -> List[Container]:
where = f'subject_id = {person_id}' if person_id else 'TRUE'
sql = """
SELECT
pn.created_date,
pn.creator_id,
{creator_query} AS creator,
pn.subject_id,
{subject_query} AS subject,
note
FROM
person_note AS pn
INNER JOIN
person AS c ON pn.creator_id = c.id
INNER JOIN
person AS s ON pn.subject_id = s.id
WHERE
{where}
ORDER BY
s.id,
pn.created_date DESC
""".format(creator_query=query.person_query('c'), subject_query=query.person_query('s'), where=where)
notes = [Container(r) for r in db().select(sql)]
for n in notes:
n.created_date = dtutil.ts2dt(n.created_date)
n.display_date = dtutil.display_date(n.created_date)
return notes
def add_note(creator_id: int, subject_id: int, note: str) -> None:
sql = 'INSERT INTO person_note (created_date, creator_id, subject_id, note) VALUES (UNIX_TIMESTAMP(NOW()), %s, %s, %s)'
db().execute(sql, [creator_id, subject_id, note])
def link_discord(mtgo_username: str, discord_id: int) -> Person:
person_id = deck.get_or_insert_person_id(mtgo_username, None, None)
p = load_person_by_id(person_id)
if p.discord_id is not None:
raise AlreadyExistsException('Player with mtgo username {mtgo_username} already has discord id {old_discord_id}, cannot add {new_discord_id}'.format(mtgo_username=mtgo_username, old_discord_id=p.discord_id, new_discord_id=discord_id))
sql = 'UPDATE person SET discord_id = %s WHERE id = %s'
db().execute(sql, [discord_id, p.id])
return p
def unlink_discord(person_id: int) -> int:
sql = 'UPDATE person SET discord_id = NULL WHERE id = %s'
return db().execute(sql, [person_id])
def remove_discord_link(discord_id: int) -> int:
sql = 'UPDATE person SET discord_id = NULL WHERE discord_id = %s'
return db().execute(sql, [discord_id])
def is_banned(mtgo_username: str) -> bool:
return db().value('SELECT banned FROM person WHERE mtgo_username = %s', [mtgo_username]) == 1
def squash(p1id: int, p2id: int, col1: str, col2: str) -> None:
logger.warning('Squashing {p1id} and {p2id} on {col1} and {col2}'.format(p1id=p1id, p2id=p2id, col1=col1, col2=col2))
db().begin('squash')
new_value = db().value('SELECT {col2} FROM person WHERE id = %s'.format(col2=col2), [p2id])
db().execute('UPDATE deck SET person_id = %s WHERE person_id = %s', [p1id, p2id])
db().execute('DELETE FROM person WHERE id = %s', [p2id])
db().execute('UPDATE person SET {col2} = %s WHERE id = %s'.format(col2=col2), [new_value, p1id])
db().commit('squash')
def set_locale(person_id: int, locale: str) -> None:
db().execute('UPDATE person SET locale = %s WHERE id = %s', [locale, person_id])
|
PennyDreadfulMTG/Penny-Dreadful-Discord-Bot
|
decksite/data/person.py
|
Python
|
gpl-3.0
| 17,080 | 0.004859 |
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Python module for Blender Driver demonstration application.
Abstract base class for demonstration applications.
This module can only be used from within the Blender Game Engine."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for command line switches.
# https://docs.python.org/3.5/library/argparse.html
# The import isn't needed because this class uses the base class to get an
# object.
# import argparse
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# Blender library imports, in alphabetic order.
#
# Local imports.
#
# Blender Driver application with threads and locks.
import blender_driver.application.thread
# Diagnostic print to show when it's imported. Only printed if all its own
# imports run OK.
print('"'.join(('Application module ', __name__, '.')))
class Application(blender_driver.application.thread.Application):
_instructions = "Press ESC to crash BGE, or any other key to terminate."
_bannerName = 'banner'
_bannerObject = None
@property
def bannerObject(self):
return self._bannerObject
# Overriden.
def data_initialise(self):
#
# Do common initialisation for subclasses.
self._bannerObject = self.data_add_banner()
self.dontDeletes.append(self._bannerName)
#
# Run the base class method.
super().data_initialise()
def data_add_banner(self):
banner = "\n".join(
("Blender Driver" , self.applicationName , self._instructions))
return self.bpyutils.set_up_object(
self._bannerName, {'text':banner, 'physicsType':'NO_COLLISION'
, 'location': (-5, -8, 3)})
# Overriden.
def game_initialise(self):
super().game_initialise()
self.mainLock.acquire()
try:
self._bannerObject = self.game_add_text(self._bannerName)
log(DEBUG, "Game scene objects {}\nArguments: {}\nSettings: {}"
, self.gameScene.objects, vars(self.arguments), self.settings)
print(self._instructions)
finally:
self.mainLock.release()
# Overriden.
def game_keyboard(self, *args):
#
# Formally, run the base class method. Actually, it's a pass.
super().game_keyboard(*args)
#
# Default is to terminate on any key press.
log(DEBUG, "Terminating.")
self.game_terminate()
def tick_skipped(self):
log(WARNING, "Skipped ticks: {:d}.", self.skippedTicks)
|
sjjhsjjh/blender-driver
|
applications/demonstration.py
|
Python
|
mit
| 2,992 | 0.003676 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserActivateKey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('activation_key', models.CharField(max_length=40, null=True, blank=True)),
('key_expires', models.DateTimeField(null=True, blank=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'tcms_user_activate_keys',
},
),
]
|
MrSenko/Nitrate
|
tcms/core/contrib/auth/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 867 | 0.002307 |
# Orbotor - arcade with orbit mechanics
# Copyright (C) 2014 mr555ru
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys # NOQA
import profile
import ConfigParser
import pygame
from pygame import *
from static_functions import *
import camera as camera
import planet as planet
from orbitable import GCD_Singleton, SoundSystem_Singleton
from helldebris_collection import HellDebrisCollection
from team import Team
from simplestats import SimpleStats
wwidth = 1024
wheight = 768
p1_name = "Player1"
p2_name = "Player2"
config = ConfigParser.RawConfigParser()
config.read('profile.cfg')
wwidth = config.getint("Screen", "width")
wheight = config.getint("Screen", "height")
p1_name = config.get("Player", "P1_name")
p2_name = config.get("Player", "P2_name")
display = (wwidth, wheight)
clock = pygame.time.Clock()
class Profile():
def __init__(self, is_player2_present=False,
is_player1_ai=False,
is_player2_ai=False,
player1_team="Green",
player2_team="Red",
greenteamsize=8,
redteamsize=8,
debris_min=6,
debris_max=20,
draw_planet=False,
name=""):
self.p2 = is_player2_present
self.p1_ai = is_player1_ai
self.p2_ai = is_player2_ai
self.p1_team = player1_team
self.p2_team = player2_team
mingreen = int(self.p1_team == "Green") + int(self.p2_team == "Green" and self.p2)
minred = int(self.p1_team == "Red") + int(self.p2_team == "Red" and self.p2)
self.green = max(mingreen, greenteamsize)
self.red = max(minred, redteamsize)
self.hellmin = debris_min
self.hellmax = debris_max
self.draw_planet = draw_planet
self.name = name
self.ERAD = 1000
self.MAXRAD = 1700
self.ORBHEIGHT = 350
def game_init(self):
pygame.init()
self.PROFILESTEP = False
        self.UPDATE_GAME = pygame.USEREVENT + 1
        pygame.time.set_timer(self.UPDATE_GAME, GAME_SPEED)
self.screen = pygame.display.set_mode(display)
if self.p2:
self.bg1 = Surface((wwidth, wheight/2))
self.bg2 = Surface((wwidth, wheight/2))
self.cam2 = camera.Camera(self.bg2, first_in_order=False)
self.bgs = (self.bg1, self.bg2)
else:
self.bg1 = Surface((wwidth, wheight))
self.bgs = (self.bg1,)
self.cam1 = camera.Camera(self.bg1)
if self.name == "":
pygame.display.set_caption("Orbotor")
else:
pygame.display.set_caption("Orbotor - %s" % self.name)
self.pl = planet.Planet(self.bgs, self.ERAD, self.MAXRAD, "planet.png" if self.draw_planet else None)
GCD_Singleton.set_planet(self.pl)
self.soundsys = SoundSystem_Singleton
self.spawn = (self.ERAD+self.ORBHEIGHT, 0)
self.team1 = Team("Green", "#009900", self.green, self.spawn, self.pl)
self.team2 = Team("Red", "#880000", self.red, self.spawn, self.pl)
self.team1.set_opponent_team(self.team2)
self.team2.set_opponent_team(self.team1)
self.hell = HellDebrisCollection(self.spawn, self.pl, self.hellmin, self.hellmax)
if self.p1_team == "Green":
self.player1 = self.team1.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[1]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[0]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
elif self.p1_team == "Red":
            self.player1 = self.team2.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[0]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[1]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
else:
raise Exception("unknown team for p1: %s" % self.p1_team)
self.player1.is_ai = self.p1_ai
if self.p1_ai:
self.player1.set_name("[bot] %s" % p1_name)
else:
self.player1.set_name("%s" % p1_name)
if self.p2:
self.player2.is_ai = self.p2_ai
if self.p2_ai:
self.player2.set_name("[bot] %s" % p2_name)
else:
self.player2.set_name("%s" % p2_name)
self.stats1 = SimpleStats(self.team1, self.team2, self.player1)
if self.p2:
self.stats2 = SimpleStats(self.team1, self.team2, self.player2)
def game_key_listen(self, event):
if event.type == KEYDOWN and event.key == K_F1:
self.PROFILESTEP = True
self.game_step()
elif event.type == KEYDOWN and event.key == K_F2:
print len(GCD_Singleton.orbitables)
elif event.type == KEYDOWN and event.key == K_F5:
self.soundsys.switch()
if not self.p1_ai:
self.player1.catch_kb_event(event)
if self.p2 and not self.p2_ai:
self.player2.catch_kb_event_hotseat(event)
self.cam1.keys_listen(event)
if self.p2:
self.cam2.keys_listen_hotseat(event)
def game_step(self):
if self.PROFILESTEP:
profile.runctx("self._step()", globals(), {"self": self})
else:
self._step()
def _step(self):
self.team2.step() # todo faster better stronger
self.team1.step()
self.hell.step()
self.player1.focus(self.cam1)
self.cam1.step()
if self.p2:
self.player2.focus(self.cam2)
self.cam2.step()
GCD_Singleton.step()
def game_draw(self):
if self.PROFILESTEP:
profile.runctx("self._draw()", globals(), {"self": self})
self.PROFILESTEP = False
else:
self._draw()
def _draw(self):
clock.tick(60)
tup = [self.pl, ] + self.team1.objectslist() + self.team2.objectslist()\
+ self.hell.objectslist() + self.pl.cities
tup = tuple(tup)
self.cam1.translate_coords(*tup)
if self.p2:
self.cam2.translate_coords(*tup)
self.stats1.draw(self.bg1)
self.screen.blit(self.bg1, (0, 0))
if self.p2:
self.stats2.draw(self.bg2)
self.screen.blit(self.bg2, (0, wheight/2))
pygame.display.update()
def DefaultProfile(draw_planet, hell):
return Profile(draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1])
def HotseatProfile(draw_planet, hell):
return Profile(is_player2_present=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def RivalProfile(draw_planet, hell):
return Profile(is_player2_present=True, is_player2_ai=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def CoopProfile(draw_planet, hell):
return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def SpectateProfile(draw_planet, hell):
return Profile(is_player1_ai=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def SurvivalProfile(draw_planet):
return Profile(draw_planet=draw_planet, debris_min=35, debris_max=70,
greenteamsize=1, redteamsize=0)
def CoopSurvivalProfile(draw_planet):
return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet,
debris_min=35, debris_max=70, greenteamsize=2, redteamsize=0)
|
mr555ru/orbotor
|
orbotor/gameprofile.py
|
Python
|
gpl-3.0
| 8,679 | 0.004378 |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class ImageBuilderException(Exception):
pass
class ImageRateLimitedException(Exception):
"""Rate Limited request"""
class ImageSpecificationException(Exception):
pass
class ImageUploaderException(Exception):
pass
class ImageUploaderThreadException(Exception):
"""Conflict during thread processing"""
pass
class ImageNotFoundException(Exception):
pass
|
openstack/tripleo-common
|
tripleo_common/image/exception.py
|
Python
|
apache-2.0
| 991 | 0 |
"""Terminal management for exposing terminals to a web interface using Tornado.
"""
# Copyright (c) Jupyter Development Team
# Copyright (c) 2014, Ramalingam Saravanan <sarava@sarava.net>
# Distributed under the terms of the Simplified BSD License.
from __future__ import absolute_import, print_function
import asyncio
from collections import deque
import itertools
import logging
import os
import signal
import codecs
import warnings
import select
try:
from ptyprocess import PtyProcessUnicode
def preexec_fn():
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except ImportError:
from winpty import PtyProcess as PtyProcessUnicode
preexec_fn = None
from tornado.ioloop import IOLoop
ENV_PREFIX = "PYXTERM_" # Environment variable prefix
DEFAULT_TERM_TYPE = "xterm"
class PtyWithClients(object):
def __init__(self, argv, env=[], cwd=None):
self.clients = []
# If you start the process and then construct this object from it,
# output generated by the process prior to the object's creation
# is lost. Hence the change from 0.8.3.
# Buffer output until a client connects; then let the client
# drain the buffer.
# We keep the same read_buffer as before
self.read_buffer = deque([], maxlen=10)
self.preopen_buffer = deque([])
kwargs = dict(argv=argv, env=env, cwd=cwd)
if preexec_fn is not None:
kwargs["preexec_fn"] = preexec_fn
self.ptyproc = PtyProcessUnicode.spawn(**kwargs)
# The output might not be strictly UTF-8 encoded, so
# we replace the inner decoder of PtyProcessUnicode
# to allow non-strict decode.
self.ptyproc.decoder = codecs.getincrementaldecoder('utf-8')(errors='replace')
def resize_to_smallest(self):
"""Set the terminal size to that of the smallest client dimensions.
A terminal not using the full space available is much nicer than a
terminal trying to use more than the available space, so we keep it
sized to the smallest client.
"""
minrows = mincols = 10001
for client in self.clients:
rows, cols = client.size
if rows is not None and rows < minrows:
minrows = rows
if cols is not None and cols < mincols:
mincols = cols
if minrows == 10001 or mincols == 10001:
return
rows, cols = self.ptyproc.getwinsize()
if (rows, cols) != (minrows, mincols):
self.ptyproc.setwinsize(minrows, mincols)
def kill(self, sig=signal.SIGTERM):
"""Send a signal to the process in the pty"""
self.ptyproc.kill(sig)
def killpg(self, sig=signal.SIGTERM):
"""Send a signal to the process group of the process in the pty"""
if os.name == 'nt':
return self.ptyproc.kill(sig)
pgid = os.getpgid(self.ptyproc.pid)
os.killpg(pgid, sig)
async def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if os.name == 'nt':
signals = [signal.SIGINT, signal.SIGTERM]
else:
signals = [signal.SIGHUP, signal.SIGCONT, signal.SIGINT,
signal.SIGTERM]
loop = IOLoop.current()
def sleep(): return asyncio.sleep(self.ptyproc.delayafterterminate)
if not self.ptyproc.isalive():
return True
try:
for sig in signals:
self.kill(sig)
await sleep()
if not self.ptyproc.isalive():
return True
if force:
self.kill(signal.SIGKILL)
await sleep()
if not self.ptyproc.isalive():
return True
else:
return False
return False
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
await sleep()
if not self.ptyproc.isalive():
return True
else:
return False
def _update_removing(target, changes):
"""Like dict.update(), but remove keys where the value is None.
"""
for k, v in changes.items():
if v is None:
target.pop(k, None)
else:
target[k] = v
class TermManagerBase(object):
"""Base class for a terminal manager."""
def __init__(self, shell_command, server_url="", term_settings={},
extra_env=None, ioloop=None):
self.shell_command = shell_command
self.server_url = server_url
self.term_settings = term_settings
self.extra_env = extra_env
self.log = logging.getLogger(__name__)
self.ptys_by_fd = {}
if ioloop is not None:
warnings.warn(
f"Setting {self.__class__.__name__}.ioloop is deprecated and ignored",
DeprecationWarning,
stacklevel=2,
)
def make_term_env(self, height=25, width=80, winheight=0, winwidth=0, **kwargs):
"""Build the environment variables for the process in the terminal."""
env = os.environ.copy()
env["TERM"] = self.term_settings.get("type", DEFAULT_TERM_TYPE)
dimensions = "%dx%d" % (width, height)
if winwidth and winheight:
dimensions += ";%dx%d" % (winwidth, winheight)
env[ENV_PREFIX+"DIMENSIONS"] = dimensions
env["COLUMNS"] = str(width)
env["LINES"] = str(height)
if self.server_url:
env[ENV_PREFIX+"URL"] = self.server_url
if self.extra_env:
_update_removing(env, self.extra_env)
return env
def new_terminal(self, **kwargs):
"""Make a new terminal, return a :class:`PtyWithClients` instance."""
options = self.term_settings.copy()
options['shell_command'] = self.shell_command
options.update(kwargs)
argv = options['shell_command']
env = self.make_term_env(**options)
cwd = options.get('cwd', None)
return PtyWithClients(argv, env, cwd)
def start_reading(self, ptywclients):
"""Connect a terminal to the tornado event loop to read data from it."""
fd = ptywclients.ptyproc.fd
self.ptys_by_fd[fd] = ptywclients
loop = IOLoop.current()
loop.add_handler(fd, self.pty_read, loop.READ)
def on_eof(self, ptywclients):
"""Called when the pty has closed.
"""
# Stop trying to read from that terminal
fd = ptywclients.ptyproc.fd
self.log.info("EOF on FD %d; stopping reading", fd)
del self.ptys_by_fd[fd]
IOLoop.current().remove_handler(fd)
# This closes the fd, and should result in the process being reaped.
ptywclients.ptyproc.close()
def pty_read(self, fd, events=None):
"""Called by the event loop when there is pty data ready to read."""
r, _, _ = select.select([fd], [], [], .1)
if not r:
return
ptywclients = self.ptys_by_fd[fd]
try:
s = ptywclients.ptyproc.read(65536)
client_list = ptywclients.clients
ptywclients.read_buffer.append(s)
if not client_list:
# No one to consume our output: buffer it.
ptywclients.preopen_buffer.append(s)
return
for client in ptywclients.clients:
client.on_pty_read(s)
except EOFError:
self.on_eof(ptywclients)
for client in ptywclients.clients:
client.on_pty_died()
def get_terminal(self, url_component=None):
"""Override in a subclass to give a terminal to a new websocket connection
The :class:`TermSocket` handler works with zero or one URL components
(capturing groups in the URL spec regex). If it receives one, it is
passed as the ``url_component`` parameter; otherwise, this is None.
"""
raise NotImplementedError
def client_disconnected(self, websocket):
"""Override this to e.g. kill terminals on client disconnection.
"""
pass
async def shutdown(self):
await self.kill_all()
async def kill_all(self):
futures = []
for term in self.ptys_by_fd.values():
futures.append(term.terminate(force=True))
# wait for futures to finish
if futures:
await asyncio.gather(*futures)
class SingleTermManager(TermManagerBase):
"""All connections to the websocket share a common terminal."""
def __init__(self, **kwargs):
super(SingleTermManager, self).__init__(**kwargs)
self.terminal = None
def get_terminal(self, url_component=None):
if self.terminal is None:
self.terminal = self.new_terminal()
self.start_reading(self.terminal)
return self.terminal
async def kill_all(self):
await super().kill_all()
self.terminal = None
class MaxTerminalsReached(Exception):
def __init__(self, max_terminals):
self.max_terminals = max_terminals
def __str__(self):
return "Cannot create more than %d terminals" % self.max_terminals
class UniqueTermManager(TermManagerBase):
"""Give each websocket a unique terminal to use."""
def __init__(self, max_terminals=None, **kwargs):
super(UniqueTermManager, self).__init__(**kwargs)
self.max_terminals = max_terminals
def get_terminal(self, url_component=None):
if self.max_terminals and len(self.ptys_by_fd) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
term = self.new_terminal()
self.start_reading(term)
return term
def client_disconnected(self, websocket):
"""Send terminal SIGHUP when client disconnects."""
self.log.info("Websocket closed, sending SIGHUP to terminal.")
if websocket.terminal:
if os.name == 'nt':
websocket.terminal.kill()
# Immediately call the pty reader to process
# the eof and free up space
self.pty_read(websocket.terminal.ptyproc.fd)
return
websocket.terminal.killpg(signal.SIGHUP)
class NamedTermManager(TermManagerBase):
"""Share terminals between websockets connected to the same endpoint.
"""
def __init__(self, max_terminals=None, **kwargs):
super(NamedTermManager, self).__init__(**kwargs)
self.max_terminals = max_terminals
self.terminals = {}
def get_terminal(self, term_name):
assert term_name is not None
if term_name in self.terminals:
return self.terminals[term_name]
if self.max_terminals and len(self.terminals) >= self.max_terminals:
raise MaxTerminalsReached(self.max_terminals)
# Create new terminal
self.log.info("New terminal with specified name: %s", term_name)
term = self.new_terminal()
term.term_name = term_name
self.terminals[term_name] = term
self.start_reading(term)
return term
name_template = "%d"
def _next_available_name(self):
for n in itertools.count(start=1):
name = self.name_template % n
if name not in self.terminals:
return name
def new_named_terminal(self, **kwargs):
if 'name' in kwargs:
name = kwargs['name']
else:
name = self._next_available_name()
term = self.new_terminal(**kwargs)
self.log.info("New terminal with automatic name: %s", name)
term.term_name = name
self.terminals[name] = term
self.start_reading(term)
return name, term
def kill(self, name, sig=signal.SIGTERM):
term = self.terminals[name]
term.kill(sig) # This should lead to an EOF
async def terminate(self, name, force=False):
term = self.terminals[name]
await term.terminate(force=force)
def on_eof(self, ptywclients):
super(NamedTermManager, self).on_eof(ptywclients)
name = ptywclients.term_name
self.log.info("Terminal %s closed", name)
self.terminals.pop(name, None)
async def kill_all(self):
await super().kill_all()
self.terminals = {}
|
takluyver/terminado
|
terminado/management.py
|
Python
|
bsd-2-clause
| 12,822 | 0.000468 |
from django.utils.encoding import smart_unicode
def ssn_check_digit(value):
"Calculate Italian social security number check digit."
ssn_even_chars = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
'9': 9, 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7,
'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15,
'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20, 'V': 21, 'W': 22, 'X': 23,
'Y': 24, 'Z': 25
}
ssn_odd_chars = {
'0': 1, '1': 0, '2': 5, '3': 7, '4': 9, '5': 13, '6': 15, '7': 17, '8':
19, '9': 21, 'A': 1, 'B': 0, 'C': 5, 'D': 7, 'E': 9, 'F': 13, 'G': 15,
'H': 17, 'I': 19, 'J': 21, 'K': 2, 'L': 4, 'M': 18, 'N': 20, 'O': 11,
'P': 3, 'Q': 6, 'R': 8, 'S': 12, 'T': 14, 'U': 16, 'V': 10, 'W': 22,
'X': 25, 'Y': 24, 'Z': 23
}
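    # Note: the spec numbers positions from 1, so 0-based even indices (i % 2 == 0) are looked
    # up in ssn_odd_chars and 0-based odd indices in ssn_even_chars.
    # Illustrative call (made-up code, not a real person's): ssn_check_digit('RSSMRA80A01H501')
    # sums the mapped values of the 15 characters and returns ssn_check_digits[total % 26].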
# Chars from 'A' to 'Z'
ssn_check_digits = [chr(x) for x in range(65, 91)]
ssn = value.upper()
total = 0
for i in range(0, 15):
try:
if i % 2 == 0:
total += ssn_odd_chars[ssn[i]]
else:
total += ssn_even_chars[ssn[i]]
except KeyError:
msg = "Character '%(char)s' is not allowed." % {'char': ssn[i]}
raise ValueError(msg)
return ssn_check_digits[total % 26]
def vat_number_check_digit(vat_number):
"Calculate Italian VAT number check digit."
normalized_vat_number = smart_unicode(vat_number).zfill(10)
total = 0
for i in range(0, 10, 2):
total += int(normalized_vat_number[i])
for i in range(1, 11, 2):
        quotient, remainder = divmod(int(normalized_vat_number[i]) * 2, 10)
total += quotient + remainder
return smart_unicode((10 - total % 10) % 10)
|
rebost/django
|
django/contrib/localflavor/it/util.py
|
Python
|
bsd-3-clause
| 1,800 | 0.001667 |
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")
if door == "1":
print "There`s a giant bear here eating a chees cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear eats your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away." %bear
elif door =="2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1. Blueberries."
print "2. Yellow jacket clothespins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity =="2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!"
|
aleksl05/IS-206
|
ex31.py
|
Python
|
gpl-3.0
| 982 | 0.037678 |
# Copyright 2013 Virantha Ekanayake All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides capability to search PDFs and file to a specific folder based
on keywords
"""
from sets import Set
import sys, os
import re
import logging
import shutil
from PyPDF2 import PdfFileReader
from pypdfocr_filer import PyFiler
from pypdfocr_filer_dirs import PyFilerDirs
class PyPdfFiler(object):
def __init__(self, filer):
assert isinstance(filer, PyFiler)
self.filer = filer # Must be a subclass of PyFiler
# Whether to fall back on filename for matching keywords against
# if there is no match in the text
self.file_using_filename = False
def iter_pdf_page_text(self, filename):
self.filename = filename
reader = PdfFileReader(filename)
logging.info("pdf scanner found %d pages in %s" % (reader.getNumPages(), filename))
for pgnum in range(reader.getNumPages()):
text = reader.getPage(pgnum).extractText()
text = text.encode('ascii', 'ignore')
text = text.replace('\n', ' ')
yield text
def _get_matching_folder(self, pdfText):
searchText = pdfText.lower()
for folder,strings in self.filer.folder_targets.items():
for s in strings:
logging.debug("Checking string %s" % s)
if s in searchText:
logging.info("Matched keyword '%s'" % s)
return folder
# No match found, so return
return None
def file_original (self, original_filename):
return self.filer.file_original(original_filename)
def move_to_matching_folder(self, filename):
for page_text in self.iter_pdf_page_text(filename):
tgt_folder = self._get_matching_folder(page_text)
if tgt_folder: break # Stop searching through pdf pages as soon as we find a match
if not tgt_folder and self.file_using_filename:
tgt_folder = self._get_matching_folder(filename)
tgt_file = self.filer.move_to_matching_folder(filename, tgt_folder)
return tgt_file
if __name__ == '__main__':
p = PyPdfFiler(PyFilerDirs())
for page_text in p.iter_pdf_page_text("scan_ocr.pdf"):
print (page_text)
|
virantha/pypdfocr
|
pypdfocr/pypdfocr_pdffiler.py
|
Python
|
apache-2.0
| 2,902 | 0.004824 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from twisted.enterprise import adbapi
from calvin.runtime.south.async import async
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.south.calvinsys import base_calvinsys_object
_log = get_logger(__name__)
class PersistentBuffer(base_calvinsys_object.BaseCalvinsysObject):
"""
Asynchronous (using twisted adbapi) SQLite-based implementation of persistant queue
Based on the following (from sqlite.org):
1) If no ROWID is specified on the insert [...] then an appropriate ROWID is created automatically.
2) The usual algorithm is to give the newly created row a ROWID that is one larger than the largest
ROWID in the table prior to the insert.
3) If the table is initially empty, then a ROWID of 1 is used.
4) If the largest ROWID is equal to the largest possible integer (9223372036854775807) then the
database engine starts picking positive candidate ROWIDs at random until it finds one
that is not previously used.
5) The normal ROWID selection [...] will generate monotonically increasing unique ROWIDs as long
as you never use the maximum ROWID value and you never delete the entry in the table with the largest ROWID.
Since we are implementing a FIFO queue, 1) should ensure there is a row id, 2) & 5) that the ordering is correct
and 3) that the rowid is reset whenever the queue is emptied, so 4) should never happen.
"""
init_schema = {
"type": "object",
"properties": {
"buffer_id": {
"description": "Buffer identifier, should be unique - will be used as part of filename",
"type": "string",
"pattern": "^[a-zA-Z0-9]+"
},
"reporting": {
"description": "Log some statistics on buffer at given interval (in seconds)",
"type": "number"
}
},
"required": ["buffer_id"],
"description": "Initialize buffer"
}
can_write_schema = {
"description": "Returns True if buffer ready for write, otherwise False",
"type": "boolean"
}
write_schema = {
"description": "Push data to buffer; always a list of json serializable items",
"type": "array"
}
can_read_schema = {
"description": "Returns True if data can be read, otherwise False",
"type": "boolean"
}
read_schema = {
"description": "Pop data from buffer, always a list",
"type": "array"
}
def init(self, buffer_id, reporting=None, *args, **kwargs):
self.db_name = buffer_id
self.db_path = os.path.join(os.path.abspath(os.path.curdir), self.db_name + ".sq3")
self.db = adbapi.ConnectionPool('sqlite3', self.db_path, check_same_thread=False)
self._pushed_values = 0
self._popped_values = 0
self._latest_timestamp = 0
self._value = None
self._changed = None
self._statlogging = None
def ready(length):
def log_stats():
_log.info("{} : pushed {}, popped {} (latest timestamp: {}) ".format(self.db_name, self._pushed_values, self._popped_values, self._latest_timestamp))
self._statlogging.reset()
self._changed = True # Something has changed, need to check if readable
# install timer to report on pushing/popping
if reporting:
self._statlogging= async.DelayedCall(reporting, log_stats)
self.scheduler_wakeup()
def create(db):
# Create simple queue table. Using TEXT unless there is a reason not to.
db.execute("CREATE TABLE IF NOT EXISTS queue (value BLOB)")
def error(e):
_log.error("Error initializing queue {}: {}".format(self.db_name, e))
q = self.db.runInteraction(create)
q.addCallback(ready)
q.addErrback(error)
def can_write(self):
# Can always write after init, meaning changed is no longer None
return self._changed is not None
def write(self, value):
def error(e):
_log.warning("Error during write: {}".format(e))
done() # Call done to wake scheduler, not sure this is a good idea
def done(unused=None):
self._changed = True # Let can_read know there may be something new to read
self.scheduler_wakeup()
self._pushed_values += len(value)
try:
value = json.dumps(value) # Convert to string for sqlite
except TypeError:
_log.error("Value is not json serializable")
else:
q = self.db.runOperation("INSERT INTO queue (value) VALUES (?)", (value, ))
q.addCallback(done)
q.addErrback(error)
def can_read(self):
def error(e):
_log.warning("Error during read: {}".format(e))
done()
def done(value=None):
if value:
self._changed = True # alert can_read that the database has changed
self._value = value
self.scheduler_wakeup()
def pop(db):
limit = 2 # <- Not empirically/theoretically tested
db.execute("SELECT value FROM queue ORDER BY rowid LIMIT (?)", (limit,))
value = db.fetchall() # a list of (value, ) tuples, or None
if value:
# pop values (i.e. delete rows with len(value) lowest row ids)
db.execute("DELETE FROM queue WHERE rowid in (SELECT rowid FROM queue ORDER BY rowid LIMIT (?))",
(len(value),))
return value
if self._value :
# There is a value to read
return True
elif self._changed :
# Something has changed, try to pop a value
self._changed = False
q = self.db.runInteraction(pop)
q.addCallback(done)
q.addErrback(error)
# Nothing to do
return False
def read(self):
value = []
while self._value:
# get an item from list of replies
dbtuple = self._value.pop(0)
# the item is a tuple, get the first value
dbvalue = dbtuple[0]
# convert value from string and return it
try:
value.extend(json.loads(dbvalue))
except ValueError:
_log.error("No value decoded - possibly corrupt file")
self._popped_values += len(value)
return value
def close(self):
if self._statlogging:
self._statlogging.cancel()
def done(response):
# A count response; [(cnt,)]
if response[0][0] == 0:
try:
os.remove(self.db_path)
except:
# Failed for some reason
_log.warning("Could not remove db file {}".format(self._dbpath))
q = self.db.runQuery("SELECT COUNT(*) from queue")
q.addCallback(done)
self.db.close()
|
EricssonResearch/calvin-base
|
calvinextras/calvinsys/data/buffer/PersistentBuffer.py
|
Python
|
apache-2.0
| 7,704 | 0.005582 |
"""Treadmill app configurator daemon, subscribes to eventmgr events.
"""
import click
from .. import appcfgmgr
def init():
"""Top level command handler."""
@click.command()
@click.option('--approot', type=click.Path(exists=True),
envvar='TREADMILL_APPROOT', required=True)
def top(approot):
"""Starts appcfgmgr process."""
mgr = appcfgmgr.AppCfgMgr(root=approot)
mgr.run()
return top
|
keithhendry/treadmill
|
treadmill/sproc/appcfgmgr.py
|
Python
|
apache-2.0
| 452 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified peering for the express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> "_models.ExpressRouteCircuitPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitPeeringListResult"]:
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_express_route_circuit_peerings_operations.py | Python | mit | 21,960 | 0.005237 |
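# Illustrative sketch, not taken from the file above: how the generated
# ExpressRouteCircuitPeeringsOperations class is normally reached through the
# versioned management client. The resource names, the subscription id
# placeholder and the use of DefaultAzureCredential are assumptions made for
# this example only.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def main():
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            ops = client.express_route_circuit_peerings
            # begin_delete returns an AsyncLROPoller; awaiting result() waits for the LRO.
            poller = await ops.begin_delete("my-rg", "my-circuit", "AzurePrivatePeering")
            await poller.result()
            # list returns an AsyncItemPaged that is consumed with `async for`.
            async for peering in ops.list("my-rg", "my-circuit"):
                print(peering.name)

asyncio.run(main())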
#!/usr/bin/env python
import time
import json
import random
import re
from bottle import route, hook, response, run, static_file
@route('/')
def index():
return static_file('index.html', root = '.')
@route('/maptweets.js')
def maptweets_js():
    return static_file('maptweets.js', root = '.')
@route('/cross.jpg')
def cross_jpg():
    return static_file('cross.jpg', root = '.')
@route('/light.png')
def light_png():
    return static_file('light.png', root = '.')
@route('/event.png')
def event_png():
    return static_file('event.png', root = '.')
run(host = '0.0.0.0', port = 80, server = 'tornado', debug = True)
| relh/cathhacks | app.py | Python | mit | 626 | 0.038339 |
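# Illustrative sketch, not taken from the file above: the per-file asset
# handlers can be collapsed into one parameterized route using bottle's `re`
# path filter. The asset extensions and the port are assumptions for the example.
from bottle import route, run, static_file

@route('/')
def index():
    return static_file('index.html', root='.')

@route(r'/<filename:re:.*\.(js|jpg|png)>')
def asset(filename):
    return static_file(filename, root='.')

# run(host='0.0.0.0', port=8080)  # port 80 usually requires elevated privileges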
from __future__ import print_function, division
from sympy.core.numbers import nan
from .function import Function
class Mod(Function):
"""Represents a modulo operation on symbolic expressions.
Receives two arguments, dividend p and divisor q.
The convention used is the same as Python's: the remainder always has the
same sign as the divisor.
Examples
========
>>> from sympy.abc import x, y
>>> x**2 % y
Mod(x**2, y)
>>> _.subs({x: 5, y: 6})
1
"""
@classmethod
def eval(cls, p, q):
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.exprtools import gcd_terms
from sympy.polys.polytools import gcd
def doit(p, q):
"""Try to return p % q if both are numbers or +/-p is known
to be less than or equal q.
"""
if p.is_infinite or q.is_infinite or p is nan or q is nan:
return nan
if (p == q or p == -q or
p.is_Pow and p.exp.is_Integer and p.base == q or
p.is_integer and q == 1):
return S.Zero
if q.is_Number:
if p.is_Number:
return (p % q)
if q == 2:
if p.is_even:
return S.Zero
elif p.is_odd:
return S.One
# by ratio
r = p/q
try:
d = int(r)
except TypeError:
pass
else:
if type(d) is int:
rv = p - d*q
if (rv*q < 0) == True:
rv += q
return rv
# by difference
d = p - q
if d.is_negative:
if q.is_negative:
return d
elif q.is_positive:
return p
rv = doit(p, q)
if rv is not None:
return rv
# denest
if p.func is cls:
# easy
qinner = p.args[1]
if qinner == q:
return p
# XXX other possibilities?
# extract gcd; any further simplification should be done by the user
G = gcd(p, q)
if G != 1:
p, q = [
gcd_terms(i/G, clear=False, fraction=False) for i in (p, q)]
pwas, qwas = p, q
# simplify terms
# (x + y + 2) % x -> Mod(y + 2, x)
if p.is_Add:
args = []
for i in p.args:
a = cls(i, q)
if a.count(cls) > i.count(cls):
args.append(i)
else:
args.append(a)
if args != list(p.args):
p = Add(*args)
else:
# handle coefficients if they are not Rational
# since those are not handled by factor_terms
# e.g. Mod(.6*x, .3*y) -> 0.3*Mod(2*x, y)
cp, p = p.as_coeff_Mul()
cq, q = q.as_coeff_Mul()
ok = False
if not cp.is_Rational or not cq.is_Rational:
r = cp % cq
if r == 0:
G *= cq
p *= int(cp/cq)
ok = True
if not ok:
p = cp*p
q = cq*q
# simple -1 extraction
if p.could_extract_minus_sign() and q.could_extract_minus_sign():
G, p, q = [-i for i in (G, p, q)]
# check again to see if p and q can now be handled as numbers
rv = doit(p, q)
if rv is not None:
return rv*G
# put 1.0 from G on inside
if G.is_Float and G == 1:
p *= G
return cls(p, q, evaluate=False)
elif G.is_Mul and G.args[0].is_Float and G.args[0] == 1:
p = G.args[0]*p
G = Mul._from_args(G.args[1:])
return G*cls(p, q, evaluate=(p, q) != (pwas, qwas))
def _eval_is_integer(self):
from sympy.core.logic import fuzzy_and, fuzzy_not
p, q = self.args
if fuzzy_and([p.is_integer, q.is_integer, fuzzy_not(q.is_zero)]):
return True
def _eval_is_nonnegative(self):
if self.args[1].is_positive:
return True
def _eval_is_nonpositive(self):
if self.args[1].is_negative:
return True
| Shaswat27/sympy | sympy/core/mod.py | Python | bsd-3-clause | 4,488 | 0.000223 |
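# Illustrative example, not part of the file above: the sign convention stated
# in the Mod docstring (the remainder takes the sign of the divisor, as in Python).
from sympy import Mod, symbols

x = symbols('x', integer=True)
print(Mod(7, 3))     # 1
print(Mod(-7, 3))    # 2, same sign as the divisor, like Python's -7 % 3
print(Mod(7, -3))    # -2
print(Mod(x, 1))     # 0, any integer modulo 1
print(Mod(x**2, x))  # 0, handled by the p.is_Pow and p.base == q branch of eval()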
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""Re-runs initial_sharding.py with a varbinary keyspace_id."""
from vtdb import keyrange_constants
import base_sharding
import initial_sharding
import utils
# this test is just re-running an entire initial_sharding.py with a
# varbinary keyspace_id
if __name__ == '__main__':
base_sharding.keyspace_id_type = keyrange_constants.KIT_BYTES
utils.main(initial_sharding)
| theskyinflames/bpulse-go-client | vendor/github.com/youtube/vitess/test/initial_sharding_bytes.py | Python | apache-2.0 | 553 | 0.003617 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from typing import Tuple
from time import sleep
from fedlearner_webconsole.composer.interface import IItem, IRunner, ItemType
from fedlearner_webconsole.composer.models import Context, RunnerStatus
from fedlearner_webconsole.db import get_session
from fedlearner_webconsole.workflow.models import Workflow, WorkflowState
class WorkflowCronJobItem(IItem):
def __init__(self, task_id: int):
self.id = task_id
def type(self) -> ItemType:
return ItemType.WORKFLOW_CRON_JOB
def get_id(self) -> int:
return self.id
def __eq__(self, obj: IItem):
return self.id == obj.id and self.type() == obj.type()
class WorkflowCronJob(IRunner):
""" start workflow every intervals
"""
def __init__(self, task_id: int):
self._workflow_id = task_id
self._msg = None
def start(self, context: Context):
with get_session(context.db_engine) as session:
try:
workflow: Workflow = session.query(Workflow).filter_by(
id=self._workflow_id).one()
                # TODO: This is a hack!!! Temporarily use this method
# cc @hangweiqiang: Transaction State Refactor
state = workflow.get_state_for_frontend()
if state in ('COMPLETED', 'FAILED', 'READY', 'STOPPED', 'NEW'):
if state in ('COMPLETED', 'FAILED'):
workflow.update_target_state(
target_state=WorkflowState.STOPPED)
session.commit()
# check workflow stopped
# TODO: use composer timeout cc @yurunyu
for _ in range(24):
# use session refresh to get the latest info
                            # otherwise it'll use the identity map locally
session.refresh(workflow)
if workflow.state == WorkflowState.STOPPED:
break
sleep(5)
else:
self._msg = f'failed to stop \
workflow[{self._workflow_id}]'
return
workflow.update_target_state(
target_state=WorkflowState.RUNNING)
session.commit()
self._msg = f'restarted workflow[{self._workflow_id}]'
elif state == 'RUNNING':
self._msg = f'skip restarting workflow[{self._workflow_id}]'
elif state == 'INVALID':
self._msg = f'current workflow[{self._workflow_id}] \
is invalid'
else:
self._msg = f'workflow[{self._workflow_id}] \
                        state is {state}, which is out of expectation'
except Exception as err: # pylint: disable=broad-except
self._msg = f'exception of workflow[{self._workflow_id}], \
details is {err}'
def result(self, context: Context) -> Tuple[RunnerStatus, dict]:
del context # unused by result
if self._msg is None:
return RunnerStatus.RUNNING, {}
output = {'msg': self._msg}
return RunnerStatus.DONE, output
| bytedance/fedlearner | web_console_v2/api/fedlearner_webconsole/workflow/cronjob.py | Python | apache-2.0 | 3,936 | 0.000254 |
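# Illustrative sketch, not part of the file above: one way a scheduler could
# drive the IRunner contract used by WorkflowCronJob: call start() once, then
# poll result() until it stops reporting RunnerStatus.RUNNING. The Context
# construction and the polling cadence are assumptions; only the
# start()/result() interface comes from the file itself.
from time import sleep
from fedlearner_webconsole.composer.models import RunnerStatus

def drive_runner(runner, context, poll_seconds=5):
    runner.start(context)
    while True:
        status, output = runner.result(context)
        if status != RunnerStatus.RUNNING:
            return status, output
        sleep(poll_seconds)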
"""Helper stuff for things"""
import gc
gc.disable()
print 'Disabled GC'
def timeit(func, iter = 1000, *args, **kwargs):
"""timeit(func, iter = 1000 *args, **kwargs) -> elapsed time
calls func iter times with args and kwargs, returns time elapsed
"""
import time
r = range(iter)
t = time.time()
for i in r:
func(*args, **kwargs)
return time.time() - t
| hortonworks/hortonworks-sandbox | desktop/core/ext-py/Twisted/doc/core/benchmarks/timer.py | Python | apache-2.0 | 401 | 0.009975 |
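# Illustrative usage, not part of the file above; assumes the timeit() helper
# defined there is in scope. The measured function is made up for the example.
def build_squares(n):
    return [i * i for i in range(n)]

elapsed = timeit(build_squares, 100, 10000)  # 100 iterations of build_squares(10000)
print('total seconds: %f' % elapsed)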
import collections
class Excel(object):
def __init__(self, H, W):
"""
:type H: int
:type W: str
"""
self.table = [[{'v': 0, 'sum': None} for _ in range(ord(W) - 64)] for __ in range(H)]
def set(self, r, c, v):
"""
:type r: int
:type c: str
:type v: int
:rtype: void
"""
self.table[r - 1][ord(c) - 65] = {'v': v, 'sum': None}
def get(self, r, c):
"""
:type r: int
:type c: str
:rtype: int
"""
cell = self.table[r - 1][ord(c) - 65]
if not cell['sum']: return cell['v']
return sum(self.get(*pos) * cell['sum'][pos] for pos in cell['sum'])
def sum(self, r, c, strs):
"""
:type r: int
:type c: str
:type strs: List[str]
:rtype: int
"""
        self.table[r - 1][ord(c) - 65]['sum'] = self.parse(strs)
return self.get(r, c)
def parse(self, strs):
c = collections.Counter()
for s in strs:
s, e = s.split(':')[0], s.split(':')[1] if ':' in s else s
for i in range(int(s[1:]), int(e[1:]) + 1):
for j in range(ord(s[0]) - 64, ord(e[0]) - 64 + 1):
c[(i, chr(j + 64))] += 1
return c
# Your Excel object will be instantiated and called as such:
# obj = Excel(H, W)
# obj.set(r,c,v)
# param_2 = obj.get(r,c)
# param_3 = obj.sum(r,c,strs)
| Mlieou/oj_solutions | leetcode/python/ex_631.py | Python | mit | 1,437 | 0.004175 |
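# Illustrative walk-through, not part of the file above; assumes the Excel class
# defined there is in scope. It mirrors the LeetCode 631 example: sum() stores
# the formula lazily, so later set() calls are reflected by get().
excel = Excel(3, 'C')                      # 3 rows, columns A..C, all cells start at 0
excel.set(1, 'A', 2)                       # A1 = 2
print(excel.sum(3, 'C', ['A1', 'A1:B2']))  # 4  (A1 counted once alone and once in the range)
excel.set(2, 'B', 2)                       # B2 = 2
print(excel.get(3, 'C'))                   # 6  (formula re-evaluated with the new B2)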
# -*- coding: utf-8 -*-
# Authors: Y. Jia <ytjia.zju@gmail.com>
"""
Given a collection of intervals, merge all overlapping intervals.
https://leetcode.com/problems/merge-intervals/description/
"""
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __eq__(self, other):
return self.start == other.start and self.end == other.end
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
lens = len(intervals)
if lens <= 1:
return intervals
merged_intervals = list()
intervals.sort(key=lambda interval: interval.start)
i = 0
j = i + 1
while j < lens:
if intervals[i].end >= intervals[j].start:
intervals[i].end = max(intervals[i].end, intervals[j].end)
j += 1
else:
merged_intervals.append(intervals[i])
i = j
j = i + 1
merged_intervals.append(intervals[i])
return merged_intervals
| ytjia/coding-practice | algorithms/python/leetcode/MergeIntervals.py | Python | mit | 1,167 | 0 |
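# Illustrative usage, not part of the file above; assumes the Interval and
# Solution classes defined there are in scope.
intervals = [Interval(1, 3), Interval(2, 6), Interval(8, 10), Interval(15, 18)]
merged = Solution().merge(intervals)
print([(it.start, it.end) for it in merged])  # [(1, 6), (8, 10), (15, 18)]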
#AUTOGENERATED!!! Date: 2020-06-19 19:44:10.693270
from opcua.ua.uaerrors import UaStatusCodeError
class StatusCodes:
Good = 0
Uncertain = 0x40000000
Bad = 0x80000000
BadUnexpectedError = 0x80010000
BadInternalError = 0x80020000
BadOutOfMemory = 0x80030000
BadResourceUnavailable = 0x80040000
BadCommunicationError = 0x80050000
BadEncodingError = 0x80060000
BadDecodingError = 0x80070000
BadEncodingLimitsExceeded = 0x80080000
BadRequestTooLarge = 0x80B80000
BadResponseTooLarge = 0x80B90000
BadUnknownResponse = 0x80090000
BadTimeout = 0x800A0000
BadServiceUnsupported = 0x800B0000
BadShutdown = 0x800C0000
BadServerNotConnected = 0x800D0000
BadServerHalted = 0x800E0000
BadNothingToDo = 0x800F0000
BadTooManyOperations = 0x80100000
BadTooManyMonitoredItems = 0x80DB0000
BadDataTypeIdUnknown = 0x80110000
BadCertificateInvalid = 0x80120000
BadSecurityChecksFailed = 0x80130000
BadCertificatePolicyCheckFailed = 0x81140000
BadCertificateTimeInvalid = 0x80140000
BadCertificateIssuerTimeInvalid = 0x80150000
BadCertificateHostNameInvalid = 0x80160000
BadCertificateUriInvalid = 0x80170000
BadCertificateUseNotAllowed = 0x80180000
BadCertificateIssuerUseNotAllowed = 0x80190000
BadCertificateUntrusted = 0x801A0000
BadCertificateRevocationUnknown = 0x801B0000
BadCertificateIssuerRevocationUnknown = 0x801C0000
BadCertificateRevoked = 0x801D0000
BadCertificateIssuerRevoked = 0x801E0000
BadCertificateChainIncomplete = 0x810D0000
BadUserAccessDenied = 0x801F0000
BadIdentityTokenInvalid = 0x80200000
BadIdentityTokenRejected = 0x80210000
BadSecureChannelIdInvalid = 0x80220000
BadInvalidTimestamp = 0x80230000
BadNonceInvalid = 0x80240000
BadSessionIdInvalid = 0x80250000
BadSessionClosed = 0x80260000
BadSessionNotActivated = 0x80270000
BadSubscriptionIdInvalid = 0x80280000
BadRequestHeaderInvalid = 0x802A0000
BadTimestampsToReturnInvalid = 0x802B0000
BadRequestCancelledByClient = 0x802C0000
BadTooManyArguments = 0x80E50000
BadLicenseExpired = 0x810E0000
BadLicenseLimitsExceeded = 0x810F0000
BadLicenseNotAvailable = 0x81100000
GoodSubscriptionTransferred = 0x002D0000
GoodCompletesAsynchronously = 0x002E0000
GoodOverload = 0x002F0000
GoodClamped = 0x00300000
BadNoCommunication = 0x80310000
BadWaitingForInitialData = 0x80320000
BadNodeIdInvalid = 0x80330000
BadNodeIdUnknown = 0x80340000
BadAttributeIdInvalid = 0x80350000
BadIndexRangeInvalid = 0x80360000
BadIndexRangeNoData = 0x80370000
BadDataEncodingInvalid = 0x80380000
BadDataEncodingUnsupported = 0x80390000
BadNotReadable = 0x803A0000
BadNotWritable = 0x803B0000
BadOutOfRange = 0x803C0000
BadNotSupported = 0x803D0000
BadNotFound = 0x803E0000
BadObjectDeleted = 0x803F0000
BadNotImplemented = 0x80400000
BadMonitoringModeInvalid = 0x80410000
BadMonitoredItemIdInvalid = 0x80420000
BadMonitoredItemFilterInvalid = 0x80430000
BadMonitoredItemFilterUnsupported = 0x80440000
BadFilterNotAllowed = 0x80450000
BadStructureMissing = 0x80460000
BadEventFilterInvalid = 0x80470000
BadContentFilterInvalid = 0x80480000
BadFilterOperatorInvalid = 0x80C10000
BadFilterOperatorUnsupported = 0x80C20000
BadFilterOperandCountMismatch = 0x80C30000
BadFilterOperandInvalid = 0x80490000
BadFilterElementInvalid = 0x80C40000
BadFilterLiteralInvalid = 0x80C50000
BadContinuationPointInvalid = 0x804A0000
BadNoContinuationPoints = 0x804B0000
BadReferenceTypeIdInvalid = 0x804C0000
BadBrowseDirectionInvalid = 0x804D0000
BadNodeNotInView = 0x804E0000
BadNumericOverflow = 0x81120000
BadServerUriInvalid = 0x804F0000
BadServerNameMissing = 0x80500000
BadDiscoveryUrlMissing = 0x80510000
BadSempahoreFileMissing = 0x80520000
BadRequestTypeInvalid = 0x80530000
BadSecurityModeRejected = 0x80540000
BadSecurityPolicyRejected = 0x80550000
BadTooManySessions = 0x80560000
BadUserSignatureInvalid = 0x80570000
BadApplicationSignatureInvalid = 0x80580000
BadNoValidCertificates = 0x80590000
BadIdentityChangeNotSupported = 0x80C60000
BadRequestCancelledByRequest = 0x805A0000
BadParentNodeIdInvalid = 0x805B0000
BadReferenceNotAllowed = 0x805C0000
BadNodeIdRejected = 0x805D0000
BadNodeIdExists = 0x805E0000
BadNodeClassInvalid = 0x805F0000
BadBrowseNameInvalid = 0x80600000
BadBrowseNameDuplicated = 0x80610000
BadNodeAttributesInvalid = 0x80620000
BadTypeDefinitionInvalid = 0x80630000
BadSourceNodeIdInvalid = 0x80640000
BadTargetNodeIdInvalid = 0x80650000
BadDuplicateReferenceNotAllowed = 0x80660000
BadInvalidSelfReference = 0x80670000
BadReferenceLocalOnly = 0x80680000
BadNoDeleteRights = 0x80690000
UncertainReferenceNotDeleted = 0x40BC0000
BadServerIndexInvalid = 0x806A0000
BadViewIdUnknown = 0x806B0000
BadViewTimestampInvalid = 0x80C90000
BadViewParameterMismatch = 0x80CA0000
BadViewVersionInvalid = 0x80CB0000
UncertainNotAllNodesAvailable = 0x40C00000
GoodResultsMayBeIncomplete = 0x00BA0000
BadNotTypeDefinition = 0x80C80000
UncertainReferenceOutOfServer = 0x406C0000
BadTooManyMatches = 0x806D0000
BadQueryTooComplex = 0x806E0000
BadNoMatch = 0x806F0000
BadMaxAgeInvalid = 0x80700000
BadSecurityModeInsufficient = 0x80E60000
BadHistoryOperationInvalid = 0x80710000
BadHistoryOperationUnsupported = 0x80720000
BadInvalidTimestampArgument = 0x80BD0000
BadWriteNotSupported = 0x80730000
BadTypeMismatch = 0x80740000
BadMethodInvalid = 0x80750000
BadArgumentsMissing = 0x80760000
BadNotExecutable = 0x81110000
BadTooManySubscriptions = 0x80770000
BadTooManyPublishRequests = 0x80780000
BadNoSubscription = 0x80790000
BadSequenceNumberUnknown = 0x807A0000
BadMessageNotAvailable = 0x807B0000
BadInsufficientClientProfile = 0x807C0000
BadStateNotActive = 0x80BF0000
BadAlreadyExists = 0x81150000
BadTcpServerTooBusy = 0x807D0000
BadTcpMessageTypeInvalid = 0x807E0000
BadTcpSecureChannelUnknown = 0x807F0000
BadTcpMessageTooLarge = 0x80800000
BadTcpNotEnoughResources = 0x80810000
BadTcpInternalError = 0x80820000
BadTcpEndpointUrlInvalid = 0x80830000
BadRequestInterrupted = 0x80840000
BadRequestTimeout = 0x80850000
BadSecureChannelClosed = 0x80860000
BadSecureChannelTokenUnknown = 0x80870000
BadSequenceNumberInvalid = 0x80880000
BadProtocolVersionUnsupported = 0x80BE0000
BadConfigurationError = 0x80890000
BadNotConnected = 0x808A0000
BadDeviceFailure = 0x808B0000
BadSensorFailure = 0x808C0000
BadOutOfService = 0x808D0000
BadDeadbandFilterInvalid = 0x808E0000
UncertainNoCommunicationLastUsableValue = 0x408F0000
UncertainLastUsableValue = 0x40900000
UncertainSubstituteValue = 0x40910000
UncertainInitialValue = 0x40920000
UncertainSensorNotAccurate = 0x40930000
UncertainEngineeringUnitsExceeded = 0x40940000
UncertainSubNormal = 0x40950000
GoodLocalOverride = 0x00960000
BadRefreshInProgress = 0x80970000
BadConditionAlreadyDisabled = 0x80980000
BadConditionAlreadyEnabled = 0x80CC0000
BadConditionDisabled = 0x80990000
BadEventIdUnknown = 0x809A0000
BadEventNotAcknowledgeable = 0x80BB0000
BadDialogNotActive = 0x80CD0000
BadDialogResponseInvalid = 0x80CE0000
BadConditionBranchAlreadyAcked = 0x80CF0000
BadConditionBranchAlreadyConfirmed = 0x80D00000
BadConditionAlreadyShelved = 0x80D10000
BadConditionNotShelved = 0x80D20000
BadShelvingTimeOutOfRange = 0x80D30000
BadNoData = 0x809B0000
BadBoundNotFound = 0x80D70000
BadBoundNotSupported = 0x80D80000
BadDataLost = 0x809D0000
BadDataUnavailable = 0x809E0000
BadEntryExists = 0x809F0000
BadNoEntryExists = 0x80A00000
BadTimestampNotSupported = 0x80A10000
GoodEntryInserted = 0x00A20000
GoodEntryReplaced = 0x00A30000
UncertainDataSubNormal = 0x40A40000
GoodNoData = 0x00A50000
GoodMoreData = 0x00A60000
BadAggregateListMismatch = 0x80D40000
BadAggregateNotSupported = 0x80D50000
BadAggregateInvalidInputs = 0x80D60000
BadAggregateConfigurationRejected = 0x80DA0000
GoodDataIgnored = 0x00D90000
BadRequestNotAllowed = 0x80E40000
BadRequestNotComplete = 0x81130000
GoodEdited = 0x00DC0000
GoodPostActionFailed = 0x00DD0000
UncertainDominantValueChanged = 0x40DE0000
GoodDependentValueChanged = 0x00E00000
BadDominantValueChanged = 0x80E10000
UncertainDependentValueChanged = 0x40E20000
BadDependentValueChanged = 0x80E30000
GoodEdited_DependentValueChanged = 0x01160000
GoodEdited_DominantValueChanged = 0x01170000
GoodEdited_DominantValueChanged_DependentValueChanged = 0x01180000
BadEdited_OutOfRange = 0x81190000
BadInitialValue_OutOfRange = 0x811A0000
BadOutOfRange_DominantValueChanged = 0x811B0000
BadEdited_OutOfRange_DominantValueChanged = 0x811C0000
BadOutOfRange_DominantValueChanged_DependentValueChanged = 0x811D0000
BadEdited_OutOfRange_DominantValueChanged_DependentValueChanged = 0x811E0000
GoodCommunicationEvent = 0x00A70000
GoodShutdownEvent = 0x00A80000
GoodCallAgain = 0x00A90000
GoodNonCriticalTimeout = 0x00AA0000
BadInvalidArgument = 0x80AB0000
BadConnectionRejected = 0x80AC0000
BadDisconnect = 0x80AD0000
BadConnectionClosed = 0x80AE0000
BadInvalidState = 0x80AF0000
BadEndOfStream = 0x80B00000
BadNoDataAvailable = 0x80B10000
BadWaitingForResponse = 0x80B20000
BadOperationAbandoned = 0x80B30000
BadExpectedStreamToBlock = 0x80B40000
BadWouldBlock = 0x80B50000
BadSyntaxError = 0x80B60000
BadMaxConnectionsReached = 0x80B70000
code_to_name_doc = {
0: ('Good', 'The operation completed successfully.'),
0x40000000: ('Uncertain', 'The operation completed however its outputs may not be usable.'),
0x80000000: ('Bad', 'The operation failed.'),
0x80010000: ('BadUnexpectedError', '"An unexpected error occurred."'),
0x80020000: ('BadInternalError', '"An internal error occurred as a result of a programming or configuration error."'),
0x80030000: ('BadOutOfMemory', '"Not enough memory to complete the operation."'),
0x80040000: ('BadResourceUnavailable', '"An operating system resource is not available."'),
0x80050000: ('BadCommunicationError', '"A low level communication error occurred."'),
0x80060000: ('BadEncodingError', '"Encoding halted because of invalid data in the objects being serialized."'),
0x80070000: ('BadDecodingError', '"Decoding halted because of invalid data in the stream."'),
0x80080000: ('BadEncodingLimitsExceeded', '"The message encoding/decoding limits imposed by the stack have been exceeded."'),
0x80B80000: ('BadRequestTooLarge', '"The request message size exceeds limits set by the server."'),
0x80B90000: ('BadResponseTooLarge', '"The response message size exceeds limits set by the client."'),
0x80090000: ('BadUnknownResponse', '"An unrecognized response was received from the server."'),
0x800A0000: ('BadTimeout', '"The operation timed out."'),
0x800B0000: ('BadServiceUnsupported', '"The server does not support the requested service."'),
0x800C0000: ('BadShutdown', '"The operation was cancelled because the application is shutting down."'),
0x800D0000: ('BadServerNotConnected', '"The operation could not complete because the client is not connected to the server."'),
0x800E0000: ('BadServerHalted', '"The server has stopped and cannot process any requests."'),
0x800F0000: ('BadNothingToDo', '"There was nothing to do because the client passed a list of operations with no elements."'),
0x80100000: ('BadTooManyOperations', '"The request could not be processed because it specified too many operations."'),
0x80DB0000: ('BadTooManyMonitoredItems', '"The request could not be processed because there are too many monitored items in the subscription."'),
0x80110000: ('BadDataTypeIdUnknown', '"The extension object cannot be (de)serialized because the data type id is not recognized."'),
0x80120000: ('BadCertificateInvalid', '"The certificate provided as a parameter is not valid."'),
0x80130000: ('BadSecurityChecksFailed', '"An error occurred verifying security."'),
0x81140000: ('BadCertificatePolicyCheckFailed', '"The certificate does not meet the requirements of the security policy."'),
0x80140000: ('BadCertificateTimeInvalid', '"The certificate has expired or is not yet valid."'),
0x80150000: ('BadCertificateIssuerTimeInvalid', '"An issuer certificate has expired or is not yet valid."'),
0x80160000: ('BadCertificateHostNameInvalid', '"The HostName used to connect to a server does not match a HostName in the certificate."'),
0x80170000: ('BadCertificateUriInvalid', '"The URI specified in the ApplicationDescription does not match the URI in the certificate."'),
0x80180000: ('BadCertificateUseNotAllowed', '"The certificate may not be used for the requested operation."'),
0x80190000: ('BadCertificateIssuerUseNotAllowed', '"The issuer certificate may not be used for the requested operation."'),
0x801A0000: ('BadCertificateUntrusted', '"The certificate is not trusted."'),
0x801B0000: ('BadCertificateRevocationUnknown', '"It was not possible to determine if the certificate has been revoked."'),
0x801C0000: ('BadCertificateIssuerRevocationUnknown', '"It was not possible to determine if the issuer certificate has been revoked."'),
0x801D0000: ('BadCertificateRevoked', '"The certificate has been revoked."'),
0x801E0000: ('BadCertificateIssuerRevoked', '"The issuer certificate has been revoked."'),
0x810D0000: ('BadCertificateChainIncomplete', '"The certificate chain is incomplete."'),
0x801F0000: ('BadUserAccessDenied', '"User does not have permission to perform the requested operation."'),
0x80200000: ('BadIdentityTokenInvalid', '"The user identity token is not valid."'),
0x80210000: ('BadIdentityTokenRejected', '"The user identity token is valid but the server has rejected it."'),
0x80220000: ('BadSecureChannelIdInvalid', '"The specified secure channel is no longer valid."'),
0x80230000: ('BadInvalidTimestamp', '"The timestamp is outside the range allowed by the server."'),
0x80240000: ('BadNonceInvalid', '"The nonce does appear to be not a random value or it is not the correct length."'),
0x80250000: ('BadSessionIdInvalid', '"The session id is not valid."'),
0x80260000: ('BadSessionClosed', '"The session was closed by the client."'),
0x80270000: ('BadSessionNotActivated', '"The session cannot be used because ActivateSession has not been called."'),
0x80280000: ('BadSubscriptionIdInvalid', '"The subscription id is not valid."'),
0x802A0000: ('BadRequestHeaderInvalid', '"The header for the request is missing or invalid."'),
0x802B0000: ('BadTimestampsToReturnInvalid', '"The timestamps to return parameter is invalid."'),
0x802C0000: ('BadRequestCancelledByClient', '"The request was cancelled by the client."'),
0x80E50000: ('BadTooManyArguments', '"Too many arguments were provided."'),
0x810E0000: ('BadLicenseExpired', '"The server requires a license to operate in general or to perform a service or operation, but existing license is expired."'),
0x810F0000: ('BadLicenseLimitsExceeded', '"The server has limits on number of allowed operations / objects, based on installed licenses, and these limits where exceeded."'),
0x81100000: ('BadLicenseNotAvailable', '"The server does not have a license which is required to operate in general or to perform a service or operation."'),
0x002D0000: ('GoodSubscriptionTransferred', '"The subscription was transferred to another session."'),
0x002E0000: ('GoodCompletesAsynchronously', '"The processing will complete asynchronously."'),
0x002F0000: ('GoodOverload', '"Sampling has slowed down due to resource limitations."'),
0x00300000: ('GoodClamped', '"The value written was accepted but was clamped."'),
0x80310000: ('BadNoCommunication', '"Communication with the data source is defined, but not established, and there is no last known value available."'),
0x80320000: ('BadWaitingForInitialData', '"Waiting for the server to obtain values from the underlying data source."'),
0x80330000: ('BadNodeIdInvalid', '"The syntax of the node id is not valid."'),
0x80340000: ('BadNodeIdUnknown', '"The node id refers to a node that does not exist in the server address space."'),
0x80350000: ('BadAttributeIdInvalid', '"The attribute is not supported for the specified Node."'),
0x80360000: ('BadIndexRangeInvalid', '"The syntax of the index range parameter is invalid."'),
0x80370000: ('BadIndexRangeNoData', '"No data exists within the range of indexes specified."'),
0x80380000: ('BadDataEncodingInvalid', '"The data encoding is invalid."'),
0x80390000: ('BadDataEncodingUnsupported', '"The server does not support the requested data encoding for the node."'),
0x803A0000: ('BadNotReadable', '"The access level does not allow reading or subscribing to the Node."'),
0x803B0000: ('BadNotWritable', '"The access level does not allow writing to the Node."'),
0x803C0000: ('BadOutOfRange', '"The value was out of range."'),
0x803D0000: ('BadNotSupported', '"The requested operation is not supported."'),
0x803E0000: ('BadNotFound', '"A requested item was not found or a search operation ended without success."'),
0x803F0000: ('BadObjectDeleted', '"The object cannot be used because it has been deleted."'),
0x80400000: ('BadNotImplemented', '"Requested operation is not implemented."'),
0x80410000: ('BadMonitoringModeInvalid', '"The monitoring mode is invalid."'),
0x80420000: ('BadMonitoredItemIdInvalid', '"The monitoring item id does not refer to a valid monitored item."'),
0x80430000: ('BadMonitoredItemFilterInvalid', '"The monitored item filter parameter is not valid."'),
0x80440000: ('BadMonitoredItemFilterUnsupported', '"The server does not support the requested monitored item filter."'),
0x80450000: ('BadFilterNotAllowed', '"A monitoring filter cannot be used in combination with the attribute specified."'),
0x80460000: ('BadStructureMissing', '"A mandatory structured parameter was missing or null."'),
0x80470000: ('BadEventFilterInvalid', '"The event filter is not valid."'),
0x80480000: ('BadContentFilterInvalid', '"The content filter is not valid."'),
0x80C10000: ('BadFilterOperatorInvalid', '"An unrecognized operator was provided in a filter."'),
0x80C20000: ('BadFilterOperatorUnsupported', '"A valid operator was provided, but the server does not provide support for this filter operator."'),
0x80C30000: ('BadFilterOperandCountMismatch', '"The number of operands provided for the filter operator was less then expected for the operand provided."'),
0x80490000: ('BadFilterOperandInvalid', '"The operand used in a content filter is not valid."'),
0x80C40000: ('BadFilterElementInvalid', '"The referenced element is not a valid element in the content filter."'),
0x80C50000: ('BadFilterLiteralInvalid', '"The referenced literal is not a valid value."'),
0x804A0000: ('BadContinuationPointInvalid', '"The continuation point provide is longer valid."'),
0x804B0000: ('BadNoContinuationPoints', '"The operation could not be processed because all continuation points have been allocated."'),
0x804C0000: ('BadReferenceTypeIdInvalid', '"The reference type id does not refer to a valid reference type node."'),
0x804D0000: ('BadBrowseDirectionInvalid', '"The browse direction is not valid."'),
0x804E0000: ('BadNodeNotInView', '"The node is not part of the view."'),
0x81120000: ('BadNumericOverflow', '"The number was not accepted because of a numeric overflow."'),
0x804F0000: ('BadServerUriInvalid', '"The ServerUri is not a valid URI."'),
0x80500000: ('BadServerNameMissing', '"No ServerName was specified."'),
0x80510000: ('BadDiscoveryUrlMissing', '"No DiscoveryUrl was specified."'),
0x80520000: ('BadSempahoreFileMissing', '"The semaphore file specified by the client is not valid."'),
0x80530000: ('BadRequestTypeInvalid', '"The security token request type is not valid."'),
0x80540000: ('BadSecurityModeRejected', '"The security mode does not meet the requirements set by the server."'),
0x80550000: ('BadSecurityPolicyRejected', '"The security policy does not meet the requirements set by the server."'),
0x80560000: ('BadTooManySessions', '"The server has reached its maximum number of sessions."'),
0x80570000: ('BadUserSignatureInvalid', '"The user token signature is missing or invalid."'),
0x80580000: ('BadApplicationSignatureInvalid', '"The signature generated with the client certificate is missing or invalid."'),
0x80590000: ('BadNoValidCertificates', '"The client did not provide at least one software certificate that is valid and meets the profile requirements for the server."'),
0x80C60000: ('BadIdentityChangeNotSupported', '"The server does not support changing the user identity assigned to the session."'),
0x805A0000: ('BadRequestCancelledByRequest', '"The request was cancelled by the client with the Cancel service."'),
0x805B0000: ('BadParentNodeIdInvalid', '"The parent node id does not to refer to a valid node."'),
0x805C0000: ('BadReferenceNotAllowed', '"The reference could not be created because it violates constraints imposed by the data model."'),
0x805D0000: ('BadNodeIdRejected', '"The requested node id was reject because it was either invalid or server does not allow node ids to be specified by the client."'),
0x805E0000: ('BadNodeIdExists', '"The requested node id is already used by another node."'),
0x805F0000: ('BadNodeClassInvalid', '"The node class is not valid."'),
0x80600000: ('BadBrowseNameInvalid', '"The browse name is invalid."'),
0x80610000: ('BadBrowseNameDuplicated', '"The browse name is not unique among nodes that share the same relationship with the parent."'),
0x80620000: ('BadNodeAttributesInvalid', '"The node attributes are not valid for the node class."'),
0x80630000: ('BadTypeDefinitionInvalid', '"The type definition node id does not reference an appropriate type node."'),
0x80640000: ('BadSourceNodeIdInvalid', '"The source node id does not reference a valid node."'),
0x80650000: ('BadTargetNodeIdInvalid', '"The target node id does not reference a valid node."'),
0x80660000: ('BadDuplicateReferenceNotAllowed', '"The reference type between the nodes is already defined."'),
0x80670000: ('BadInvalidSelfReference', '"The server does not allow this type of self reference on this node."'),
0x80680000: ('BadReferenceLocalOnly', '"The reference type is not valid for a reference to a remote server."'),
0x80690000: ('BadNoDeleteRights', '"The server will not allow the node to be deleted."'),
0x40BC0000: ('UncertainReferenceNotDeleted', '"The server was not able to delete all target references."'),
0x806A0000: ('BadServerIndexInvalid', '"The server index is not valid."'),
0x806B0000: ('BadViewIdUnknown', '"The view id does not refer to a valid view node."'),
0x80C90000: ('BadViewTimestampInvalid', '"The view timestamp is not available or not supported."'),
0x80CA0000: ('BadViewParameterMismatch', '"The view parameters are not consistent with each other."'),
0x80CB0000: ('BadViewVersionInvalid', '"The view version is not available or not supported."'),
0x40C00000: ('UncertainNotAllNodesAvailable', '"The list of references may not be complete because the underlying system is not available."'),
0x00BA0000: ('GoodResultsMayBeIncomplete', '"The server should have followed a reference to a node in a remote server but did not. The result set may be incomplete."'),
0x80C80000: ('BadNotTypeDefinition', '"The provided Nodeid was not a type definition nodeid."'),
0x406C0000: ('UncertainReferenceOutOfServer', '"One of the references to follow in the relative path references to a node in the address space in another server."'),
0x806D0000: ('BadTooManyMatches', '"The requested operation has too many matches to return."'),
0x806E0000: ('BadQueryTooComplex', '"The requested operation requires too many resources in the server."'),
0x806F0000: ('BadNoMatch', '"The requested operation has no match to return."'),
0x80700000: ('BadMaxAgeInvalid', '"The max age parameter is invalid."'),
0x80E60000: ('BadSecurityModeInsufficient', '"The operation is not permitted over the current secure channel."'),
0x80710000: ('BadHistoryOperationInvalid', '"The history details parameter is not valid."'),
0x80720000: ('BadHistoryOperationUnsupported', '"The server does not support the requested operation."'),
0x80BD0000: ('BadInvalidTimestampArgument', '"The defined timestamp to return was invalid."'),
0x80730000: ('BadWriteNotSupported', '"The server does not support writing the combination of value, status and timestamps provided."'),
        0x80740000: ('BadTypeMismatch', '"The value supplied for the attribute is not of the same type as the attribute\'s value."'),
0x80750000: ('BadMethodInvalid', '"The method id does not refer to a method for the specified object."'),
0x80760000: ('BadArgumentsMissing', '"The client did not specify all of the input arguments for the method."'),
0x81110000: ('BadNotExecutable', '"The executable attribute does not allow the execution of the method."'),
0x80770000: ('BadTooManySubscriptions', '"The server has reached its maximum number of subscriptions."'),
0x80780000: ('BadTooManyPublishRequests', '"The server has reached the maximum number of queued publish requests."'),
0x80790000: ('BadNoSubscription', '"There is no subscription available for this session."'),
0x807A0000: ('BadSequenceNumberUnknown', '"The sequence number is unknown to the server."'),
0x807B0000: ('BadMessageNotAvailable', '"The requested notification message is no longer available."'),
0x807C0000: ('BadInsufficientClientProfile', '"The client of the current session does not support one or more Profiles that are necessary for the subscription."'),
0x80BF0000: ('BadStateNotActive', '"The sub-state machine is not currently active."'),
0x81150000: ('BadAlreadyExists', '"An equivalent rule already exists."'),
0x807D0000: ('BadTcpServerTooBusy', '"The server cannot process the request because it is too busy."'),
0x807E0000: ('BadTcpMessageTypeInvalid', '"The type of the message specified in the header invalid."'),
0x807F0000: ('BadTcpSecureChannelUnknown', '"The SecureChannelId and/or TokenId are not currently in use."'),
0x80800000: ('BadTcpMessageTooLarge', '"The size of the message specified in the header is too large."'),
0x80810000: ('BadTcpNotEnoughResources', '"There are not enough resources to process the request."'),
0x80820000: ('BadTcpInternalError', '"An internal error occurred."'),
0x80830000: ('BadTcpEndpointUrlInvalid', '"The server does not recognize the QueryString specified."'),
0x80840000: ('BadRequestInterrupted', '"The request could not be sent because of a network interruption."'),
0x80850000: ('BadRequestTimeout', '"Timeout occurred while processing the request."'),
0x80860000: ('BadSecureChannelClosed', '"The secure channel has been closed."'),
0x80870000: ('BadSecureChannelTokenUnknown', '"The token has expired or is not recognized."'),
0x80880000: ('BadSequenceNumberInvalid', '"The sequence number is not valid."'),
0x80BE0000: ('BadProtocolVersionUnsupported', '"The applications do not have compatible protocol versions."'),
0x80890000: ('BadConfigurationError', '"There is a problem with the configuration that affects the usefulness of the value."'),
0x808A0000: ('BadNotConnected', '"The variable should receive its value from another variable, but has never been configured to do so."'),
0x808B0000: ('BadDeviceFailure', '"There has been a failure in the device/data source that generates the value that has affected the value."'),
0x808C0000: ('BadSensorFailure', '"There has been a failure in the sensor from which the value is derived by the device/data source."'),
0x808D0000: ('BadOutOfService', '"The source of the data is not operational."'),
0x808E0000: ('BadDeadbandFilterInvalid', '"The deadband filter is not valid."'),
0x408F0000: ('UncertainNoCommunicationLastUsableValue', '"Communication to the data source has failed. The variable value is the last value that had a good quality."'),
0x40900000: ('UncertainLastUsableValue', '"Whatever was updating this value has stopped doing so."'),
0x40910000: ('UncertainSubstituteValue', '"The value is an operational value that was manually overwritten."'),
0x40920000: ('UncertainInitialValue', '"The value is an initial value for a variable that normally receives its value from another variable."'),
0x40930000: ('UncertainSensorNotAccurate', '"The value is at one of the sensor limits."'),
0x40940000: ('UncertainEngineeringUnitsExceeded', '"The value is outside of the range of values defined for this parameter."'),
0x40950000: ('UncertainSubNormal', '"The value is derived from multiple sources and has less than the required number of Good sources."'),
0x00960000: ('GoodLocalOverride', '"The value has been overridden."'),
0x80970000: ('BadRefreshInProgress', '"This Condition refresh failed, a Condition refresh operation is already in progress."'),
0x80980000: ('BadConditionAlreadyDisabled', '"This condition has already been disabled."'),
0x80CC0000: ('BadConditionAlreadyEnabled', '"This condition has already been enabled."'),
0x80990000: ('BadConditionDisabled', '"Property not available, this condition is disabled."'),
0x809A0000: ('BadEventIdUnknown', '"The specified event id is not recognized."'),
0x80BB0000: ('BadEventNotAcknowledgeable', '"The event cannot be acknowledged."'),
0x80CD0000: ('BadDialogNotActive', '"The dialog condition is not active."'),
0x80CE0000: ('BadDialogResponseInvalid', '"The response is not valid for the dialog."'),
0x80CF0000: ('BadConditionBranchAlreadyAcked', '"The condition branch has already been acknowledged."'),
0x80D00000: ('BadConditionBranchAlreadyConfirmed', '"The condition branch has already been confirmed."'),
0x80D10000: ('BadConditionAlreadyShelved', '"The condition has already been shelved."'),
0x80D20000: ('BadConditionNotShelved', '"The condition is not currently shelved."'),
0x80D30000: ('BadShelvingTimeOutOfRange', '"The shelving time not within an acceptable range."'),
0x809B0000: ('BadNoData', '"No data exists for the requested time range or event filter."'),
0x80D70000: ('BadBoundNotFound', '"No data found to provide upper or lower bound value."'),
0x80D80000: ('BadBoundNotSupported', '"The server cannot retrieve a bound for the variable."'),
0x809D0000: ('BadDataLost', '"Data is missing due to collection started/stopped/lost."'),
0x809E0000: ('BadDataUnavailable', '"Expected data is unavailable for the requested time range due to an un-mounted volume, an off-line archive or tape, or similar reason for temporary unavailability."'),
0x809F0000: ('BadEntryExists', '"The data or event was not successfully inserted because a matching entry exists."'),
0x80A00000: ('BadNoEntryExists', '"The data or event was not successfully updated because no matching entry exists."'),
0x80A10000: ('BadTimestampNotSupported', '"The client requested history using a timestamp format the server does not support (i.e requested ServerTimestamp when server only supports SourceTimestamp)."'),
0x00A20000: ('GoodEntryInserted', '"The data or event was successfully inserted into the historical database."'),
0x00A30000: ('GoodEntryReplaced', '"The data or event field was successfully replaced in the historical database."'),
0x40A40000: ('UncertainDataSubNormal', '"The value is derived from multiple values and has less than the required number of Good values."'),
0x00A50000: ('GoodNoData', '"No data exists for the requested time range or event filter."'),
0x00A60000: ('GoodMoreData', '"The data or event field was successfully replaced in the historical database."'),
0x80D40000: ('BadAggregateListMismatch', '"The requested number of Aggregates does not match the requested number of NodeIds."'),
0x80D50000: ('BadAggregateNotSupported', '"The requested Aggregate is not support by the server."'),
0x80D60000: ('BadAggregateInvalidInputs', '"The aggregate value could not be derived due to invalid data inputs."'),
0x80DA0000: ('BadAggregateConfigurationRejected', '"The aggregate configuration is not valid for specified node."'),
0x00D90000: ('GoodDataIgnored', '"The request specifies fields which are not valid for the EventType or cannot be saved by the historian."'),
0x80E40000: ('BadRequestNotAllowed', '"The request was rejected by the server because it did not meet the criteria set by the server."'),
0x81130000: ('BadRequestNotComplete', '"The request has not been processed by the server yet."'),
0x00DC0000: ('GoodEdited', '"The value does not come from the real source and has been edited by the server."'),
0x00DD0000: ('GoodPostActionFailed', '"There was an error in execution of these post-actions."'),
0x40DE0000: ('UncertainDominantValueChanged', '"The related EngineeringUnit has been changed but the Variable Value is still provided based on the previous unit."'),
0x00E00000: ('GoodDependentValueChanged', '"A dependent value has been changed but the change has not been applied to the device."'),
0x80E10000: ('BadDominantValueChanged', '"The related EngineeringUnit has been changed but this change has not been applied to the device. The Variable Value is still dependent on the previous unit but its status is currently Bad."'),
0x40E20000: ('UncertainDependentValueChanged', '"A dependent value has been changed but the change has not been applied to the device. The quality of the dominant variable is uncertain."'),
0x80E30000: ('BadDependentValueChanged', '"A dependent value has been changed but the change has not been applied to the device. The quality of the dominant variable is Bad."'),
0x01160000: ('GoodEdited_DependentValueChanged', '"It is delivered with a dominant Variable value when a dependent Variable has changed but the change has not been applied."'),
0x01170000: ('GoodEdited_DominantValueChanged', '"It is delivered with a dependent Variable value when a dominant Variable has changed but the change has not been applied."'),
0x01180000: ('GoodEdited_DominantValueChanged_DependentValueChanged', '"It is delivered with a dependent Variable value when a dominant or dependent Variable has changed but change has not been applied."'),
0x81190000: ('BadEdited_OutOfRange', '"It is delivered with a Variable value when Variable has changed but the value is not legal."'),
0x811A0000: ('BadInitialValue_OutOfRange', '"It is delivered with a Variable value when a source Variable has changed but the value is not legal."'),
0x811B0000: ('BadOutOfRange_DominantValueChanged', '"It is delivered with a dependent Variable value when a dominant Variable has changed and the value is not legal."'),
0x811C0000: ('BadEdited_OutOfRange_DominantValueChanged', '"It is delivered with a dependent Variable value when a dominant Variable has changed, the value is not legal and the change has not been applied."'),
0x811D0000: ('BadOutOfRange_DominantValueChanged_DependentValueChanged', '"It is delivered with a dependent Variable value when a dominant or dependent Variable has changed and the value is not legal."'),
0x811E0000: ('BadEdited_OutOfRange_DominantValueChanged_DependentValueChanged', '"It is delivered with a dependent Variable value when a dominant or dependent Variable has changed, the value is not legal and the change has not been applied."'),
0x00A70000: ('GoodCommunicationEvent', '"The communication layer has raised an event."'),
0x00A80000: ('GoodShutdownEvent', '"The system is shutting down."'),
0x00A90000: ('GoodCallAgain', '"The operation is not finished and needs to be called again."'),
0x00AA0000: ('GoodNonCriticalTimeout', '"A non-critical timeout occurred."'),
0x80AB0000: ('BadInvalidArgument', '"One or more arguments are invalid."'),
0x80AC0000: ('BadConnectionRejected', '"Could not establish a network connection to remote server."'),
0x80AD0000: ('BadDisconnect', '"The server has disconnected from the client."'),
0x80AE0000: ('BadConnectionClosed', '"The network connection has been closed."'),
0x80AF0000: ('BadInvalidState', '"The operation cannot be completed because the object is closed, uninitialized or in some other invalid state."'),
0x80B00000: ('BadEndOfStream', '"Cannot move beyond end of the stream."'),
0x80B10000: ('BadNoDataAvailable', '"No data is currently available for reading from a non-blocking stream."'),
0x80B20000: ('BadWaitingForResponse', '"The asynchronous operation is waiting for a response."'),
0x80B30000: ('BadOperationAbandoned', '"The asynchronous operation was abandoned by the caller."'),
0x80B40000: ('BadExpectedStreamToBlock', '"The stream did not return all data requested (possibly because it is a non-blocking stream)."'),
0x80B50000: ('BadWouldBlock', '"Non blocking behaviour is required and the operation would block."'),
0x80B60000: ('BadSyntaxError', '"A value had an invalid syntax."'),
0x80B70000: ('BadMaxConnectionsReached', '"The operation could not be finished because all available connections are in use."'),
}
def get_name_and_doc(val):
if val in code_to_name_doc:
return code_to_name_doc[val]
else:
if val & 1 << 31:
return 'Bad', 'Unknown StatusCode value: {}'.format(val)
elif val & 1 << 30:
return 'UncertainIn', 'Unknown StatusCode value: {}'.format(val)
else:
return 'Good', 'Unknown StatusCode value: {}'.format(val)
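# A minimal usage sketch (illustrative; the example code values below are
# assumptions, not part of the table): known codes are looked up directly in
# code_to_name_doc, while unknown codes fall back to the OPC UA severity bits
# of the 32-bit value -- bit 31 set means Bad, bit 30 set means Uncertain,
# and neither set means Good.
if __name__ == '__main__':
    print(get_name_and_doc(0x80AB0000))  # known code -> ('BadInvalidArgument', ...)
    print(get_name_and_doc(0x80000001))  # unknown, bit 31 set -> ('Bad', ...)
    print(get_name_and_doc(0x40000001))  # unknown, bit 30 set -> ('UncertainIn', ...)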
| FreeOpcUa/python-opcua | opcua/ua/status_codes.py | Python | lgpl-3.0 | 38,535 | 0.005969 |
# Copyright Kevin Deldycke <kevin@deldycke.com> and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import pytest
from click_extra.tests.conftest import destructive
from ..pool import ALL_MANAGER_IDS
from .test_cli import CLISubCommandTests
@pytest.fixture
def subcmd():
return "install", "arrow"
class TestInstall(CLISubCommandTests):
strict_selection_match = False
""" Install sub-command try each user-selected manager until it find one providing
the package we seek to install, after which the process stop. This mean not all
managers will be called, so we allow the CLI output checks to partially match.
"""
def test_no_package_id(self, invoke):
result = invoke("install")
assert result.exit_code == 2
assert not result.stdout
assert "Error: Missing argument 'PACKAGE_ID'." in result.stderr
PACKAGE_IDS = {
"apm": "markdown-pdf",
"apt": "wget",
"apt-mint": "exiftool",
"brew": "jpeginfo",
"cask": "pngyu",
"choco": "ccleaner",
"composer": "illuminate/contracts",
"flatpak": "org.gnome.Dictionary",
"gem": "markdown",
"mas": "747648890", # Telegram
"npm": "raven",
"opkg": "enigma2-hotplug",
"pip": "arrow",
"snap": "standard-notes",
"vscode": "tamasfe.even-better-toml",
"yarn": "markdown",
}
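# Sanity check: a sample package ID must be registered for every supported manager.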
assert set(PACKAGE_IDS) == set(ALL_MANAGER_IDS)
@destructive
@pytest.mark.parametrize(
"mid,package_id", (pytest.param(*v, id=v[0]) for v in PACKAGE_IDS.items())
)
def test_single_manager_install(self, invoke, mid, package_id):
result = invoke("--manager", mid, "install", package_id)
assert result.exit_code == 0
self.check_manager_selection(result, {mid}, reference_set=ALL_MANAGER_IDS)
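# The generic sub-command checks inherited from CLISubCommandTests perform real
# package installations when run against the install sub-command, so they are
# re-marked as destructive here rather than being redefined.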
destructive()(TestInstall.test_stats)
destructive()(TestInstall.test_default_all_managers)
destructive()(TestInstall.test_manager_selection)
| kdeldycke/meta-package-manager | meta_package_manager/tests/test_cli_install.py | Python | gpl-2.0 | 2,682 | 0.001864 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import range, zip
from future.utils import viewitems
import six
import itertools
import math
import re
import collections
import copy
import numbers
import textwrap
from contextlib import contextmanager
import numpy as np
from scipy.spatial.distance import hamming
import pandas as pd
from skbio._base import SkbioObject
from skbio.sequence._base import ElasticLines
from skbio.util._misc import chunk_str
from skbio.util._decorator import stable, experimental
class Sequence(collections.Sequence, SkbioObject):
"""Store biological sequence data and optional associated metadata.
``Sequence`` objects do not enforce an alphabet and are thus the most
generic objects for storing biological sequence data. Subclasses ``DNA``,
``RNA``, and ``Protein`` enforce the IUPAC character set [1]_ for, and
provide operations specific to, each respective molecule type.
``Sequence`` objects consist of the underlying sequence data, as well
as optional metadata and positional metadata. The underlying sequence
is immutable, while the metdata and positional metadata are mutable.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the biological sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence. A shallow copy
of the ``dict`` will be made (see Examples section below for details).
positional_metadata : pd.DataFrame consumable, optional
Arbitrary per-character metadata (e.g., sequence read quality
scores). Must be able to be passed directly to ``pd.DataFrame``
constructor. Each column of metadata must be the same length as the
biological sequence. A shallow copy of the positional metadata will be
made if necessary (see Examples section below for details).
lowercase : bool or str, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters. If ``False``, no characters will be converted.
If a str, it will be treated as a key into the positional metadata of
the object. All lowercase characters will be converted to uppercase,
and a ``True`` value will be stored in a boolean array in the
positional metadata under the key.
Attributes
----------
values
metadata
positional_metadata
observed_chars
See Also
--------
DNA
RNA
Protein
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
**Creating sequences:**
Create a sequence without any metadata:
>>> seq = Sequence('GGUCGUGAAGGA')
>>> seq
Sequence
---------------
Stats:
length: 12
---------------
0 GGUCGUGAAG GA
Create a sequence with metadata and positional metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> positional_metadata = {'quality': [3, 3, 4, 10],
... 'exons': [True, True, False, True]}
>>> seq = Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'seq-id'
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
**Retrieving underlying sequence data:**
Retrieve underlying sequence:
>>> seq.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'C', b'G', b'T'],
dtype='|S1')
Underlying sequence immutable:
>>> seq.values = np.array([b'T', b'C', b'G', b'A'], dtype='|S1')
Traceback (most recent call last):
...
AttributeError: can't set attribute
>>> seq.values[0] = b'T'
Traceback (most recent call last):
...
ValueError: assignment destination is read-only
**Retrieving sequence metadata:**
Retrieve metadata:
>>> pprint(seq.metadata) # using pprint to display dict in sorted order
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 4
3 True 10
**Updating sequence metadata:**
.. warning:: Be aware that a shallow copy of ``metadata`` and
``positional_metadata`` is made for performance. Since a deep copy is
not made, changes made to mutable Python objects stored as metadata may
affect the metadata of other ``Sequence`` objects or anything else that
shares a reference to the object. The following examples illustrate this
behavior.
First, let's create a sequence and update its metadata:
>>> metadata = {'id':'seq-id', 'desc':'seq desc', 'authors': ['Alice']}
>>> seq = Sequence('ACGT', metadata=metadata)
>>> seq.metadata['id'] = 'new-id'
>>> seq.metadata['pubmed'] = 12345
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'new-id', 'pubmed': 12345}
Note that the original metadata dictionary (stored in variable
``metadata``) hasn't changed because a shallow copy was made:
>>> pprint(metadata)
{'authors': ['Alice'], 'desc': 'seq desc', 'id': 'seq-id'}
>>> seq.metadata == metadata
False
Note however that since only a *shallow* copy was made, updates to mutable
objects will also change the original metadata dictionary:
>>> seq.metadata['authors'].append('Bob')
>>> seq.metadata['authors']
['Alice', 'Bob']
>>> metadata['authors']
['Alice', 'Bob']
This behavior can also occur when manipulating a sequence that has been
derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Metadata:
'authors': <class 'list'>
'desc': 'seq desc'
'id': 'new-id'
'pubmed': 12345
Stats:
length: 2
-----------------------------
0 CG
>>> pprint(subseq.metadata)
{'authors': ['Alice', 'Bob'],
'desc': 'seq desc',
'id': 'new-id',
'pubmed': 12345}
The subsequence has inherited the metadata of its parent sequence. If we
update the subsequence's author list, we see the changes propagated in the
parent sequence and original metadata dictionary:
>>> subseq.metadata['authors'].append('Carol')
>>> subseq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> seq.metadata['authors']
['Alice', 'Bob', 'Carol']
>>> metadata['authors']
['Alice', 'Bob', 'Carol']
The behavior for updating positional metadata is similar. Let's create a
new sequence with positional metadata that is already stored in a
``pd.DataFrame``:
>>> positional_metadata = pd.DataFrame(
... {'quality': [3, 3, 4, 10], 'list': [[], [], [], []]})
>>> seq = Sequence('ACGT', positional_metadata=positional_metadata)
>>> seq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
Stats:
length: 4
-----------------------------
0 ACGT
>>> seq.positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
Now let's update the sequence's positional metadata by adding a new column
and changing a value in another column:
>>> seq.positional_metadata['gaps'] = [False, False, False, False]
>>> seq.positional_metadata.loc[0, 'quality'] = 999
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [] 3 False
2 [] 4 False
3 [] 10 False
Note that the original positional metadata (stored in variable
``positional_metadata``) hasn't changed because a shallow copy was made:
>>> positional_metadata
list quality
0 [] 3
1 [] 3
2 [] 4
3 [] 10
>>> seq.positional_metadata.equals(positional_metadata)
False
Next let's create a sequence that has been derived from another sequence:
>>> subseq = seq[1:3]
>>> subseq
Sequence
-----------------------------
Positional metadata:
'list': <dtype: object>
'quality': <dtype: int64>
'gaps': <dtype: bool>
Stats:
length: 2
-----------------------------
0 CG
>>> subseq.positional_metadata
list quality gaps
0 [] 3 False
1 [] 4 False
As described above for metadata, since only a *shallow* copy was made of
the positional metadata, updates to mutable objects will also change the
parent sequence's positional metadata and the original positional metadata
``pd.DataFrame``:
>>> subseq.positional_metadata.loc[0, 'list'].append('item')
>>> subseq.positional_metadata
list quality gaps
0 [item] 3 False
1 [] 4 False
>>> seq.positional_metadata
list quality gaps
0 [] 999 False
1 [item] 3 False
2 [] 4 False
3 [] 10 False
>>> positional_metadata
list quality
0 [] 3
1 [item] 3
2 [] 4
3 [] 10
"""
_number_of_extended_ascii_codes = 256
# ASCII is built such that the difference between uppercase and lowercase
# is the 6th bit.
_ascii_invert_case_bit_offset = 32
_ascii_lowercase_boundary = 90
default_write_format = 'fasta'
__hash__ = None
@property
@stable(as_of="0.4.0")
def values(self):
"""Array containing underlying sequence characters.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGA')
>>> s.values # doctest: +NORMALIZE_WHITESPACE
array([b'A', b'A', b'C', b'G', b'A'],
dtype='|S1')
"""
return self._bytes.view('|S1')
@property
@stable(as_of="0.4.0")
def metadata(self):
"""``dict`` containing metadata which applies to the entire sequence.
Notes
-----
This property can be set and deleted.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
Create a sequence with metadata:
>>> s = Sequence('ACGTACGTACGTACGT',
... metadata={'id': 'seq-id',
... 'description': 'seq description'})
>>> s
Sequence
------------------------------------
Metadata:
'description': 'seq description'
'id': 'seq-id'
Stats:
length: 16
------------------------------------
0 ACGTACGTAC GTACGT
Retrieve metadata:
>>> pprint(s.metadata) # using pprint to display dict in sorted order
{'description': 'seq description', 'id': 'seq-id'}
Update metadata:
>>> s.metadata['id'] = 'new-id'
>>> s.metadata['pubmed'] = 12345
>>> pprint(s.metadata)
{'description': 'seq description', 'id': 'new-id', 'pubmed': 12345}
Set metadata:
>>> s.metadata = {'abc': 123}
>>> s.metadata
{'abc': 123}
Delete metadata:
>>> s.has_metadata()
True
>>> del s.metadata
>>> s.metadata
{}
>>> s.has_metadata()
False
"""
if self._metadata is None:
# not using setter to avoid copy
self._metadata = {}
return self._metadata
@metadata.setter
def metadata(self, metadata):
if not isinstance(metadata, dict):
raise TypeError("metadata must be a dict")
# shallow copy
self._metadata = metadata.copy()
@metadata.deleter
def metadata(self):
self._metadata = None
@property
@stable(as_of="0.4.0")
def positional_metadata(self):
"""``pd.DataFrame`` containing metadata on a per-character basis.
Notes
-----
This property can be set and deleted.
Examples
--------
Create a DNA sequence with positional metadata:
>>> from skbio import DNA
>>> seq = DNA(
... 'ACGT',
... positional_metadata={'quality': [3, 3, 20, 11],
... 'exons': [True, True, False, True]})
>>> seq
DNA
-----------------------------
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
has gaps: False
has degenerates: False
has non-degenerates: True
GC-content: 50.00%
-----------------------------
0 ACGT
Retrieve positional metadata:
>>> seq.positional_metadata
exons quality
0 True 3
1 True 3
2 False 20
3 True 11
Update positional metadata:
>>> seq.positional_metadata['gaps'] = seq.gaps()
>>> seq.positional_metadata
exons quality gaps
0 True 3 False
1 True 3 False
2 False 20 False
3 True 11 False
Set positional metadata:
>>> seq.positional_metadata = {'degenerates': seq.degenerates()}
>>> seq.positional_metadata
degenerates
0 False
1 False
2 False
3 False
Delete positional metadata:
>>> seq.has_positional_metadata()
True
>>> del seq.positional_metadata
>>> seq.positional_metadata
Empty DataFrame
Columns: []
Index: [0, 1, 2, 3]
>>> seq.has_positional_metadata()
False
"""
if self._positional_metadata is None:
# not using setter to avoid copy
self._positional_metadata = pd.DataFrame(
index=np.arange(len(self)))
return self._positional_metadata
@positional_metadata.setter
def positional_metadata(self, positional_metadata):
try:
# copy=True to copy underlying data buffer
positional_metadata = pd.DataFrame(positional_metadata, copy=True)
except pd.core.common.PandasError as e:
raise TypeError('Positional metadata invalid. Must be consumable '
'by pd.DataFrame. Original pandas error message: '
'"%s"' % e)
num_rows = len(positional_metadata.index)
if num_rows != len(self):
raise ValueError(
"Number of positional metadata values (%d) must match the "
"number of characters in the sequence (%d)." %
(num_rows, len(self)))
positional_metadata.reset_index(drop=True, inplace=True)
self._positional_metadata = positional_metadata
@positional_metadata.deleter
def positional_metadata(self):
self._positional_metadata = None
@property
@experimental(as_of="0.4.0-dev")
def observed_chars(self):
"""Set of observed characters in the sequence.
Notes
-----
This property is not writeable.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AACGAC')
>>> s.observed_chars == {'G', 'A', 'C'}
True
"""
return set(str(self))
@property
def _string(self):
return self._bytes.tostring()
@stable(as_of="0.4.0")
def __init__(self, sequence, metadata=None, positional_metadata=None,
lowercase=False):
if isinstance(sequence, np.ndarray):
if sequence.dtype == np.uint8:
self._set_bytes_contiguous(sequence)
elif sequence.dtype == '|S1':
sequence = sequence.view(np.uint8)
# Guarantee the sequence is an array (might be scalar before
# this).
if sequence.shape == ():
sequence = np.array([sequence], dtype=np.uint8)
self._set_bytes_contiguous(sequence)
else:
raise TypeError(
"Can only create sequence from numpy.ndarray of dtype "
"np.uint8 or '|S1'. Invalid dtype: %s" %
sequence.dtype)
elif isinstance(sequence, Sequence):
# we're not simply accessing sequence.metadata in order to avoid
# creating "empty" metadata representations on both sequence
# objects if they don't have metadata. same strategy is used below
# for positional metadata
if metadata is None and sequence.has_metadata():
metadata = sequence.metadata
if (positional_metadata is None and
sequence.has_positional_metadata()):
positional_metadata = sequence.positional_metadata
sequence = sequence._bytes
self._owns_bytes = False
self._set_bytes(sequence)
else:
# Python 3 will not raise a UnicodeEncodeError so we force it by
# encoding it as ascii
if isinstance(sequence, six.text_type):
sequence = sequence.encode("ascii")
s = np.fromstring(sequence, dtype=np.uint8)
# There are two possibilities (to our knowledge) at this point:
# Either the sequence we were given was something string-like,
# (else it would not have made it past fromstring), or it was a
# numpy scalar, and so our length must be 1.
if isinstance(sequence, np.generic) and len(s) != 1:
raise TypeError("Can cannot create a sequence with %r" %
type(sequence).__name__)
sequence = s
self._owns_bytes = True
self._set_bytes(sequence)
if metadata is None:
self._metadata = None
else:
self.metadata = metadata
if positional_metadata is None:
self._positional_metadata = None
else:
self.positional_metadata = positional_metadata
if lowercase is False:
pass
elif lowercase is True or isinstance(lowercase, six.string_types):
lowercase_mask = self._bytes > self._ascii_lowercase_boundary
self._convert_to_uppercase(lowercase_mask)
# If it isn't True, it must be a string_type
if not (lowercase is True):
self.positional_metadata[lowercase] = lowercase_mask
else:
raise TypeError("lowercase keyword argument expected a bool or "
"string, but got %s" % type(lowercase))
def _set_bytes_contiguous(self, sequence):
"""Munge the sequence data into a numpy array of dtype uint8."""
if not sequence.flags['C_CONTIGUOUS']:
# numpy doesn't support views of non-contiguous arrays. Since we're
# making heavy use of views internally, and users may also supply
# us with a view, make sure we *always* store a contiguous array to
# avoid hard-to-track bugs. See
# https://github.com/numpy/numpy/issues/5716
sequence = np.ascontiguousarray(sequence)
self._owns_bytes = True
else:
self._owns_bytes = False
self._set_bytes(sequence)
def _set_bytes(self, sequence):
sequence.flags.writeable = False
self._bytes = sequence
def _convert_to_uppercase(self, lowercase):
if np.any(lowercase):
with self._byte_ownership():
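                # XOR with 32 toggles ASCII case, e.g. ord('a') ^ 32 == ord('A')
                # (97 ^ 32 == 65).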
self._bytes[lowercase] ^= self._ascii_invert_case_bit_offset
@stable(as_of="0.4.0")
def __contains__(self, subsequence):
"""Determine if a subsequence is contained in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
The putative subsequence.
Returns
-------
bool
Indicates whether `subsequence` is contained in the biological
sequence.
Raises
------
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
"""
return self._munge_to_bytestring(subsequence, "in") in self._string
@stable(as_of="0.4.0")
def __eq__(self, other):
"""Determine if the biological sequence is equal to another.
Biological sequences are equal if they are *exactly* the same type and
their sequence characters, metadata, and positional metadata are the
same.
Parameters
----------
other : Sequence
Sequence to test for equality against.
Returns
-------
bool
Indicates whether the biological sequence is equal to `other`.
Examples
--------
Define two biological sequences that have the same underlying sequence
of characters:
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
The two sequences are considered equal because they are the same type,
their underlying sequence of characters are the same, and their
optional metadata attributes (``metadata`` and ``positional_metadata``)
were not provided:
>>> s == t
True
>>> t == s
True
Define another biological sequence with a different sequence of
characters than the previous two biological sequences:
>>> u = Sequence('ACGA')
>>> u == t
False
Define a biological sequence with the same sequence of characters as
``u`` but with different metadata and positional metadata:
>>> v = Sequence('ACGA', metadata={'id': 'abc'},
... positional_metadata={'quality':[1, 5, 3, 3]})
The two sequences are not considered equal because their metadata and
positional metadata do not match:
>>> u == v
False
"""
# checks ordered from least to most expensive
if self.__class__ != other.__class__:
return False
# we're not simply comparing self.metadata to other.metadata in order
# to avoid creating "empty" metadata representations on the sequence
# objects if they don't have metadata. same strategy is used below for
# positional metadata
if self.has_metadata() and other.has_metadata():
if self.metadata != other.metadata:
return False
elif not (self.has_metadata() or other.has_metadata()):
# both don't have metadata
pass
else:
# one has metadata while the other does not
return False
if self._string != other._string:
return False
if self.has_positional_metadata() and other.has_positional_metadata():
if not self.positional_metadata.equals(other.positional_metadata):
return False
elif not (self.has_positional_metadata() or
other.has_positional_metadata()):
# both don't have positional metadata
pass
else:
# one has positional metadata while the other does not
return False
return True
@stable(as_of="0.4.0")
def __ne__(self, other):
"""Determine if the biological sequence is not equal to another.
Biological sequences are not equal if they are not *exactly* the same
type, or their sequence characters, metadata, or positional metadata
differ.
Parameters
----------
other : Sequence
Sequence to test for inequality against.
Returns
-------
bool
Indicates whether the biological sequence is not equal to `other`.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> t = Sequence('ACGT')
>>> s != t
False
>>> u = Sequence('ACGA')
>>> u != t
True
>>> v = Sequence('ACGA', metadata={'id': 'v'})
>>> u != v
True
"""
return not (self == other)
@stable(as_of="0.4.0")
def __getitem__(self, indexable):
"""Slice the biological sequence.
Parameters
----------
indexable : int, slice, iterable (int and slice), 1D array_like (bool)
The position(s) to return from the biological sequence. If
`indexable` is an iterable of integers, these are assumed to be
indices in the sequence to keep. If `indexable` is a 1D
``array_like`` of booleans, these are assumed to be the positions
in the sequence to keep.
Returns
-------
Sequence
New biological sequence containing the position(s) specified by
`indexable` in the current biological sequence. If quality scores
are present, they will be sliced in the same manner and included in
the returned biological sequence. ID and description are also
included.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUGAAGGA')
Obtain a single character from the biological sequence:
>>> s[1]
Sequence
-------------
Stats:
length: 1
-------------
0 G
Obtain a slice:
>>> s[7:]
Sequence
-------------
Stats:
length: 5
-------------
0 AAGGA
Obtain characters at the following indices:
>>> s[[3, 4, 7, 0, 3]]
Sequence
-------------
Stats:
length: 5
-------------
0 CGAGC
Obtain characters at positions evaluating to `True`:
>>> s = Sequence('GGUCG')
>>> index = [True, False, True, 'a' is 'a', False]
>>> s[index]
Sequence
-------------
Stats:
length: 3
-------------
0 GUC
"""
if (not isinstance(indexable, np.ndarray) and
((not isinstance(indexable, six.string_types)) and
hasattr(indexable, '__iter__'))):
indexable_ = indexable
indexable = np.asarray(indexable)
if indexable.dtype == object:
indexable = list(indexable_) # TODO: Don't blow out memory
if len(indexable) == 0:
# indexing with an empty list, so convert to ndarray and
# fall through to ndarray slicing below
indexable = np.asarray(indexable)
else:
seq = np.concatenate(
list(_slices_from_iter(self._bytes, indexable)))
index = _as_slice_if_single_index(indexable)
positional_metadata = None
if self.has_positional_metadata():
pos_md_slices = list(_slices_from_iter(
self.positional_metadata, index))
positional_metadata = pd.concat(pos_md_slices)
return self._to(sequence=seq,
positional_metadata=positional_metadata)
elif (isinstance(indexable, six.string_types) or
isinstance(indexable, bool)):
raise IndexError("Cannot index with %s type: %r" %
(type(indexable).__name__, indexable))
if (isinstance(indexable, np.ndarray) and
indexable.dtype == bool and
len(indexable) != len(self)):
raise IndexError("An boolean vector index must be the same length"
" as the sequence (%d, not %d)." %
(len(self), len(indexable)))
if isinstance(indexable, np.ndarray) and indexable.size == 0:
# convert an empty ndarray to a supported dtype for slicing a numpy
# array
indexable = indexable.astype(int)
seq = self._bytes[indexable]
positional_metadata = self._slice_positional_metadata(indexable)
return self._to(sequence=seq, positional_metadata=positional_metadata)
def _slice_positional_metadata(self, indexable):
if self.has_positional_metadata():
if _is_single_index(indexable):
index = _single_index_to_slice(indexable)
else:
index = indexable
return self.positional_metadata.iloc[index]
else:
return None
@stable(as_of="0.4.0")
def __len__(self):
"""Return the number of characters in the biological sequence.
Returns
-------
int
The length of the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> len(s)
4
"""
return self._bytes.size
@stable(as_of="0.4.0")
def __nonzero__(self):
"""Returns truth value (truthiness) of sequence.
Returns
-------
bool
True if length of sequence is greater than 0, else False.
Examples
--------
>>> from skbio import Sequence
>>> bool(Sequence(''))
False
>>> bool(Sequence('ACGT'))
True
"""
return len(self) > 0
@stable(as_of="0.4.0")
def __iter__(self):
"""Iterate over positions in the biological sequence.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in s:
... str(c)
'G'
'G'
'U'
'C'
"""
for i in range(len(self)):
yield self[i]
@stable(as_of="0.4.0")
def __reversed__(self):
"""Iterate over positions in the biological sequence in reverse order.
Yields
------
Sequence
Single character subsequence, one for each position in the
sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> for c in reversed(s):
... str(c)
'C'
'U'
'G'
'G'
"""
return iter(self[::-1])
@stable(as_of="0.4.0")
def __str__(self):
"""Return biological sequence characters as a string.
Returns
-------
str
Sequence characters as a string. No metadata or positional
metadata will be included.
See Also
--------
sequence
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCGUAAAGGA', metadata={'id':'hello'})
>>> str(s)
'GGUCGUAAAGGA'
"""
return str(self._string.decode("ascii"))
@stable(as_of="0.4.0")
def __repr__(self):
r"""Return a string representation of the biological sequence object.
Representation includes:
* sequence type
* metadata keys and values: will display key/value if it is an
understood type, otherwise just the type will be displayed. If it is
an understood type whose representation is too long, just the type
will be displayed
* positional metadata: column names and column dtypes will be displayed
in the order they appear in the positional metadata ``pd.DataFrame``.
Column names (i.e., keys) follow the same display rules as metadata
keys
* sequence stats (e.g., length)
* up to five lines of chunked sequence data. Each line of chunked
sequence data displays the current position in the sequence
Returns
-------
str
String representation of the biological sequence object.
Notes
-----
Subclasses can override Sequence._repr_stats to provide custom
statistics.
Examples
--------
Short sequence without metadata:
>>> from skbio import Sequence
>>> Sequence('ACGTAATGGATACGTAATGCA')
Sequence
-------------------------
Stats:
length: 21
-------------------------
0 ACGTAATGGA TACGTAATGC A
Longer sequence displays first two lines and last two lines:
>>> Sequence('ACGT' * 100)
Sequence
---------------------------------------------------------------------
Stats:
length: 400
---------------------------------------------------------------------
0 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
60 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
...
300 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
360 ACGTACGTAC GTACGTACGT ACGTACGTAC GTACGTACGT
Sequence with metadata and positional metadata:
>>> metadata = {
... 'id': 'seq-id',
... 'description': 'description of the sequence, wrapping across '
... 'lines if it\'s too long',
... 'authors': ['Alice', 'Bob', 'Carol'],
... 'year': 2015,
... 'published': True
... }
>>> positional_metadata = {
... 'quality': [3, 10, 11, 10],
... 'exons': [True, True, False, True]
... }
>>> Sequence('ACGT', metadata=metadata,
... positional_metadata=positional_metadata)
Sequence
----------------------------------------------------------------------
Metadata:
'authors': <class 'list'>
'description': "description of the sequence, wrapping across lines
if it's too long"
'id': 'seq-id'
'published': True
'year': 2015
Positional metadata:
'exons': <dtype: bool>
'quality': <dtype: int64>
Stats:
length: 4
----------------------------------------------------------------------
0 ACGT
"""
return _SequenceReprBuilder(
seq=self,
width=71, # 79 for pep8, 8 space indent for docstrings
indent=4,
chunk_size=10).build()
def _repr_stats(self):
"""Define statistics to display in the sequence's repr.
Subclasses can override this method to provide type-specific
statistics.
This method computes a single statistic: length.
Returns
-------
list
List of tuples where each tuple represents a statistic. Each tuple
contains exactly two ``str`` elements: the statistic's name/label,
and the str-formatted value of the statistic. Ordering of
statistics (i.e., list order) determines display order in the
sequence repr.
"""
return [('length', '%d' % len(self))]
@stable(as_of="0.4.0")
def __copy__(self):
"""Return a shallow copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=False)``.
"""
return self.copy(deep=False)
@stable(as_of="0.4.0")
def __deepcopy__(self, memo):
"""Return a deep copy of the biological sequence.
See Also
--------
copy
Notes
-----
This method is equivalent to ``seq.copy(deep=True)``.
"""
return self._copy(True, memo)
@stable(as_of="0.4.0")
def has_metadata(self):
"""Determine if the sequence contains metadata.
Returns
-------
bool
Indicates whether the sequence has metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_metadata()
False
>>> t = DNA('ACACGACGTT', metadata={'id': 'seq-id'})
>>> t.has_metadata()
True
"""
return self._metadata is not None and bool(self.metadata)
@stable(as_of="0.4.0")
def has_positional_metadata(self):
"""Determine if the sequence contains positional metadata.
Returns
-------
bool
Indicates whether the sequence has positional metadata
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACACGACGTT')
>>> s.has_positional_metadata()
False
>>> t = DNA('ACACGACGTT', positional_metadata={'quality': range(10)})
>>> t.has_positional_metadata()
True
"""
return (self._positional_metadata is not None and
len(self.positional_metadata.columns) > 0)
@stable(as_of="0.4.0")
def copy(self, deep=False):
"""Return a copy of the biological sequence.
Parameters
----------
deep : bool, optional
Perform a deep copy. If ``False``, perform a shallow copy.
Returns
-------
Sequence
Copy of the biological sequence.
Notes
-----
Since sequence objects can share the same underlying immutable sequence
data (or pieces of it), this method can be used to create a sequence
object with its own copy of the sequence data so that the original
sequence data can be garbage-collected.
Examples
--------
Create a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('ACGT',
... metadata={'id': 'seq-id', 'authors': ['Alice']},
... positional_metadata={'quality': [7, 10, 8, 5],
... 'list': [[], [], [], []]})
Make a shallow copy of the sequence:
>>> seq_copy = seq.copy()
>>> seq_copy == seq
True
Setting new references in the copied sequence's metadata doesn't affect
the original sequence's metadata:
>>> seq_copy.metadata['id'] = 'new-id'
>>> pprint(seq_copy.metadata)
{'authors': ['Alice'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'quality'] = 999
>>> seq_copy.positional_metadata
list quality
0 [] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [] 7
1 [] 10
2 [] 8
3 [] 5
Since only a *shallow* copy was made, updates to mutable objects stored
as metadata affect the original sequence's metadata:
>>> seq_copy.metadata['authors'].append('Bob')
>>> pprint(seq_copy.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'new-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
The same applies to the sequence's positional metadata:
>>> seq_copy.positional_metadata.loc[0, 'list'].append(1)
>>> seq_copy.positional_metadata
list quality
0 [1] 999
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
Perform a deep copy to avoid this behavior:
>>> seq_deep_copy = seq.copy(deep=True)
Updates to mutable objects no longer affect the original sequence's
metadata:
>>> seq_deep_copy.metadata['authors'].append('Carol')
>>> pprint(seq_deep_copy.metadata)
{'authors': ['Alice', 'Bob', 'Carol'], 'id': 'seq-id'}
>>> pprint(seq.metadata)
{'authors': ['Alice', 'Bob'], 'id': 'seq-id'}
Nor its positional metadata:
>>> seq_deep_copy.positional_metadata.loc[0, 'list'].append(2)
>>> seq_deep_copy.positional_metadata
list quality
0 [1, 2] 7
1 [] 10
2 [] 8
3 [] 5
>>> seq.positional_metadata
list quality
0 [1] 7
1 [] 10
2 [] 8
3 [] 5
"""
return self._copy(deep, {})
def _copy(self, deep, memo):
# strategy: copy the sequence without metadata first, then set metadata
# attributes with copies. we take this approach instead of simply
# passing the metadata through the Sequence constructor because we
# don't want to copy twice (this could happen when deep=True, where we
# deep copy here and then shallow copy in the Sequence constructor). we
# also directly set the private metadata attributes instead of using
# their public setters to avoid an unnecessary copy
# we don't make a distinction between deep vs. shallow copy of bytes
# because dtype=np.uint8. we only need to make the distinction when
# dealing with object dtype
bytes = np.copy(self._bytes)
seq_copy = self._constructor(sequence=bytes, metadata=None,
positional_metadata=None)
if self.has_metadata():
metadata = self.metadata
if deep:
metadata = copy.deepcopy(metadata, memo)
else:
metadata = metadata.copy()
seq_copy._metadata = metadata
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
if deep:
positional_metadata = copy.deepcopy(positional_metadata, memo)
else:
                # pandas' copy(deep=True) copies the underlying data buffer, but
                # Python objects stored in it are still shared by reference
positional_metadata = positional_metadata.copy(deep=True)
seq_copy._positional_metadata = positional_metadata
return seq_copy
@stable(as_of='0.4.0')
def lowercase(self, lowercase):
"""Return a case-sensitive string representation of the sequence.
Parameters
----------
lowercase: str or boolean vector
If lowercase is a boolean vector, it is used to set sequence
characters to lowercase in the output string. True values in the
boolean vector correspond to lowercase characters. If lowercase
is a str, it is treated like a key into the positional metadata,
pointing to a column which must be a boolean vector.
That boolean vector is then used as described previously.
Returns
-------
str
String representation of sequence with specified characters set to
lowercase.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACGT')
>>> s.lowercase([True, True, False, False])
'acGT'
>>> s = Sequence('ACGT',
... positional_metadata={
... 'exons': [True, False, False, True]})
>>> s.lowercase('exons')
'aCGt'
Constructor automatically populates a column in positional metadata
when the ``lowercase`` keyword argument is provided with a column name:
>>> s = Sequence('ACgt', lowercase='introns')
>>> s.lowercase('introns')
'ACgt'
>>> s = Sequence('ACGT', lowercase='introns')
>>> s.lowercase('introns')
'ACGT'
"""
index = self._munge_to_index_array(lowercase)
outbytes = self._bytes.copy()
outbytes[index] ^= self._ascii_invert_case_bit_offset
return str(outbytes.tostring().decode('ascii'))
@stable(as_of="0.4.0")
def count(self, subsequence, start=None, end=None):
"""Count occurrences of a subsequence in the biological sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to count occurrences of.
start : int, optional
The position at which to start counting (inclusive).
end : int, optional
The position at which to stop counting (exclusive).
Returns
-------
int
Number of occurrences of `subsequence` in the biological sequence.
Raises
------
ValueError
If `subsequence` is of length 0.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUCG')
>>> s.count('G')
3
>>> s.count('GG')
1
>>> s.count('T')
0
>>> s.count('G', 2, 5)
1
"""
if len(subsequence) == 0:
raise ValueError("`count` is not defined for empty subsequences.")
return self._string.count(
self._munge_to_bytestring(subsequence, "count"), start, end)
@stable(as_of="0.4.0")
def index(self, subsequence, start=None, end=None):
"""Find position where subsequence first occurs in the sequence.
Parameters
----------
subsequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Subsequence to search for in the biological sequence.
start : int, optional
The position at which to start searching (inclusive).
end : int, optional
The position at which to stop searching (exclusive).
Returns
-------
int
Position where `subsequence` first occurs in the biological
sequence.
Raises
------
ValueError
If `subsequence` is not present in the biological sequence.
TypeError
If `subsequence` is a ``Sequence`` object with a different type
than the biological sequence.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._string.index(
self._munge_to_bytestring(subsequence, "index"), start, end)
except ValueError:
raise ValueError(
"%r is not present in %r." % (subsequence, self))
@experimental(as_of="0.4.0")
def distance(self, other, metric=None):
"""Compute the distance to another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compute the distance to.
metric : function, optional
Function used to compute the distance between the biological
sequence and `other`. If ``None`` (the default),
``scipy.spatial.distance.hamming`` will be used. This function
should take two ``skbio.Sequence`` objects and return a ``float``.
Returns
-------
float
Distance between the biological sequence and `other`.
Raises
------
ValueError
If the sequences are not the same length when `metric` is ``None``
(i.e., `metric` is ``scipy.spatial.distance.hamming``). This is
only checked when using this metric, as equal length is not a
requirement of all sequence distance metrics. In general, the
metric itself should test and give an informative error message,
but the message from ``scipy.spatial.distance.hamming`` is somewhat
cryptic (as of this writing), and it's the default metric, so we
explicitly do this check here. This metric-specific check will be
removed from this method when the ``skbio.sequence.stats`` module
is created (track progress on issue #913).
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
fraction_diff
fraction_same
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.distance(t)
0.25
>>> def custom_dist(s1, s2): return 0.42
>>> s.distance(t, custom_dist)
0.42
"""
# TODO refactor this method to accept a name (string) of the distance
# metric to apply and accept **kwargs
other = self._munge_to_sequence(other, 'distance')
if metric is None:
return self._hamming(other)
return float(metric(self, other))
def _hamming(self, other):
# Hamming requires equal length sequences. We are checking this
# here because the error you would get otherwise is cryptic.
if len(self) != len(other):
raise ValueError(
"Sequences do not have equal length. "
"Hamming distances can only be computed between "
"sequences of equal length.")
return float(hamming(self.values, other.values))
@stable(as_of="0.4.0")
def matches(self, other):
"""Find positions that match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a match
between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.matches(t)
array([ True, False, True, False], dtype=bool)
"""
other = self._munge_to_sequence(other, 'matches/mismatches')
if len(self) != len(other):
raise ValueError("Match and mismatch vectors can only be "
"generated from equal length sequences.")
return self._bytes == other._bytes
@stable(as_of="0.4.0")
def mismatches(self, other):
"""Find positions that do not match with another sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
1D np.ndarray (bool)
Boolean vector where ``True`` at position ``i`` indicates a
mismatch between the sequences at their positions ``i``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
matches
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('GAUU')
>>> s.mismatches(t)
array([False, True, False, True], dtype=bool)
"""
return np.invert(self.matches(other))
@stable(as_of="0.4.0")
def match_frequency(self, other, relative=False):
"""Return count of positions that are the same between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of matches instead of
the count.
Returns
-------
int or float
Number of positions that are the same between the sequences. This
will be an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
mismatch_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.match_frequency(t)
3
>>> s.match_frequency(t, relative=True)
0.75
"""
if relative:
return float(self.matches(other).mean())
else:
return int(self.matches(other).sum())
@stable(as_of="0.4.0")
def mismatch_frequency(self, other, relative=False):
"""Return count of positions that differ between two sequences.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
relative : bool, optional
If ``True``, return the relative frequency of mismatches instead of
the count.
Returns
-------
int or float
Number of positions that differ between the sequences. This will be
an ``int`` if `relative` is ``False`` and a ``float``
if `relative` is ``True``.
Raises
------
ValueError
If the sequences are not the same length.
TypeError
If `other` is a ``Sequence`` object with a different type than the
biological sequence.
See Also
--------
match_frequency
matches
mismatches
distance
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('GGUC')
>>> t = Sequence('AGUC')
>>> s.mismatch_frequency(t)
1
>>> s.mismatch_frequency(t, relative=True)
0.25
"""
if relative:
return float(self.mismatches(other).mean())
else:
return int(self.mismatches(other).sum())
@experimental(as_of="0.4.0-dev")
def frequencies(self, chars=None, relative=False):
"""Compute frequencies of characters in the sequence.
Parameters
----------
chars : str or set of str, optional
Characters to compute the frequencies of. May be a ``str``
containing a single character or a ``set`` of single-character
strings. If ``None``, frequencies will be computed for all
characters present in the sequence.
relative : bool, optional
If ``True``, return the relative frequency of each character
instead of its count. If `chars` is provided, relative frequencies
will be computed with respect to the number of characters in the
sequence, **not** the total count of characters observed in
`chars`. Thus, the relative frequencies will not necessarily sum to
1.0 if `chars` is provided.
Returns
-------
dict
Frequencies of characters in the sequence.
Raises
------
TypeError
If `chars` is not a ``str`` or ``set`` of ``str``.
ValueError
If `chars` is not a single-character ``str`` or a ``set`` of
single-character strings.
ValueError
If `chars` contains characters outside the allowable range of
characters in a ``Sequence`` object.
See Also
--------
kmer_frequencies
iter_kmers
Notes
-----
If the sequence is empty (i.e., length zero), ``relative=True``,
**and** `chars` is provided, the relative frequency of each specified
character will be ``np.nan``.
If `chars` is not provided, this method is equivalent to, but faster
than, ``seq.kmer_frequencies(k=1)``.
If `chars` is not provided, it is equivalent to, but faster than,
passing ``chars=seq.observed_chars``.
Examples
--------
Compute character frequencies of a sequence:
>>> from pprint import pprint
>>> from skbio import Sequence
>>> seq = Sequence('AGAAGACC')
>>> freqs = seq.frequencies()
>>> pprint(freqs) # using pprint to display dict in sorted order
{'A': 4, 'C': 2, 'G': 2}
Compute relative character frequencies:
>>> freqs = seq.frequencies(relative=True)
>>> pprint(freqs)
{'A': 0.5, 'C': 0.25, 'G': 0.25}
Compute relative frequencies of characters A, C, and T:
>>> freqs = seq.frequencies(chars={'A', 'C', 'T'}, relative=True)
>>> pprint(freqs)
{'A': 0.5, 'C': 0.25, 'T': 0.0}
Note that since character T is not in the sequence we receive a
relative frequency of 0.0. The relative frequencies of A and C are
relative to the number of characters in the sequence (8), **not** the
number of A and C characters (4 + 2 = 6).
"""
freqs = np.bincount(self._bytes,
minlength=self._number_of_extended_ascii_codes)
if chars is not None:
chars, indices = self._chars_to_indices(chars)
else:
indices, = np.nonzero(freqs)
# Downcast from int64 to uint8 then convert to str. This is safe
# because we are guaranteed to have indices in the range 0 to 255
# inclusive.
chars = indices.astype(np.uint8).tostring().decode('ascii')
obs_counts = freqs[indices]
if relative:
obs_counts = obs_counts / len(self)
# Use tolist() for minor performance gain.
return dict(zip(chars, obs_counts.tolist()))
def _chars_to_indices(self, chars):
"""Helper for Sequence.frequencies."""
if isinstance(chars, six.string_types) or \
isinstance(chars, six.binary_type):
chars = set([chars])
elif not isinstance(chars, set):
raise TypeError(
"`chars` must be of type `set`, not %r" % type(chars).__name__)
# Impose an (arbitrary) ordering to `chars` so that we can return
# `indices` in that same order.
chars = list(chars)
indices = []
for char in chars:
if not (isinstance(char, six.string_types) or
isinstance(char, six.binary_type)):
raise TypeError(
"Each element of `chars` must be string-like, not %r" %
type(char).__name__)
if len(char) != 1:
raise ValueError(
"Each element of `chars` must contain a single "
"character (found %d characters)" % len(char))
index = ord(char)
if index >= self._number_of_extended_ascii_codes:
raise ValueError(
"Character %r in `chars` is outside the range of "
"allowable characters in a `Sequence` object." % char)
indices.append(index)
return chars, indices
@stable(as_of="0.4.0")
def iter_kmers(self, k, overlap=True):
"""Generate kmers of length `k` from the biological sequence.
Parameters
----------
k : int
The kmer length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
Yields
------
Sequence
kmer of length `k` contained in the biological sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('ACACGACGTT')
>>> for kmer in s.iter_kmers(4, overlap=False):
... str(kmer)
'ACAC'
'GACG'
>>> for kmer in s.iter_kmers(3, overlap=True):
... str(kmer)
'ACA'
'CAC'
'ACG'
'CGA'
'GAC'
'ACG'
'CGT'
'GTT'
"""
if k < 1:
raise ValueError("k must be greater than 0.")
if overlap:
step = 1
count = len(self) - k + 1
else:
step = k
count = len(self) // k
if self.has_positional_metadata():
for i in range(0, len(self) - k + 1, step):
yield self[i:i+k]
        else:
            # Optimized path when no positional metadata
kmers = np.lib.stride_tricks.as_strided(
self._bytes, shape=(k, count), strides=(1, step)).T
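            # The transposed strided view has shape (count, k): row i starts at
            # byte offset i * step and reads k consecutive bytes, so each row is
            # one kmer window without copying the underlying buffer.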
for s in kmers:
yield self._to(sequence=s)
@stable(as_of="0.4.0")
def kmer_frequencies(self, k, overlap=True, relative=False):
"""Return counts of words of length `k` from the biological sequence.
Parameters
----------
k : int
The word length.
overlap : bool, optional
Defines whether the kmers should be overlapping or not.
relative : bool, optional
If ``True``, return the relative frequency of each kmer instead of
its count.
Returns
-------
dict
Frequencies of words of length `k` contained in the biological
sequence.
Raises
------
ValueError
If `k` is less than 1.
Examples
--------
>>> from pprint import pprint
>>> from skbio import Sequence
>>> s = Sequence('ACACATTTATTA')
>>> freqs = s.kmer_frequencies(3, overlap=False)
>>> pprint(freqs) # using pprint to display dict in sorted order
{'ACA': 1, 'CAT': 1, 'TTA': 2}
>>> freqs = s.kmer_frequencies(3, relative=True, overlap=False)
>>> pprint(freqs)
{'ACA': 0.25, 'CAT': 0.25, 'TTA': 0.5}
"""
kmers = self.iter_kmers(k, overlap=overlap)
freqs = dict(collections.Counter((str(seq) for seq in kmers)))
if relative:
if overlap:
num_kmers = len(self) - k + 1
else:
num_kmers = len(self) // k
relative_freqs = {}
for kmer, count in viewitems(freqs):
relative_freqs[kmer] = count / num_kmers
freqs = relative_freqs
return freqs
@stable(as_of="0.4.0")
def find_with_regex(self, regex, ignore=None):
"""Generate slices for patterns matched by a regular expression.
Parameters
----------
regex : str or regular expression object
String to be compiled into a regular expression, or a pre-
compiled regular expression object (e.g., from calling
``re.compile``).
ignore : 1D array_like (bool) or iterable (slices or ints), optional
Indicate the positions to ignore when matching.
Yields
------
slice
Location where the regular expression matched.
Examples
--------
>>> from skbio import Sequence
>>> s = Sequence('AATATACCGGTTATAA')
>>> for match in s.find_with_regex('(TATA+)'):
... match
... str(s[match])
slice(2, 6, None)
'TATA'
slice(11, 16, None)
'TATAA'
"""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
lookup = np.arange(len(self))
if ignore is None:
string = str(self)
else:
ignore = self._munge_to_index_array(ignore)
lookup = np.delete(lookup, ignore)
string = str(self[lookup])
for match in regex.finditer(string):
# We start at 1 because we don't want the group that contains all
# other groups.
for g in range(1, len(match.groups())+1):
yield slice(lookup[match.start(g)],
lookup[match.end(g) - 1] + 1)
@stable(as_of="0.4.0")
def iter_contiguous(self, included, min_length=1, invert=False):
"""Yield contiguous subsequences based on `included`.
Parameters
----------
included : 1D array_like (bool) or iterable (slices or ints)
`included` is transformed into a flat boolean vector where each
position will either be included or skipped. All contiguous
included positions will be yielded as a single region.
min_length : int, optional
The minimum length of a subsequence for it to be yielded.
Default is 1.
invert : bool, optional
Whether to invert `included` such that it describes what should be
skipped instead of included. Default is False.
Yields
------
Sequence
Contiguous subsequence as indicated by `included`.
Notes
-----
If slices provide adjacent ranges, then they will be considered the
same contiguous subsequence.
Examples
--------
Here we use `iter_contiguous` to find all of the contiguous ungapped
sequences using a boolean vector derived from our DNA sequence.
>>> from skbio import DNA
>>> s = DNA('AAA--TT-CCCC-G-')
>>> no_gaps = ~s.gaps()
>>> for ungapped_subsequence in s.iter_contiguous(no_gaps,
... min_length=2):
... print(ungapped_subsequence)
AAA
TT
CCCC
Note how the last potential subsequence was skipped because it would
have been smaller than our `min_length` which was set to 2.
We can also use `iter_contiguous` on a generator of slices as is
produced by `find_motifs` (and `find_with_regex`).
>>> from skbio import Protein
>>> s = Protein('ACDFNASANFTACGNPNRTESL')
>>> for subseq in s.iter_contiguous(s.find_motifs('N-glycosylation')):
... print(subseq)
NASANFTA
NRTE
Note how the first subsequence contains two N-glycosylation sites. This
happened because they were contiguous.
"""
idx = self._munge_to_index_array(included)
if invert:
idx = np.delete(np.arange(len(self)), idx)
# Adapted from http://stackoverflow.com/a/7353335/579416
for contig in np.split(idx, np.where(np.diff(idx) != 1)[0] + 1):
r = self[contig]
if len(r) >= min_length:
yield r
def _to(self, sequence=None, metadata=None, positional_metadata=None):
"""Return a copy of the current biological sequence.
Returns a copy of the current biological sequence, optionally with
updated attributes specified as keyword arguments.
Arguments are the same as those passed to the ``Sequence`` constructor.
The returned copy will have its attributes updated based on the
arguments. If an attribute is missing, the copy will keep the same
attribute as the current biological sequence. Valid attribute names
are `'sequence'`, `'metadata'`, and `'positional_metadata'`. Default
behavior is to return a copy of the current biological sequence
without changing any attributes.
Parameters
----------
sequence : optional
metadata : optional
positional_metadata : optional
Returns
-------
Sequence
Copy of the current biological sequence, optionally with updated
attributes based on arguments. Will be the same type as the current
biological sequence (`self`).
Notes
-----
By default, `metadata` and `positional_metadata` are shallow-copied and
the reference to `sequence` is used (without copying) for efficiency
since `sequence` is immutable. This differs from the behavior of
`Sequence.copy`, which will actually copy `sequence`.
This method is the preferred way of creating new instances from an
existing biological sequence, instead of calling
``self.__class__(...)``, as the latter can be error-prone (e.g.,
it's easy to forget to propagate attributes to the new instance).
"""
if sequence is None:
sequence = self._bytes
if metadata is None:
metadata = self._metadata
if positional_metadata is None:
positional_metadata = self._positional_metadata
return self._constructor(sequence=sequence, metadata=metadata,
positional_metadata=positional_metadata)
def _constructor(self, **kwargs):
return self.__class__(**kwargs)
def _munge_to_index_array(self, sliceable):
"""Return an index array from something isomorphic to a boolean vector.
"""
if isinstance(sliceable, six.string_types):
if sliceable in self.positional_metadata:
if self.positional_metadata[sliceable].dtype == np.bool:
sliceable = self.positional_metadata[sliceable]
else:
raise TypeError("Column '%s' in positional metadata does "
"not correspond to a boolean vector" %
sliceable)
else:
raise ValueError("No positional metadata associated with key "
"'%s'" % sliceable)
if not hasattr(sliceable, 'dtype') or (hasattr(sliceable, 'dtype') and
sliceable.dtype == 'object'):
sliceable = tuple(sliceable)
bool_mode = False
int_mode = False
for s in sliceable:
if isinstance(s, (bool, np.bool_)):
bool_mode = True
elif isinstance(s, (slice, int, np.signedinteger)) or (
hasattr(s, 'dtype') and s.dtype != np.bool):
int_mode = True
else:
raise TypeError("Invalid type in iterable: %s, must be one"
" of {bool, int, slice, np.signedinteger}"
% s.__class__.__name__)
if bool_mode and int_mode:
raise TypeError("Cannot provide iterable of both bool and"
" int.")
sliceable = np.r_[sliceable]
if sliceable.dtype == np.bool:
if sliceable.size != len(self):
raise ValueError("Boolean array (%d) does not match length of"
" sequence (%d)."
% (sliceable.size, len(self)))
normalized, = np.where(sliceable)
else:
normalized = np.bincount(sliceable)
if np.any(normalized > 1):
raise ValueError("Overlapping index regions are not allowed.")
normalized, = np.where(normalized)
if np.any(normalized != sliceable):
raise ValueError("Index regions are out of order.")
return normalized
def _munge_to_sequence(self, other, method):
if isinstance(other, Sequence):
if type(other) != type(self):
raise TypeError("Cannot use %s and %s together with `%s`" %
(self.__class__.__name__,
other.__class__.__name__, method))
else:
return other
# We don't use self.__class__ or self._constructor here because we want
# to construct the most general type of Sequence object in order to
# avoid validation errors.
return Sequence(other)
def _munge_to_bytestring(self, other, method):
if type(other) is bytes:
return other
elif isinstance(other, six.string_types):
return other.encode('ascii')
else:
return self._munge_to_sequence(other, method)._string
@contextmanager
def _byte_ownership(self):
if not self._owns_bytes:
self._bytes = self._bytes.copy()
self._owns_bytes = True
self._bytes.flags.writeable = True
yield
self._bytes.flags.writeable = False
def _single_index_to_slice(start_index):
end_index = None if start_index == -1 else start_index+1
return slice(start_index, end_index)
def _is_single_index(index):
return (isinstance(index, numbers.Integral) and
not isinstance(index, bool))
def _as_slice_if_single_index(indexable):
if _is_single_index(indexable):
return _single_index_to_slice(indexable)
else:
return indexable
def _slices_from_iter(array, indexables):
for i in indexables:
if isinstance(i, slice):
pass
elif _is_single_index(i):
i = _single_index_to_slice(i)
else:
raise IndexError("Cannot slice sequence from iterable "
"containing %r." % i)
yield array[i]
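# Illustrative sketch (not part of scikit-bio): how the helpers above turn a
# mix of integers and slices into uniform slices. The function name below is
# hypothetical and added only for demonstration; it assumes `np` is the
# module-level numpy import used elsewhere in this file.
def _demo_slices_from_iter():
    data = np.arange(10)
    # Single integers are promoted to one-element slices; slices pass through.
    pieces = list(_slices_from_iter(data, [2, slice(4, 7), -1]))
    # pieces -> [array([2]), array([4, 5, 6]), array([9])]
    return pieces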
class _SequenceReprBuilder(object):
"""Build a ``Sequence`` repr.
Parameters
----------
seq : Sequence
Sequence to repr.
width : int
Maximum width of the repr.
indent : int
Number of spaces to use for indented lines.
chunk_size: int
Number of characters in each chunk of a sequence.
"""
def __init__(self, seq, width, indent, chunk_size):
self._seq = seq
self._width = width
self._indent = ' ' * indent
self._chunk_size = chunk_size
def build(self):
lines = ElasticLines()
cls_name = self._seq.__class__.__name__
lines.add_line(cls_name)
lines.add_separator()
if self._seq.has_metadata():
lines.add_line('Metadata:')
# Python 3 doesn't allow sorting of mixed types so we can't just
# use sorted() on the metadata keys. Sort first by type then sort
# by value within each type.
for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
value = self._seq.metadata[key]
lines.add_lines(self._format_metadata_key_value(key, value))
if self._seq.has_positional_metadata():
lines.add_line('Positional metadata:')
for key in self._seq.positional_metadata.columns.values.tolist():
dtype = self._seq.positional_metadata[key].dtype
lines.add_lines(
self._format_positional_metadata_column(key, dtype))
lines.add_line('Stats:')
for label, value in self._seq._repr_stats():
lines.add_line('%s%s: %s' % (self._indent, label, value))
lines.add_separator()
num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
# display entire sequence if we can, else display the first two and
# last two lines separated by ellipsis
if num_lines <= 5:
lines.add_lines(self._format_chunked_seq(
range(num_lines), num_chars, column_width))
else:
lines.add_lines(self._format_chunked_seq(
range(2), num_chars, column_width))
lines.add_line('...')
lines.add_lines(self._format_chunked_seq(
range(num_lines - 2, num_lines), num_chars, column_width))
return lines.to_str()
def _sorted_keys_grouped_by_type(self, dict_):
"""Group keys within a dict by their type and sort within type."""
type_sorted = sorted(dict_, key=self._type_sort_key)
type_and_value_sorted = []
for _, group in itertools.groupby(type_sorted, self._type_sort_key):
type_and_value_sorted.extend(sorted(group))
return type_and_value_sorted
def _type_sort_key(self, key):
return repr(type(key))
def _format_metadata_key_value(self, key, value):
"""Format metadata key:value, wrapping across lines if necessary."""
key_fmt = self._format_key(key)
supported_type = True
if isinstance(value, (six.text_type, six.binary_type)):
# for stringy values, there may be u'' or b'' depending on the type
# of `value` and version of Python. find the starting quote
# character so that wrapped text will line up with that instead of
# the string literal prefix character. for example:
#
# 'foo': u'abc def ghi
# jkl mno'
value_repr = repr(value)
extra_indent = 1
if not (value_repr.startswith("'") or value_repr.startswith('"')):
extra_indent = 2
# handles any number, this includes bool
elif value is None or isinstance(value, numbers.Number):
value_repr = repr(value)
extra_indent = 0
else:
supported_type = False
if not supported_type or len(value_repr) > 140:
value_repr = str(type(value))
# extra indent of 1 so that wrapped text lines up past the bracket:
#
# 'foo': <type
# 'dict'>
extra_indent = 1
return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
def _format_key(self, key):
"""Format metadata key.
Includes initial indent and trailing colon and space:
<indent>'foo':<space>
"""
key_fmt = self._indent + repr(key)
supported_types = (six.text_type, six.binary_type, numbers.Number,
type(None))
if len(key_fmt) > (self._width / 2) or not isinstance(key,
supported_types):
key_fmt = self._indent + str(type(key))
return '%s: ' % key_fmt
def _wrap_text_with_indent(self, text, initial_text, extra_indent):
"""Wrap text across lines with an initial indentation.
For example:
'foo': 'abc def
ghi jkl
mno pqr'
<indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
lines are indented such that they line up with the start of the
previous line of wrapped text.
"""
return textwrap.wrap(
text, width=self._width, expand_tabs=False,
initial_indent=initial_text,
subsequent_indent=' ' * (len(initial_text) + extra_indent))
def _format_positional_metadata_column(self, key, dtype):
key_fmt = self._format_key(key)
dtype_fmt = '<dtype: %s>' % str(dtype)
return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
def _find_optimal_seq_chunking(self):
"""Find the optimal number of sequence chunks to fit on a single line.
Returns the number of lines the sequence will occupy, the number of
sequence characters displayed on each line, and the column width
necessary to display position info using the optimal number of sequence
chunks.
"""
# strategy: use an iterative approach to find the optimal number of
# sequence chunks per line. start with a single chunk and increase
# until the max line width is exceeded. when this happens, the previous
# number of chunks is optimal
num_lines = 0
num_chars = 0
column_width = 0
num_chunks = 1
not_exceeded = True
while not_exceeded:
line_len, new_chunk_info = self._compute_chunked_seq_line_len(
num_chunks)
not_exceeded = line_len <= self._width
if not_exceeded:
num_lines, num_chars, column_width = new_chunk_info
num_chunks += 1
return num_lines, num_chars, column_width
def _compute_chunked_seq_line_len(self, num_chunks):
"""Compute line length based on a number of chunks."""
num_chars = num_chunks * self._chunk_size
# ceil to account for partial line
num_lines = int(math.ceil(len(self._seq) / num_chars))
# position column width is fixed width, based on the number of
# characters necessary to display the position of the final line (all
# previous positions will be left justified using this width)
column_width = len('%d ' % ((num_lines - 1) * num_chars))
# column width + number of sequence characters + spaces between chunks
line_len = column_width + num_chars + (num_chunks - 1)
return line_len, (num_lines, num_chars, column_width)
def _format_chunked_seq(self, line_idxs, num_chars, column_width):
"""Format specified lines of chunked sequence data."""
lines = []
for line_idx in line_idxs:
seq_idx = line_idx * num_chars
chars = str(self._seq[seq_idx:seq_idx+num_chars])
chunked_chars = chunk_str(chars, self._chunk_size, ' ')
lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
return lines
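# Illustrative sketch (not part of scikit-bio): the chunk-search strategy that
# _SequenceReprBuilder._find_optimal_seq_chunking describes, reimplemented as a
# standalone function. The name and parameters are hypothetical; `math` is
# assumed to be the module-level import used above.
def _demo_optimal_chunking(seq_len, max_width, chunk_size=8):
    best = (0, 0, 0)
    num_chunks = 1
    while True:
        num_chars = num_chunks * chunk_size
        num_lines = int(math.ceil(seq_len / float(num_chars)))
        column_width = len('%d ' % ((num_lines - 1) * num_chars))
        line_len = column_width + num_chars + (num_chunks - 1)
        if line_len > max_width:
            # the previous chunk count was the widest one that still fit
            return best
        best = (num_lines, num_chars, column_width)
        num_chunks += 1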
|
colinbrislawn/scikit-bio
|
skbio/sequence/_sequence.py
|
Python
|
bsd-3-clause
| 83,555 | 0.000108 |
from JumpScale import j
import JumpScale.baselib.redis
import JumpScale.grid.jumpscripts
class CmdRouter(object):
def __init__(self, path=None):
j.core.jumpscripts.load(path)
    def route(self, organization, actor, name, **args):
pass
|
Jumpscale/jumpscale6_core
|
lib/JumpScale/baselib/cmdrouter/CmdRouter.py
|
Python
|
bsd-2-clause
| 264 | 0.018939 |
import pandas as pd
adv = pd.read_csv('Advertising.csv')
tv_budget_x = adv.TV.tolist()
print(tv_budget_x)
|
akanuragkumar/tensorflow-basics
|
ex1.py
|
Python
|
gpl-3.0
| 110 | 0.009091 |
from .actions import *
from .actions_re import *
from .expectations import *
|
spyoungtech/behave-webdriver
|
behave_webdriver/steps/__init__.py
|
Python
|
mit
| 77 | 0 |
"""
Support for an interface to work with a remote instance of Home Assistant.
If a connection error occurs while communicating with the API a
HomeAssistantError will be raised.
For more details about the Python API, please refer to the documentation at
https://home-assistant.io/developers/python_api/
"""
from datetime import datetime
import enum
import json
import logging
import threading
import urllib.parse
import requests
import homeassistant.bootstrap as bootstrap
import homeassistant.core as ha
from homeassistant.const import (
HTTP_HEADER_HA_AUTH, SERVER_PORT, URL_API, URL_API_EVENT_FORWARD,
URL_API_EVENTS, URL_API_EVENTS_EVENT, URL_API_SERVICES,
URL_API_SERVICES_SERVICE, URL_API_STATES, URL_API_STATES_ENTITY,
HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)
from homeassistant.exceptions import HomeAssistantError
METHOD_GET = "get"
METHOD_POST = "post"
METHOD_DELETE = "delete"
_LOGGER = logging.getLogger(__name__)
class APIStatus(enum.Enum):
"""Represent API status."""
# pylint: disable=no-init,invalid-name,too-few-public-methods
OK = "ok"
INVALID_PASSWORD = "invalid_password"
CANNOT_CONNECT = "cannot_connect"
UNKNOWN = "unknown"
def __str__(self):
"""Return the state."""
return self.value
class API(object):
"""Object to pass around Home Assistant API location and credentials."""
# pylint: disable=too-few-public-methods
def __init__(self, host, api_password=None, port=None, use_ssl=False):
"""Initalize the API."""
self.host = host
self.port = port or SERVER_PORT
self.api_password = api_password
if use_ssl:
self.base_url = "https://{}:{}".format(host, self.port)
else:
self.base_url = "http://{}:{}".format(host, self.port)
self.status = None
self._headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_JSON,
}
if api_password is not None:
self._headers[HTTP_HEADER_HA_AUTH] = api_password
def validate_api(self, force_validate=False):
"""Test if we can communicate with the API."""
if self.status is None or force_validate:
self.status = validate_api(self)
return self.status == APIStatus.OK
def __call__(self, method, path, data=None):
"""Make a call to the Home Assistant API."""
if data is not None:
data = json.dumps(data, cls=JSONEncoder)
url = urllib.parse.urljoin(self.base_url, path)
try:
if method == METHOD_GET:
return requests.get(
url, params=data, timeout=5, headers=self._headers)
else:
return requests.request(
method, url, data=data, timeout=5, headers=self._headers)
except requests.exceptions.ConnectionError:
_LOGGER.exception("Error connecting to server")
raise HomeAssistantError("Error connecting to server")
except requests.exceptions.Timeout:
error = "Timeout when talking to {}".format(self.host)
_LOGGER.exception(error)
raise HomeAssistantError(error)
def __repr__(self):
"""Return the representation of the API."""
return "API({}, {}, {})".format(
self.host, self.api_password, self.port)
class HomeAssistant(ha.HomeAssistant):
"""Home Assistant that forwards work."""
# pylint: disable=super-init-not-called,too-many-instance-attributes
def __init__(self, remote_api, local_api=None):
"""Initalize the forward instance."""
if not remote_api.validate_api():
raise HomeAssistantError(
"Remote API at {}:{} not valid: {}".format(
remote_api.host, remote_api.port, remote_api.status))
self.remote_api = remote_api
self.pool = pool = ha.create_worker_pool()
self.bus = EventBus(remote_api, pool)
self.services = ha.ServiceRegistry(self.bus, pool)
self.states = StateMachine(self.bus, self.remote_api)
self.config = ha.Config()
self.config.api = local_api
def start(self):
"""Start the instance."""
# Ensure a local API exists to connect with remote
if 'api' not in self.config.components:
if not bootstrap.setup_component(self, 'api'):
raise HomeAssistantError(
'Unable to setup local API to receive events')
ha.create_timer(self)
self.bus.fire(ha.EVENT_HOMEASSISTANT_START,
origin=ha.EventOrigin.remote)
# Give eventlet time to startup
import eventlet
eventlet.sleep(0.1)
# Setup that events from remote_api get forwarded to local_api
# Do this after we fire START, otherwise HTTP is not started
if not connect_remote_events(self.remote_api, self.config.api):
raise HomeAssistantError((
'Could not setup event forwarding from api {} to '
'local api {}').format(self.remote_api, self.config.api))
def stop(self):
"""Stop Home Assistant and shuts down all threads."""
_LOGGER.info("Stopping")
self.bus.fire(ha.EVENT_HOMEASSISTANT_STOP,
origin=ha.EventOrigin.remote)
self.pool.stop()
# Disconnect master event forwarding
disconnect_remote_events(self.remote_api, self.config.api)
class EventBus(ha.EventBus):
"""EventBus implementation that forwards fire_event to remote API."""
# pylint: disable=too-few-public-methods
def __init__(self, api, pool=None):
"""Initalize the eventbus."""
super().__init__(pool)
self._api = api
def fire(self, event_type, event_data=None, origin=ha.EventOrigin.local):
"""Forward local events to remote target.
        Handles remote events as usual.
"""
# All local events that are not TIME_CHANGED are forwarded to API
if origin == ha.EventOrigin.local and \
event_type != ha.EVENT_TIME_CHANGED:
fire_event(self._api, event_type, event_data)
else:
super().fire(event_type, event_data, origin)
class EventForwarder(object):
"""Listens for events and forwards to specified APIs."""
def __init__(self, hass, restrict_origin=None):
"""Initalize the event forwarder."""
self.hass = hass
self.restrict_origin = restrict_origin
# We use a tuple (host, port) as key to ensure
# that we do not forward to the same host twice
self._targets = {}
self._lock = threading.Lock()
def connect(self, api):
"""Attach to a Home Assistant instance and forward events.
Will overwrite old target if one exists with same host/port.
"""
with self._lock:
if len(self._targets) == 0:
# First target we get, setup listener for events
self.hass.bus.listen(ha.MATCH_ALL, self._event_listener)
key = (api.host, api.port)
self._targets[key] = api
def disconnect(self, api):
"""Remove target from being forwarded to."""
with self._lock:
key = (api.host, api.port)
            # pop() returns the removed API object (or None), so a non-None
            # result means a forwarding target really was removed.
            did_remove = self._targets.pop(key, None) is not None
if len(self._targets) == 0:
# Remove event listener if no forwarding targets present
self.hass.bus.remove_listener(ha.MATCH_ALL,
self._event_listener)
return did_remove
def _event_listener(self, event):
"""Listen and forward all events."""
with self._lock:
# We don't forward time events or, if enabled, non-local events
if event.event_type == ha.EVENT_TIME_CHANGED or \
(self.restrict_origin and event.origin != self.restrict_origin):
return
for api in self._targets.values():
fire_event(api, event.event_type, event.data)
class StateMachine(ha.StateMachine):
"""Fire set events to an API. Uses state_change events to track states."""
def __init__(self, bus, api):
"""Initalize the statemachine."""
super().__init__(None)
self._api = api
self.mirror()
bus.listen(ha.EVENT_STATE_CHANGED, self._state_changed_listener)
def remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return remove_state(self._api, entity_id)
def set(self, entity_id, new_state, attributes=None):
"""Call set_state on remote API."""
set_state(self._api, entity_id, new_state, attributes)
def mirror(self):
"""Discard current data and mirrors the remote state machine."""
self._states = {state.entity_id: state for state
in get_states(self._api)}
def _state_changed_listener(self, event):
"""Listen for state changed events and applies them."""
if event.data['new_state'] is None:
self._states.pop(event.data['entity_id'], None)
else:
self._states[event.data['entity_id']] = event.data['new_state']
class JSONEncoder(json.JSONEncoder):
"""JSONEncoder that supports Home Assistant objects."""
# pylint: disable=too-few-public-methods,method-hidden
def default(self, obj):
"""Convert Home Assistant objects.
Hand other objects to the original method.
"""
if isinstance(obj, datetime):
return obj.isoformat()
elif hasattr(obj, 'as_dict'):
return obj.as_dict()
try:
return json.JSONEncoder.default(self, obj)
except TypeError:
# If the JSON serializer couldn't serialize it
# it might be a generator, convert it to a list
try:
return [self.default(child_obj)
for child_obj in obj]
except TypeError:
# Ok, we're lost, cause the original error
return json.JSONEncoder.default(self, obj)
def validate_api(api):
"""Make a call to validate API."""
try:
req = api(METHOD_GET, URL_API)
if req.status_code == 200:
return APIStatus.OK
elif req.status_code == 401:
return APIStatus.INVALID_PASSWORD
else:
return APIStatus.UNKNOWN
except HomeAssistantError:
return APIStatus.CANNOT_CONNECT
def connect_remote_events(from_api, to_api):
"""Setup from_api to forward all events to to_api."""
data = {
'host': to_api.host,
'api_password': to_api.api_password,
'port': to_api.port
}
try:
req = from_api(METHOD_POST, URL_API_EVENT_FORWARD, data)
if req.status_code == 200:
return True
else:
_LOGGER.error(
"Error setting up event forwarding: %s - %s",
req.status_code, req.text)
return False
except HomeAssistantError:
_LOGGER.exception("Error setting up event forwarding")
return False
def disconnect_remote_events(from_api, to_api):
"""Disconnect forwarding events from from_api to to_api."""
data = {
'host': to_api.host,
'port': to_api.port
}
try:
req = from_api(METHOD_DELETE, URL_API_EVENT_FORWARD, data)
if req.status_code == 200:
return True
else:
_LOGGER.error(
"Error removing event forwarding: %s - %s",
req.status_code, req.text)
return False
except HomeAssistantError:
_LOGGER.exception("Error removing an event forwarder")
return False
def get_event_listeners(api):
"""List of events that is being listened for."""
try:
req = api(METHOD_GET, URL_API_EVENTS)
return req.json() if req.status_code == 200 else {}
except (HomeAssistantError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Unexpected result retrieving event listeners")
return {}
def fire_event(api, event_type, data=None):
"""Fire an event at remote API."""
try:
req = api(METHOD_POST, URL_API_EVENTS_EVENT.format(event_type), data)
if req.status_code != 200:
_LOGGER.error("Error firing event: %d - %s",
req.status_code, req.text)
except HomeAssistantError:
_LOGGER.exception("Error firing event")
def get_state(api, entity_id):
"""Query given API for state of entity_id."""
try:
req = api(METHOD_GET, URL_API_STATES_ENTITY.format(entity_id))
# req.status_code == 422 if entity does not exist
return ha.State.from_dict(req.json()) \
if req.status_code == 200 else None
except (HomeAssistantError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Error fetching state")
return None
def get_states(api):
"""Query given API for all states."""
try:
req = api(METHOD_GET,
URL_API_STATES)
return [ha.State.from_dict(item) for
item in req.json()]
except (HomeAssistantError, ValueError, AttributeError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Error fetching states")
return []
def remove_state(api, entity_id):
"""Call API to remove state for entity_id.
Return True if entity is gone (removed/never existed).
"""
try:
req = api(METHOD_DELETE, URL_API_STATES_ENTITY.format(entity_id))
if req.status_code in (200, 404):
return True
_LOGGER.error("Error removing state: %d - %s",
req.status_code, req.text)
return False
except HomeAssistantError:
_LOGGER.exception("Error removing state")
return False
def set_state(api, entity_id, new_state, attributes=None):
"""Tell API to update state for entity_id.
Return True if success.
"""
attributes = attributes or {}
data = {'state': new_state,
'attributes': attributes}
try:
req = api(METHOD_POST,
URL_API_STATES_ENTITY.format(entity_id),
data)
if req.status_code not in (200, 201):
_LOGGER.error("Error changing state: %d - %s",
req.status_code, req.text)
return False
else:
return True
except HomeAssistantError:
_LOGGER.exception("Error setting state")
return False
def is_state(api, entity_id, state):
"""Query API to see if entity_id is specified state."""
cur_state = get_state(api, entity_id)
return cur_state and cur_state.state == state
def get_services(api):
"""Return a list of dicts.
Each dict has a string "domain" and a list of strings "services".
"""
try:
req = api(METHOD_GET, URL_API_SERVICES)
return req.json() if req.status_code == 200 else {}
except (HomeAssistantError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Got unexpected services result")
return {}
def call_service(api, domain, service, service_data=None):
"""Call a service at the remote API."""
try:
req = api(METHOD_POST,
URL_API_SERVICES_SERVICE.format(domain, service),
service_data)
if req.status_code != 200:
_LOGGER.error("Error calling service: %d - %s",
req.status_code, req.text)
except HomeAssistantError:
_LOGGER.exception("Error calling service")
|
mikaelboman/home-assistant
|
homeassistant/remote.py
|
Python
|
mit
| 15,888 | 0 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from miasm2.expression.expression import ExprId
from miasm2.core.cpu import gen_reg, gen_regs
gen_reg('PC', globals())
gen_reg('PC_FETCH', globals())
gen_reg('R_LO', globals())
gen_reg('R_HI', globals())
exception_flags = ExprId('exception_flags', 32)
PC_init = ExprId("PC_init")
PC_FETCH_init = ExprId("PC_FETCH_init")
regs32_str = ["ZERO", 'AT', 'V0', 'V1'] +\
['A%d'%i for i in xrange(4)] +\
['T%d'%i for i in xrange(8)] +\
['S%d'%i for i in xrange(8)] +\
['T%d'%i for i in xrange(8, 10)] +\
['K0', 'K1'] +\
['GP', 'SP', 'FP', 'RA']
regs32_expr = [ExprId(x, 32) for x in regs32_str]
regs_flt_str = ['F%d'%i for i in xrange(0x20)]
regs_fcc_str = ['FCC%d'%i for i in xrange(8)]
R_LO = ExprId('R_LO', 32)
R_HI = ExprId('R_HI', 32)
R_LO_init = ExprId('R_LO_init', 32)
R_HI_init = ExprId('R_HI_init', 32)
cpr0_str = ["CPR0_%d"%x for x in xrange(0x100)]
cpr0_str[0] = "INDEX"
cpr0_str[16] = "ENTRYLO0"
cpr0_str[24] = "ENTRYLO1"
cpr0_str[40] = "PAGEMASK"
cpr0_str[72] = "COUNT"
cpr0_str[80] = "ENTRYHI"
cpr0_str[104] = "CAUSE"
cpr0_str[112] = "EPC"
cpr0_str[128] = "CONFIG"
cpr0_str[152] = "WATCHHI"
regs_cpr0_expr, regs_cpr0_init, regs_cpr0_info = gen_regs(cpr0_str, globals())
gpregs_expr, gpregs_init, gpregs = gen_regs(regs32_str, globals())
regs_flt_expr, regs_flt_init, fltregs = gen_regs(regs_flt_str, globals(), sz=64)
regs_fcc_expr, regs_fcc_init, fccregs = gen_regs(regs_fcc_str, globals())
all_regs_ids = [PC, PC_FETCH, R_LO, R_HI] + gpregs_expr + regs_flt_expr + \
regs_fcc_expr + regs_cpr0_expr
all_regs_ids_byname = dict([(x.name, x) for x in all_regs_ids])
all_regs_ids_init = [PC_init, PC_FETCH_init, R_LO_init, R_HI_init] + \
gpregs_init + regs_flt_init + regs_fcc_init + regs_cpr0_init
all_regs_ids_no_alias = all_regs_ids[:]
regs_init = {}
for i, r in enumerate(all_regs_ids):
regs_init[r] = all_regs_ids_init[i]
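if __name__ == '__main__':
    # Illustrative sketch (not part of miasm): the tables built above map
    # register names to their ExprId objects and to the matching *_init
    # expressions used as initial symbolic values.
    sp = all_regs_ids_byname['SP']
    print sp, regs_init[sp]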
|
rom1sqr/miasm
|
miasm2/arch/mips32/regs.py
|
Python
|
gpl-2.0
| 1,927 | 0.00467 |
from __future__ import absolute_import
from __future__ import division
from builtins import next
from builtins import zip
from builtins import range
from past.utils import old_div
from builtins import object
import itertools as it
import abc
import numpy as np
import sima
from . import _motion as mc
from future.utils import with_metaclass
def add_with_offset(array1, array2, offset):
"""
>>> from sima.motion.motion import add_with_offset
>>> import numpy as np
>>> a1 = np.zeros((4, 4))
>>> a2 = np.ones((1, 2))
>>> add_with_offset(a1, a2, (1, 2))
>>> np.array_equal(a1[1:2, 2:4], a2)
True
"""
slices = tuple(slice(o, o + e) for o, e in zip(offset, array2.shape))
array1[slices] += array2
class MotionEstimationStrategy(with_metaclass(abc.ABCMeta, object)):
@classmethod
def _make_nonnegative(cls, displacements):
min_displacement = np.nanmin(
[np.nanmin(s.reshape(-1, s.shape[-1]), 0) for s in displacements],
0)
new_displacements = [d - min_displacement for d in displacements]
min_shifts = np.nanmin([np.nanmin(s.reshape(-1, s.shape[-1]), 0)
for s in new_displacements], 0)
assert np.all(min_shifts == 0)
return new_displacements
@abc.abstractmethod
def _estimate(self, dataset):
return
def estimate(self, dataset):
"""Estimate the displacements for a dataset.
Parameters
----------
dataset : sima.ImagingDataset
Returns
-------
displacements : list of ndarray of int
"""
shifts = self._estimate(dataset)
assert np.any(np.all(x is not np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
assert np.all(
np.all(x is np.ma.masked for x in shift) or
not np.any(x is np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
shifts = self._make_nonnegative(shifts)
assert np.any(np.all(x is not np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
assert np.all(
np.all(x is np.ma.masked for x in shift) or
not np.any(x is np.ma.masked for x in shift)
for shift in it.chain.from_iterable(shifts))
return shifts
def correct(self, dataset, savedir, channel_names=None, info=None,
correction_channels=None, trim_criterion=None):
"""Create a motion-corrected dataset.
Parameters
----------
dataset : sima.ImagingDataset or list of sima.Sequence
Dataset or sequences to be motion corrected.
savedir : str
The directory used to store the dataset. If the directory
name does not end with .sima, then this extension will
be appended.
channel_names : list of str, optional
Names for the channels. Defaults to ['0', '1', '2', ...].
info : dict
Data for the order and timing of the data acquisition.
See sima.ImagingDataset for details.
correction_channels : list of int, optional
Information from the channels corresponding to these indices
will be used for motion correction. By default, all channels
will be used.
trim_criterion : float, optional
The required fraction of frames during which a location must
be within the field of view for it to be included in the
motion-corrected imaging frames. By default, only locations
that are always within the field of view are retained.
Returns
-------
dataset : sima.ImagingDataset
The motion-corrected dataset.
"""
sequences = [s for s in dataset]
if correction_channels:
correction_channels = [
sima.misc.resolve_channels(c, channel_names, len(sequences[0]))
for c in correction_channels]
mc_sequences = [s[:, :, :, :, correction_channels]
for s in sequences]
else:
mc_sequences = sequences
displacements = self.estimate(sima.ImagingDataset(mc_sequences, None))
disp_dim = displacements[0].shape[-1]
max_disp = np.max(list(it.chain.from_iterable(d.reshape(-1, disp_dim)
for d in displacements)),
axis=0)
frame_shape = np.array(sequences[0].shape)[1: -1] # (z, y, x)
if len(max_disp) == 2: # if 2D displacements
frame_shape[1:3] += max_disp
else: # if 3D displacements
frame_shape += max_disp
corrected_sequences = [s.apply_displacements(d, frame_shape)
for s, d in zip(sequences, displacements)]
planes, rows, columns = _trim_coords(
trim_criterion, displacements, sequences[0].shape[1:4],
frame_shape)
corrected_sequences = [
s[:, planes, rows, columns] for s in corrected_sequences]
return sima.ImagingDataset(
corrected_sequences, savedir, channel_names=channel_names)
class ResonantCorrection(MotionEstimationStrategy):
"""Motion estimation strategy for resonant scanner data.
    When acquiring imaging data with a resonant scanner, the data
acquired when imaging the same positions can be substantially different
    depending on whether the resonant scanner is moving in one direction
or the other when passing over that row. This can cause problems when
trying to motion correct the data, since even rows are collected while
scanning in one direction and odd rows are collected by scanning
in the other direction.
The class defined here addresses this issue by using only the even
rows to estimate the displacements, and then uses those displacements
to motion-correct the entire dataset.
Parameters
----------
base_strategy : sima.motion.MotionEstimationStrategy
The underlying motion estimation strategy that will be used.
offset : int
Horizontal displacement to be added to odd rows. Note the
convention that row 0 (i.e. the "first" row) is considered
even.
"""
def __init__(self, base_strategy, offset=0):
self._base_strategy = base_strategy
self._offset = offset
def _estimate(self, dataset):
if not next(iter(dataset)).shape[2] % 2 == 0:
raise ValueError(
'Resonant motion correction requires an even number of rows')
downsampled_dataset = sima.ImagingDataset(
[sima.Sequence.join(
*it.chain.from_iterable(
(seq[:, :, ::2, :, c], seq[:, :, 1::2, :, c])
for c in range(seq.shape[4])))
for seq in dataset],
None)
downsampled_displacements = self._base_strategy.estimate(
downsampled_dataset)
displacements = []
for d_disps in downsampled_displacements:
disps = np.repeat(d_disps, 2, axis=2) # Repeat the displacements
disps[:, :, :, 0] *= 2 # multiply y-shifts by 2
disps[:, :, 1::2, -1] += self._offset # shift even rows by offset
displacements.append(disps)
return displacements
def _trim_coords(trim_criterion, displacements, raw_shape, untrimmed_shape):
"""The coordinates used to trim the corrected imaging data."""
epsilon = 1e-8
assert len(raw_shape) == 3
assert len(untrimmed_shape) == 3
if trim_criterion is None:
trim_criterion = 1.
if trim_criterion == 0.:
trim_criterion = epsilon
if not isinstance(trim_criterion, (float, int)):
raise TypeError('Invalid type for trim_criterion')
obs_counts = sum(_observation_counts(raw_shape, d, untrimmed_shape)
for d in it.chain.from_iterable(displacements))
num_frames = sum(len(x) for x in displacements)
occupancy = old_div(obs_counts.astype(float), num_frames)
plane_occupancy = old_div(occupancy.sum(axis=2).sum(axis=1), (
raw_shape[1] * raw_shape[2]))
good_planes = plane_occupancy + epsilon > trim_criterion
plane_min = np.nonzero(good_planes)[0].min()
plane_max = np.nonzero(good_planes)[0].max() + 1
row_occupancy = old_div(occupancy.sum(axis=2).sum(axis=0), (
raw_shape[0] * raw_shape[2]))
good_rows = row_occupancy + epsilon > trim_criterion
row_min = np.nonzero(good_rows)[0].min()
row_max = np.nonzero(good_rows)[0].max() + 1
col_occupancy = old_div(occupancy.sum(axis=1).sum(axis=0), np.prod(
raw_shape[:2]))
good_cols = col_occupancy + epsilon > trim_criterion
col_min = np.nonzero(good_cols)[0].min()
col_max = np.nonzero(good_cols)[0].max() + 1
rows = slice(row_min, row_max)
columns = slice(col_min, col_max)
planes = slice(plane_min, plane_max)
return planes, rows, columns
def _observation_counts(raw_shape, displacements, untrimmed_shape):
cnt = np.zeros(untrimmed_shape, dtype=int)
if displacements.ndim == 1:
z, y, x = displacements
cnt[z:(z + raw_shape[0]),
y:(y + raw_shape[1]),
x:(x + raw_shape[2])] = 1
elif displacements.ndim == 2:
for plane in range(raw_shape[0]):
d = list(displacements[plane])
if len(d) == 2:
d = [0] + d
cnt[plane + d[0],
d[1]:(d[1] + raw_shape[1]),
d[2]:(d[2] + raw_shape[2])] += 1
elif displacements.ndim == 3:
if displacements.shape[-1] == 2:
return mc.observation_counts(raw_shape, displacements,
untrimmed_shape)
else:
for plane, p_disp in enumerate(displacements):
for row, r_disp in enumerate(p_disp):
add_with_offset(cnt, np.ones((1, 1, raw_shape[2])),
r_disp + np.array([plane, row, 0]))
else:
raise ValueError
return cnt
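if __name__ == '__main__':
    # Illustrative sketch (not part of sima): how _observation_counts marks the
    # voxels covered by a single whole-frame displacement. The shapes and the
    # displacement below are made up for demonstration.
    raw_shape = (1, 4, 4)                  # (planes, rows, columns)
    displacement = np.array([0, 1, 2])     # shift by 1 row, 2 columns
    untrimmed_shape = (1, 6, 8)
    counts = _observation_counts(raw_shape, displacement, untrimmed_shape)
    print(counts[0])                       # 4x4 block of ones at offset (1, 2)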
|
pkaifosh/sima
|
sima/motion/motion.py
|
Python
|
gpl-2.0
| 10,210 | 0 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import sys
from Muon.GUI.Common.muon_load_data import MuonLoadData
from Muon.GUI.Common.utilities.load_utils import load_workspace_from_filename
from Muon.GUI.Common.muon_data_context import MuonDataContext
from Muon.GUI.FrequencyDomainAnalysis.frequency_context import FrequencyContext
from mantid.api import AnalysisDataService
import unittest
from Muon.GUI.Common.observer_pattern import Observer
from mantid.api import FileFinder
import copy
if sys.version_info.major > 2:
from unittest import mock
else:
import mock
class MuonDataContextTest(unittest.TestCase):
def setUp(self):
self.loaded_data = MuonLoadData()
self.context = MuonDataContext(self.loaded_data)
self.frequency_context = FrequencyContext(self.context)
self.gui_variable_observer = Observer()
self.gui_variable_observer.update = mock.MagicMock()
self.context.gui_variables_notifier.add_subscriber(self.gui_variable_observer)
self.context.instrument = 'CHRONUS'
self.gui_variable_observer = Observer()
self.gui_variable_observer.update = mock.MagicMock()
self.context.gui_variables_notifier.add_subscriber(self.gui_variable_observer)
filepath = FileFinder.findRuns('CHRONUS00003422.nxs')[0]
load_result, run, filename = load_workspace_from_filename(filepath)
self.loaded_data.add_data(workspace=load_result, run=[run], filename=filename, instrument='CHRONUS')
self.context.current_runs = [[run]]
self.context.update_current_data()
def tearDown(self):
AnalysisDataService.clear()
def test_get_detectors_excluded_from_default_grouping_tables_gets_correct_groups_for_CHRONUS(self):
result = self.frequency_context.get_detectors_excluded_from_default_grouping_tables()
self.assertEqual(result, [256, 425])
if __name__ == '__main__':
unittest.main(buffer=False, verbosity=2)
|
mganeva/mantid
|
scripts/test/Muon/frequency_domain_context_test.py
|
Python
|
gpl-3.0
| 2,194 | 0.002735 |
from distutils.core import setup
setup(name='robotframework-wiremock',
packages=['WireMockLibrary'],
package_dir={'': 'src'},
version='development',
description='Robot framework library for WireMock',
author='Timo Yrjola',
author_email='timo.yrjola@gmail.com',
classifiers=[])
|
tyrjola/robotframework-wiremock
|
resources/scripts/setup.py
|
Python
|
mit
| 320 | 0 |
"""Generated message classes for bigtableadmin version v2.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigtableadmin'
class BigtableadminOperationsCancelRequest(_messages.Message):
"""A BigtableadminOperationsCancelRequest object.
Fields:
name: The name of the operation resource to be cancelled.
"""
name = _messages.StringField(1, required=True)
class BigtableadminOperationsDeleteRequest(_messages.Message):
"""A BigtableadminOperationsDeleteRequest object.
Fields:
name: The name of the operation resource to be deleted.
"""
name = _messages.StringField(1, required=True)
class BigtableadminOperationsGetRequest(_messages.Message):
"""A BigtableadminOperationsGetRequest object.
Fields:
name: The name of the operation resource.
"""
name = _messages.StringField(1, required=True)
class BigtableadminOperationsListRequest(_messages.Message):
"""A BigtableadminOperationsListRequest object.
Fields:
filter: The standard list filter.
name: The name of the operation collection.
pageSize: The standard list page size.
pageToken: The standard list page token.
"""
filter = _messages.StringField(1)
name = _messages.StringField(2, required=True)
pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(4)
class BigtableadminProjectsInstancesClustersCreateRequest(_messages.Message):
"""A BigtableadminProjectsInstancesClustersCreateRequest object.
Fields:
cluster: A Cluster resource to be passed as the request body.
clusterId: The ID to be used when referring to the new cluster within its
instance, e.g., just `mycluster` rather than
`projects/myproject/instances/myinstance/clusters/mycluster`.
parent: The unique name of the instance in which to create the new
cluster. Values are of the form
`projects/<project>/instances/<instance>`.
"""
cluster = _messages.MessageField('Cluster', 1)
clusterId = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class BigtableadminProjectsInstancesClustersDeleteRequest(_messages.Message):
"""A BigtableadminProjectsInstancesClustersDeleteRequest object.
Fields:
name: The unique name of the cluster to be deleted. Values are of the form
`projects/<project>/instances/<instance>/clusters/<cluster>`.
"""
name = _messages.StringField(1, required=True)
class BigtableadminProjectsInstancesClustersGetRequest(_messages.Message):
"""A BigtableadminProjectsInstancesClustersGetRequest object.
Fields:
name: The unique name of the requested cluster. Values are of the form
`projects/<project>/instances/<instance>/clusters/<cluster>`.
"""
name = _messages.StringField(1, required=True)
class BigtableadminProjectsInstancesClustersListRequest(_messages.Message):
"""A BigtableadminProjectsInstancesClustersListRequest object.
Fields:
pageToken: The value of `next_page_token` returned by a previous call.
parent: The unique name of the instance for which a list of clusters is
requested. Values are of the form
`projects/<project>/instances/<instance>`. Use `<instance> = '-'` to
list Clusters for all Instances in a project, e.g.,
`projects/myproject/instances/-`.
"""
pageToken = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
class BigtableadminProjectsInstancesDeleteRequest(_messages.Message):
"""A BigtableadminProjectsInstancesDeleteRequest object.
Fields:
name: The unique name of the instance to be deleted. Values are of the
form `projects/<project>/instances/<instance>`.
"""
name = _messages.StringField(1, required=True)
class BigtableadminProjectsInstancesGetRequest(_messages.Message):
"""A BigtableadminProjectsInstancesGetRequest object.
Fields:
name: The unique name of the requested instance. Values are of the form
`projects/<project>/instances/<instance>`.
"""
name = _messages.StringField(1, required=True)
class BigtableadminProjectsInstancesListRequest(_messages.Message):
"""A BigtableadminProjectsInstancesListRequest object.
Fields:
pageToken: The value of `next_page_token` returned by a previous call.
parent: The unique name of the project for which a list of instances is
requested. Values are of the form `projects/<project>`.
"""
pageToken = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
class BigtableadminProjectsInstancesTablesCreateRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesCreateRequest object.
Fields:
createTableRequest: A CreateTableRequest resource to be passed as the
request body.
parent: The unique name of the instance in which to create the table.
Values are of the form `projects/<project>/instances/<instance>`.
"""
createTableRequest = _messages.MessageField('CreateTableRequest', 1)
parent = _messages.StringField(2, required=True)
class BigtableadminProjectsInstancesTablesDeleteRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesDeleteRequest object.
Fields:
name: The unique name of the table to be deleted. Values are of the form
`projects/<project>/instances/<instance>/tables/<table>`.
"""
name = _messages.StringField(1, required=True)
class BigtableadminProjectsInstancesTablesDropRowRangeRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesDropRowRangeRequest object.
Fields:
dropRowRangeRequest: A DropRowRangeRequest resource to be passed as the
request body.
name: The unique name of the table on which to drop a range of rows.
Values are of the form
`projects/<project>/instances/<instance>/tables/<table>`.
"""
dropRowRangeRequest = _messages.MessageField('DropRowRangeRequest', 1)
name = _messages.StringField(2, required=True)
class BigtableadminProjectsInstancesTablesGetRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesGetRequest object.
Enums:
ViewValueValuesEnum: The view to be applied to the returned table's
fields. Defaults to `SCHEMA_ONLY` if unspecified.
Fields:
name: The unique name of the requested table. Values are of the form
`projects/<project>/instances/<instance>/tables/<table>`.
view: The view to be applied to the returned table's fields. Defaults to
`SCHEMA_ONLY` if unspecified.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to be applied to the returned table's fields. Defaults to
`SCHEMA_ONLY` if unspecified.
Values:
VIEW_UNSPECIFIED: <no description>
NAME_ONLY: <no description>
SCHEMA_VIEW: <no description>
FULL: <no description>
"""
VIEW_UNSPECIFIED = 0
NAME_ONLY = 1
SCHEMA_VIEW = 2
FULL = 3
name = _messages.StringField(1, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 2)
class BigtableadminProjectsInstancesTablesListRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesListRequest object.
Enums:
ViewValueValuesEnum: The view to be applied to the returned tables'
fields. Defaults to `NAME_ONLY` if unspecified; no others are currently
supported.
Fields:
pageToken: The value of `next_page_token` returned by a previous call.
parent: The unique name of the instance for which tables should be listed.
Values are of the form `projects/<project>/instances/<instance>`.
view: The view to be applied to the returned tables' fields. Defaults to
`NAME_ONLY` if unspecified; no others are currently supported.
"""
class ViewValueValuesEnum(_messages.Enum):
"""The view to be applied to the returned tables' fields. Defaults to
`NAME_ONLY` if unspecified; no others are currently supported.
Values:
VIEW_UNSPECIFIED: <no description>
NAME_ONLY: <no description>
SCHEMA_VIEW: <no description>
FULL: <no description>
"""
VIEW_UNSPECIFIED = 0
NAME_ONLY = 1
SCHEMA_VIEW = 2
FULL = 3
pageToken = _messages.StringField(1)
parent = _messages.StringField(2, required=True)
view = _messages.EnumField('ViewValueValuesEnum', 3)
class BigtableadminProjectsInstancesTablesModifyColumnFamiliesRequest(_messages.Message):
"""A BigtableadminProjectsInstancesTablesModifyColumnFamiliesRequest object.
Fields:
modifyColumnFamiliesRequest: A ModifyColumnFamiliesRequest resource to be
passed as the request body.
name: The unique name of the table whose families should be modified.
Values are of the form
`projects/<project>/instances/<instance>/tables/<table>`.
"""
modifyColumnFamiliesRequest = _messages.MessageField('ModifyColumnFamiliesRequest', 1)
name = _messages.StringField(2, required=True)
class Cluster(_messages.Message):
"""A resizable group of nodes in a particular cloud location, capable of
serving all Tables in the parent Instance.
Enums:
DefaultStorageTypeValueValuesEnum: (`CreationOnly`) The type of storage
used by this cluster to serve its parent instance's tables, unless
explicitly overridden.
StateValueValuesEnum: (`OutputOnly`) The current state of the cluster.
Fields:
defaultStorageType: (`CreationOnly`) The type of storage used by this
cluster to serve its parent instance's tables, unless explicitly
overridden.
location: (`CreationOnly`) The location where this cluster's nodes and
storage reside. For best performance, clients should be located as close
as possible to this cluster. Currently only zones are supported, so
values should be of the form `projects/<project>/locations/<zone>`.
name: (`OutputOnly`) The unique name of the cluster. Values are of the
form `projects/<project>/instances/<instance>/clusters/a-z*`.
serveNodes: The number of nodes allocated to this cluster. More nodes
enable higher throughput and more consistent performance.
state: (`OutputOnly`) The current state of the cluster.
"""
class DefaultStorageTypeValueValuesEnum(_messages.Enum):
"""(`CreationOnly`) The type of storage used by this cluster to serve its
parent instance's tables, unless explicitly overridden.
Values:
STORAGE_TYPE_UNSPECIFIED: The user did not specify a storage type.
SSD: Flash (SSD) storage should be used.
HDD: Magnetic drive (HDD) storage should be used.
"""
STORAGE_TYPE_UNSPECIFIED = 0
SSD = 1
HDD = 2
class StateValueValuesEnum(_messages.Enum):
"""(`OutputOnly`) The current state of the cluster.
Values:
STATE_NOT_KNOWN: The state of the cluster could not be determined.
READY: The cluster has been successfully created and is ready to serve
requests.
CREATING: The cluster is currently being created, and may be destroyed
if the creation process encounters an error. A cluster may not be able
to serve requests while being created.
RESIZING: The cluster is currently being resized, and may revert to its
previous node count if the process encounters an error. A cluster is
still capable of serving requests while being resized, but may exhibit
performance as if its number of allocated nodes is between the
starting and requested states.
DISABLED: The cluster has no backing nodes. The data (tables) still
exist, but no operations can be performed on the cluster.
"""
STATE_NOT_KNOWN = 0
READY = 1
CREATING = 2
RESIZING = 3
DISABLED = 4
defaultStorageType = _messages.EnumField('DefaultStorageTypeValueValuesEnum', 1)
location = _messages.StringField(2)
name = _messages.StringField(3)
serveNodes = _messages.IntegerField(4, variant=_messages.Variant.INT32)
state = _messages.EnumField('StateValueValuesEnum', 5)
class ColumnFamily(_messages.Message):
"""A set of columns within a table which share a common configuration.
Fields:
gcRule: Garbage collection rule specified as a protobuf. Must serialize to
at most 500 bytes. NOTE: Garbage collection executes opportunistically
in the background, and so it's possible for reads to return a cell even
if it matches the active GC expression for its family.
"""
gcRule = _messages.MessageField('GcRule', 1)
class CreateInstanceMetadata(_messages.Message):
"""The metadata for the Operation returned by CreateInstance.
Fields:
finishTime: The time at which the operation failed or was completed
successfully.
originalRequest: The request that prompted the initiation of this
CreateInstance operation.
requestTime: The time at which the original request was received.
"""
finishTime = _messages.StringField(1)
originalRequest = _messages.MessageField('CreateInstanceRequest', 2)
requestTime = _messages.StringField(3)
class CreateInstanceRequest(_messages.Message):
"""Request message for BigtableInstanceAdmin.CreateInstance.
Messages:
ClustersValue: The clusters to be created within the instance, mapped by
desired cluster ID, e.g., just `mycluster` rather than
`projects/myproject/instances/myinstance/clusters/mycluster`. Fields
marked `OutputOnly` must be left blank. Currently exactly one cluster
must be specified.
Fields:
clusters: The clusters to be created within the instance, mapped by
desired cluster ID, e.g., just `mycluster` rather than
`projects/myproject/instances/myinstance/clusters/mycluster`. Fields
marked `OutputOnly` must be left blank. Currently exactly one cluster
must be specified.
instance: The instance to create. Fields marked `OutputOnly` must be left
blank.
instanceId: The ID to be used when referring to the new instance within
its project, e.g., just `myinstance` rather than
`projects/myproject/instances/myinstance`.
parent: The unique name of the project in which to create the new
instance. Values are of the form `projects/<project>`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ClustersValue(_messages.Message):
"""The clusters to be created within the instance, mapped by desired
cluster ID, e.g., just `mycluster` rather than
`projects/myproject/instances/myinstance/clusters/mycluster`. Fields
marked `OutputOnly` must be left blank. Currently exactly one cluster must
be specified.
Messages:
AdditionalProperty: An additional property for a ClustersValue object.
Fields:
additionalProperties: Additional properties of type ClustersValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ClustersValue object.
Fields:
key: Name of the additional property.
value: A Cluster attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('Cluster', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
clusters = _messages.MessageField('ClustersValue', 1)
instance = _messages.MessageField('Instance', 2)
instanceId = _messages.StringField(3)
parent = _messages.StringField(4)
class CreateTableRequest(_messages.Message):
"""Request message for
google.bigtable.admin.v2.BigtableTableAdmin.CreateTable
Fields:
initialSplits: The optional list of row keys that will be used to
initially split the table into several tablets (tablets are similar to
HBase regions). Given two split keys, `s1` and `s2`, three tablets will
be created, spanning the key ranges: `[, s1), [s1, s2), [s2, )`.
Example: * Row keys := `["a", "apple", "custom", "customer_1",
"customer_2",` `"other", "zz"]` * initial_split_keys :=
`["apple", "customer_1", "customer_2", "other"]` * Key assignment: -
Tablet 1 `[, apple) => {"a"}.` - Tablet 2 `[apple,
customer_1) => {"apple", "custom"}.` - Tablet 3 `[customer_1,
customer_2) => {"customer_1"}.` - Tablet 4 `[customer_2, other)
=> {"customer_2"}.` - Tablet 5 `[other, ) =>
{"other", "zz"}.`
table: The Table to create.
tableId: The name by which the new table should be referred to within the
parent instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
"""
initialSplits = _messages.MessageField('Split', 1, repeated=True)
table = _messages.MessageField('Table', 2)
tableId = _messages.StringField(3)
class DropRowRangeRequest(_messages.Message):
"""Request message for
google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange
Fields:
deleteAllDataFromTable: Delete all rows in the table. Setting this to
false is a no-op.
rowKeyPrefix: Delete all rows that start with this row key prefix. Prefix
cannot be zero length.
"""
deleteAllDataFromTable = _messages.BooleanField(1)
rowKeyPrefix = _messages.BytesField(2)
class Empty(_messages.Message):
"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo {
rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The
JSON representation for `Empty` is empty JSON object `{}`.
"""
class GcRule(_messages.Message):
"""Rule for determining which cells to delete during garbage collection.
Fields:
intersection: Delete cells that would be deleted by every nested rule.
maxAge: Delete cells in a column older than the given age. Values must be
at least one millisecond, and will be truncated to microsecond
granularity.
maxNumVersions: Delete all cells in a column except the most recent N.
union: Delete cells that would be deleted by any nested rule.
"""
intersection = _messages.MessageField('Intersection', 1)
maxAge = _messages.StringField(2)
maxNumVersions = _messages.IntegerField(3, variant=_messages.Variant.INT32)
union = _messages.MessageField('Union', 4)
class Instance(_messages.Message):
"""A collection of Bigtable Tables and the resources that serve them. All
tables in an instance are served from a single Cluster.
Enums:
StateValueValuesEnum: (`OutputOnly`) The current state of the instance.
TypeValueValuesEnum: The type of the instance. Defaults to `PRODUCTION`.
Fields:
displayName: The descriptive name for this instance as it appears in UIs.
Can be changed at any time, but should be kept globally unique to avoid
confusion.
name: (`OutputOnly`) The unique name of the instance. Values are of the
form `projects/<project>/instances/a-z+[a-z0-9]`.
state: (`OutputOnly`) The current state of the instance.
type: The type of the instance. Defaults to `PRODUCTION`.
"""
class StateValueValuesEnum(_messages.Enum):
"""(`OutputOnly`) The current state of the instance.
Values:
STATE_NOT_KNOWN: The state of the instance could not be determined.
READY: The instance has been successfully created and can serve requests
to its tables.
CREATING: The instance is currently being created, and may be destroyed
if the creation process encounters an error.
"""
STATE_NOT_KNOWN = 0
READY = 1
CREATING = 2
class TypeValueValuesEnum(_messages.Enum):
"""The type of the instance. Defaults to `PRODUCTION`.
Values:
TYPE_UNSPECIFIED: The type of the instance is unspecified. If set when
creating an instance, a `PRODUCTION` instance will be created. If set
when updating an instance, the type will be left unchanged.
PRODUCTION: An instance meant for production use. `serve_nodes` must be
set on the cluster.
DEVELOPMENT: The instance is meant for development and testing purposes
only; it has no performance or uptime guarantees and is not covered by
SLA. After a development instance is created, it can be upgraded by
updating the instance to type `PRODUCTION`. An instance created as a
production instance cannot be changed to a development instance. When
creating a development instance, `serve_nodes` on the cluster must not
be set.
"""
TYPE_UNSPECIFIED = 0
PRODUCTION = 1
DEVELOPMENT = 2
displayName = _messages.StringField(1)
name = _messages.StringField(2)
state = _messages.EnumField('StateValueValuesEnum', 3)
type = _messages.EnumField('TypeValueValuesEnum', 4)
class Intersection(_messages.Message):
"""A GcRule which deletes cells matching all of the given rules.
Fields:
rules: Only delete cells which would be deleted by every element of
`rules`.
"""
rules = _messages.MessageField('GcRule', 1, repeated=True)
class ListClustersResponse(_messages.Message):
"""Response message for BigtableInstanceAdmin.ListClusters.
Fields:
clusters: The list of requested clusters.
failedLocations: Locations from which Cluster information could not be
retrieved, due to an outage or some other transient condition. Clusters
from these locations may be missing from `clusters`, or may only have
partial information returned.
nextPageToken: Set if not all clusters could be returned in a single
response. Pass this value to `page_token` in another request to get the
next page of results.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
failedLocations = _messages.StringField(2, repeated=True)
nextPageToken = _messages.StringField(3)
class ListInstancesResponse(_messages.Message):
"""Response message for BigtableInstanceAdmin.ListInstances.
Fields:
failedLocations: Locations from which Instance information could not be
retrieved, due to an outage or some other transient condition. Instances
whose Clusters are all in one of the failed locations may be missing
from `instances`, and Instances with at least one Cluster in a failed
location may only have partial information returned.
instances: The list of requested instances.
nextPageToken: Set if not all instances could be returned in a single
response. Pass this value to `page_token` in another request to get the
next page of results.
"""
failedLocations = _messages.StringField(1, repeated=True)
instances = _messages.MessageField('Instance', 2, repeated=True)
nextPageToken = _messages.StringField(3)
class ListOperationsResponse(_messages.Message):
"""The response message for Operations.ListOperations.
Fields:
nextPageToken: The standard List next-page token.
operations: A list of operations that matches the specified filter in the
request.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ListTablesResponse(_messages.Message):
"""Response message for
google.bigtable.admin.v2.BigtableTableAdmin.ListTables
Fields:
nextPageToken: Set if not all tables could be returned in a single
response. Pass this value to `page_token` in another request to get the
next page of results.
tables: The tables present in the requested instance.
"""
nextPageToken = _messages.StringField(1)
tables = _messages.MessageField('Table', 2, repeated=True)
class Modification(_messages.Message):
"""A create, update, or delete of a particular column family.
Fields:
create: Create a new column family with the specified schema, or fail if
one already exists with the given ID.
drop: Drop (delete) the column family with the given ID, or fail if no
such family exists.
id: The ID of the column family to be modified.
update: Update an existing column family to the specified schema, or fail
if no column family exists with the given ID.
"""
create = _messages.MessageField('ColumnFamily', 1)
drop = _messages.BooleanField(2)
id = _messages.StringField(3)
update = _messages.MessageField('ColumnFamily', 4)
class ModifyColumnFamiliesRequest(_messages.Message):
"""Request message for
google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies
Fields:
modifications: Modifications to be atomically applied to the specified
table's families. Entries are applied in order, meaning that earlier
modifications can be masked by later ones (in the case of repeated
updates to the same family, for example).
"""
modifications = _messages.MessageField('Modification', 1, repeated=True)
class Operation(_messages.Message):
"""This resource represents a long-running operation that is the result of a
network API call.
Messages:
MetadataValue: Service-specific metadata associated with the operation.
It typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
ResponseValue: The normal response of the operation in case of success.
If the original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Fields:
done: If the value is `false`, it means the operation is still in
progress. If true, the operation is completed, and either `error` or
`response` is available.
error: The error result of the operation in case of failure or
cancellation.
metadata: Service-specific metadata associated with the operation. It
typically contains progress information and common metadata such as
create time. Some services might not provide such metadata. Any method
that returns a long-running operation should document the metadata type,
if any.
name: The server-assigned name, which is only unique within the same
service that originally returns it. If you use the default HTTP mapping,
the `name` should have the format of `operations/some/unique/name`.
response: The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the
response is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
"""Service-specific metadata associated with the operation. It typically
contains progress information and common metadata such as create time.
Some services might not provide such metadata. Any method that returns a
long-running operation should document the metadata type, if any.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
@encoding.MapUnrecognizedFields('additionalProperties')
class ResponseValue(_messages.Message):
"""The normal response of the operation in case of success. If the
original method returns no data on success, such as `Delete`, the response
is `google.protobuf.Empty`. If the original method is standard
`Get`/`Create`/`Update`, the response should be the resource. For other
methods, the response should have the type `XxxResponse`, where `Xxx` is
the original method name. For example, if the original method name is
`TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
Messages:
AdditionalProperty: An additional property for a ResponseValue object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ResponseValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
done = _messages.BooleanField(1)
error = _messages.MessageField('Status', 2)
metadata = _messages.MessageField('MetadataValue', 3)
name = _messages.StringField(4)
response = _messages.MessageField('ResponseValue', 5)
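# Illustrative sketch of how a caller is expected to read an Operation once the
# service returns it; the field semantics follow the docstring above, and the
# helper name is hypothetical.
def _example_operation_result(operation):
  """Return the operation response if finished, else None; raise on error."""
  if not operation.done:
    return None  # still in progress; poll again later
  if operation.error is not None:
    raise RuntimeError('operation %s failed: %s'
                       % (operation.name, operation.error.message))
  return operation.response  # a ResponseValue carrying the typed payload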
class Split(_messages.Message):
"""An initial split point for a newly created table.
Fields:
key: Row key to use as an initial tablet boundary.
"""
key = _messages.BytesField(1)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class Status(_messages.Message):
"""The `Status` type defines a logical error model that is suitable for
different programming environments, including REST APIs and RPC APIs. It is
used by [gRPC](https://github.com/grpc). The error model is designed to be:
- Simple to use and understand for most users - Flexible enough to meet
unexpected needs # Overview The `Status` message contains three pieces of
data: error code, error message, and error details. The error code should be
an enum value of google.rpc.Code, but it may accept additional error codes
if needed. The error message should be a developer-facing English message
that helps developers *understand* and *resolve* the error. If a localized
user-facing error message is needed, put the localized message in the error
details or localize it in the client. The optional error details may contain
arbitrary information about the error. There is a predefined set of error
detail types in the package `google.rpc` which can be used for common error
conditions. # Language mapping The `Status` message is the logical
representation of the error model, but it is not necessarily the actual wire
format. When the `Status` message is exposed in different client libraries
and different wire protocols, it can be mapped differently. For example, it
will likely be mapped to some exceptions in Java, but more likely mapped to
some error codes in C. # Other uses The error model and the `Status`
message can be used in a variety of environments, either with or without
APIs, to provide a consistent developer experience across different
environments. Example uses of this error model include: - Partial errors.
If a service needs to return partial errors to the client, it may embed
the `Status` in the normal response to indicate the partial errors. -
Workflow errors. A typical workflow has multiple steps. Each step may
have a `Status` message for error reporting purpose. - Batch operations. If
a client uses batch request and batch response, the `Status` message
should be used directly inside batch response, one for each error sub-
response. - Asynchronous operations. If an API call embeds asynchronous
operation results in its response, the status of those operations should
be represented directly using the `Status` message. - Logging. If some
API errors are stored in logs, the message `Status` could be used
directly after any stripping needed for security/privacy reasons.
Messages:
DetailsValueListEntry: A DetailsValueListEntry object.
Fields:
code: The status code, which should be an enum value of google.rpc.Code.
details: A list of messages that carry the error details. There will be a
common set of message types for APIs to use.
message: A developer-facing error message, which should be in English. Any
user-facing error message should be localized and sent in the
google.rpc.Status.details field, or localized by the client.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class DetailsValueListEntry(_messages.Message):
"""A DetailsValueListEntry object.
Messages:
AdditionalProperty: An additional property for a DetailsValueListEntry
object.
Fields:
additionalProperties: Properties of the object. Contains field @type
with type URL.
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a DetailsValueListEntry object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
class Table(_messages.Message):
"""A collection of user data indexed by row, column, and timestamp. Each
table is served using the resources of its parent cluster.
Enums:
GranularityValueValuesEnum: (`CreationOnly`) The granularity (e.g.
`MILLIS`, `MICROS`) at which timestamps are stored in this table.
Timestamps not matching the granularity will be rejected. If unspecified
at creation time, the value will be set to `MILLIS`. Views:
`SCHEMA_VIEW`, `FULL`
Messages:
ColumnFamiliesValue: (`CreationOnly`) The column families configured for
this table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
Fields:
columnFamilies: (`CreationOnly`) The column families configured for this
table, mapped by column family ID. Views: `SCHEMA_VIEW`, `FULL`
granularity: (`CreationOnly`) The granularity (e.g. `MILLIS`, `MICROS`) at
which timestamps are stored in this table. Timestamps not matching the
granularity will be rejected. If unspecified at creation time, the value
will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`
name: (`OutputOnly`) The unique name of the table. Values are of the form
`projects/<project>/instances/<instance>/tables/_a-zA-Z0-9*`. Views:
`NAME_ONLY`, `SCHEMA_VIEW`, `FULL`
"""
class GranularityValueValuesEnum(_messages.Enum):
"""(`CreationOnly`) The granularity (e.g. `MILLIS`, `MICROS`) at which
timestamps are stored in this table. Timestamps not matching the
granularity will be rejected. If unspecified at creation time, the value
will be set to `MILLIS`. Views: `SCHEMA_VIEW`, `FULL`
Values:
TIMESTAMP_GRANULARITY_UNSPECIFIED: The user did not specify a
granularity. Should not be returned. When specified during table
creation, MILLIS will be used.
MILLIS: The table keeps data versioned at a granularity of 1ms.
"""
TIMESTAMP_GRANULARITY_UNSPECIFIED = 0
MILLIS = 1
@encoding.MapUnrecognizedFields('additionalProperties')
class ColumnFamiliesValue(_messages.Message):
"""(`CreationOnly`) The column families configured for this table, mapped
by column family ID. Views: `SCHEMA_VIEW`, `FULL`
Messages:
AdditionalProperty: An additional property for a ColumnFamiliesValue
object.
Fields:
additionalProperties: Additional properties of type ColumnFamiliesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a ColumnFamiliesValue object.
Fields:
key: Name of the additional property.
value: A ColumnFamily attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('ColumnFamily', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
columnFamilies = _messages.MessageField('ColumnFamiliesValue', 1)
granularity = _messages.EnumField('GranularityValueValuesEnum', 2)
name = _messages.StringField(3)
class Union(_messages.Message):
"""A GcRule which deletes cells matching any of the given rules.
Fields:
rules: Delete cells which would be deleted by any element of `rules`.
"""
rules = _messages.MessageField('GcRule', 1, repeated=True)
class UpdateClusterMetadata(_messages.Message):
"""The metadata for the Operation returned by UpdateCluster.
Fields:
finishTime: The time at which the operation failed or was completed
successfully.
originalRequest: The request that prompted the initiation of this
UpdateCluster operation.
requestTime: The time at which the original request was received.
"""
finishTime = _messages.StringField(1)
originalRequest = _messages.MessageField('Cluster', 2)
requestTime = _messages.StringField(3)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'bigtableadmin')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'bigtableadmin')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'bigtableadmin')
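# Usage sketch appended for illustration only. It assumes ColumnFamily (defined
# earlier in this module) exposes a gcRule field, and it uses apitools'
# encoding helpers in the same way as the custom mappings above.
def _example_table_roundtrip():
  """Build a Table message and round-trip it through its JSON representation."""
  family = ColumnFamily(gcRule=GcRule(maxNumVersions=1))
  table = Table(
      granularity=Table.GranularityValueValuesEnum.MILLIS,
      columnFamilies=Table.ColumnFamiliesValue(additionalProperties=[
          Table.ColumnFamiliesValue.AdditionalProperty(key='cf1', value=family),
      ]))
  as_json = encoding.MessageToJson(table)
  return encoding.JsonToMessage(Table, as_json)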
|
Sorsly/subtle
|
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/bigtableadmin/v2/bigtableadmin_v2_messages.py
|
Python
|
mit
| 40,478 | 0.004595 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230013.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
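# Hedged usage sketch: when libsbml is available, `sbml` above is an
# SBMLDocument, so the model can be inspected as shown below. The print is
# guarded behind __main__ so it only runs when this file is executed directly.
if module_exists('libsbml') and __name__ == '__main__':
    model = sbml.getModel()
    if model is not None:
        print('%s: %d species, %d reactions' % (
            model.getId(), model.getNumSpecies(), model.getNumReactions()))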
|
biomodels/MODEL1006230013
|
MODEL1006230013/model.py
|
Python
|
cc0-1.0
| 427 | 0.009368 |
from django.apps import AppConfig
class TagsConfig(AppConfig):
name = "hav.apps.tags"
|
whav/hav
|
src/hav/apps/tags/apps.py
|
Python
|
gpl-3.0
| 92 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# MODIFIED FROM ORIGINAL VERSION
#
# This file is not the same as in pypi. It includes a pull request to fix py3
# incompabilities that never ended up getting merged.
###############################################################################
import os
from ctypes import CDLL, c_char_p, c_int, c_void_p, c_uint, c_double, byref, Structure, get_errno,\
POINTER, c_short, c_size_t, create_string_buffer
from ctypes.util import find_library
from psistats.libsensors.lib import stdc
version_info = (0, 0, 3)
__version__ = '.'.join(map(str, version_info))
__date__ = '2014-08-17'
__author__ = "Marc 'BlackJack' Rintsch"
__contact__ = 'marc@rintsch.de'
__license__ = 'LGPL v2.1'
API_VERSION = 4
DEFAULT_CONFIG_FILENAME = '/etc/sensors3.conf'
LIB_FILENAME = os.environ.get('SENSORS_LIB') or find_library('sensors')
SENSORS_LIB = CDLL(LIB_FILENAME)
VERSION = c_char_p.in_dll(SENSORS_LIB, 'libsensors_version').value
MAJOR_VERSION = version_info[0]
class SensorsError(Exception):
def __init__(self, message, error_number=None):
Exception.__init__(self, message)
self.error_number = error_number
def _error_check(result, _func, _arguments):
if result < 0:
raise SensorsError(_strerror(result), result)
return result
_strerror = SENSORS_LIB.sensors_strerror
_strerror.argtypes = [c_int]
_strerror.restype = c_char_p
_init = SENSORS_LIB.sensors_init
_init.argtypes = [c_void_p]
_init.restype = c_int
_init.errcheck = _error_check
cleanup = SENSORS_LIB.sensors_cleanup
cleanup.argtypes = None
cleanup.restype = None
SENSORS_FEATURE_IN = 0x00
SENSORS_FEATURE_FAN = 0x01
SENSORS_FEATURE_TEMP = 0x02
SENSORS_FEATURE_POWER = 0x03
SENSORS_FEATURE_ENERGY = 0x04
SENSORS_FEATURE_CURR = 0x05
SENSORS_FEATURE_HUMIDITY = 0x06
# SENSORS_FEATURE_MAX_MAIN
SENSORS_FEATURE_VID = 0x10
SENSORS_FEATURE_INTRUSION = 0x11
#SENSORS_FEATURE_MAX_OTHER,
SENSORS_FEATURE_BEEP_ENABLE = 0x18
#SENSORS_FEATURE_MAX,
#SENSORS_FEATURE_UNKNOWN = INT_MAX
def init(config_filename=DEFAULT_CONFIG_FILENAME):
file_p = stdc.fopen(config_filename.encode('utf-8'), b'r')
if file_p is None:
error_number = get_errno()
raise OSError(error_number, os.strerror(error_number), config_filename)
try:
_init(file_p)
finally:
stdc.fclose(file_p)
class Subfeature(Structure):
_fields_ = [
('name', c_char_p),
('number', c_int),
('type', c_int),
('mapping', c_int),
('flags', c_uint),
]
def __repr__(self):
return '<%s name=%r number=%d type=%d mapping=%d flags=%08x>' % (
self.__class__.__name__,
self.name,
self.number,
self.type,
self.mapping,
self.flags
)
def get_value(self):
result = c_double()
_get_value(byref(self.parent.chip), self.number, byref(result))
return result.value
SUBFEATURE_P = POINTER(Subfeature)
class Feature(Structure):
_fields_ = [
('name', c_char_p),
('number', c_int),
('type', c_int),
('_first_subfeature', c_int),
('_padding1', c_int),
]
def __repr__(self):
return '<%s name=%r number=%r type=%r>' % (
self.__class__.__name__,
self.name,
self.number,
self.type
)
def __iter__(self):
number = c_int(0)
while True:
result_p = _get_all_subfeatures(
byref(self.chip),
byref(self),
byref(number)
)
if not result_p:
break
result = result_p.contents
result.chip = self.chip
result.parent = self
yield result
@property
def label(self):
#
# TODO Maybe this is a memory leak!
#
return _get_label(byref(self.chip), byref(self)).decode('utf-8')
def get_value(self):
#
# TODO Is the first always the correct one for all feature types?
#
return next(iter(self)).get_value()
FEATURE_P = POINTER(Feature)
class Bus(Structure):
TYPE_ANY = -1
NR_ANY = -1
_fields_ = [
('type', c_short),
('nr', c_short),
]
def __str__(self):
return (
'*' if self.type == self.TYPE_ANY
else _get_adapter_name(byref(self)).decode('utf-8')
)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.type, self.nr)
@property
def has_wildcards(self):
return self.type == self.TYPE_ANY or self.nr == self.NR_ANY
BUS_P = POINTER(Bus)
class Chip(Structure):
#
# TODO Move common stuff into `AbstractChip` class.
#
_fields_ = [
('prefix', c_char_p),
('bus', Bus),
('addr', c_int),
('path', c_char_p),
]
PREFIX_ANY = None
ADDR_ANY = -1
def __new__(cls, *args):
result = super(Chip, cls).__new__(cls)
if args:
_parse_chip_name(args[0].encode('utf-8'), byref(result))
return result
def __init__(self, *_args):
Structure.__init__(self)
#
# Need to bind the following to the instance so it is available in
# `__del__()` when the interpreter shuts down.
#
self._free_chip_name = _free_chip_name
self.byref = byref
def __del__(self):
if self._b_needsfree_:
self._free_chip_name(self.byref(self))
def __repr__(self):
return '<%s prefix=%r bus=%r addr=%r path=%r>' % (
(
self.__class__.__name__,
self.prefix,
self.bus,
self.addr,
self.path
)
)
def __str__(self):
buffer_size = 200
result = create_string_buffer(buffer_size)
used = _snprintf_chip_name(result, len(result), byref(self))
assert used < buffer_size
return result.value.decode('utf-8')
def __iter__(self):
number = c_int(0)
while True:
result_p = _get_features(byref(self), byref(number))
if not result_p:
break
result = result_p.contents
result.chip = self
yield result
@property
def adapter_name(self):
return str(self.bus)
@property
def has_wildcards(self):
return (
self.prefix == self.PREFIX_ANY
or self.addr == self.ADDR_ANY
or self.bus.has_wildcards
)
CHIP_P = POINTER(Chip)
_parse_chip_name = SENSORS_LIB.sensors_parse_chip_name
_parse_chip_name.argtypes = [c_char_p, CHIP_P]
_parse_chip_name.restype = c_int
_parse_chip_name.errcheck = _error_check
_free_chip_name = SENSORS_LIB.sensors_free_chip_name
_free_chip_name.argtypes = [CHIP_P]
_free_chip_name.restype = None
_snprintf_chip_name = SENSORS_LIB.sensors_snprintf_chip_name
_snprintf_chip_name.argtypes = [c_char_p, c_size_t, CHIP_P]
_snprintf_chip_name.restype = c_int
_snprintf_chip_name.errcheck = _error_check
_get_adapter_name = SENSORS_LIB.sensors_get_adapter_name
_get_adapter_name.argtypes = [BUS_P]
_get_adapter_name.restype = c_char_p
_get_label = SENSORS_LIB.sensors_get_label
_get_label.argtypes = [CHIP_P, FEATURE_P]
_get_label.restype = c_char_p
_get_value = SENSORS_LIB.sensors_get_value
_get_value.argtypes = [CHIP_P, c_int, POINTER(c_double)]
_get_value.restype = c_int
_get_value.errcheck = _error_check
#
# TODO sensors_set_value()
# TODO sensors_do_chip_sets()
#
_get_detected_chips = SENSORS_LIB.sensors_get_detected_chips
_get_detected_chips.argtypes = [CHIP_P, POINTER(c_int)]
_get_detected_chips.restype = CHIP_P
_get_features = SENSORS_LIB.sensors_get_features
_get_features.argtypes = [CHIP_P, POINTER(c_int)]
_get_features.restype = FEATURE_P
_get_all_subfeatures = SENSORS_LIB.sensors_get_all_subfeatures
_get_all_subfeatures.argtypes = [CHIP_P, FEATURE_P, POINTER(c_int)]
_get_all_subfeatures.restype = SUBFEATURE_P
#
# TODO sensors_get_subfeature() ?
#
def iter_detected_chips(chip_name='*-*'):
chip = Chip(chip_name)
number = c_int(0)
while True:
result = _get_detected_chips(byref(chip), byref(number))
if not result:
break
yield result.contents
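# Usage sketch built only from the helpers defined above; the chips, labels and
# values reported depend entirely on the local hardware and sensors config.
if __name__ == '__main__':
    init()  # parses /etc/sensors3.conf by default
    try:
        for chip in iter_detected_chips():
            print(chip)
            for feature in chip:
                try:
                    print('  %s: %s' % (feature.label, feature.get_value()))
                except SensorsError as error:
                    print('  %s: unreadable (%s)' % (feature.label, error))
    finally:
        cleanup()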
|
psistats/linux-client
|
psistats/libsensors/lib/sensors.py
|
Python
|
mit
| 8,525 | 0.002346 |
from __future__ import absolute_import, print_function, division
from pony.py23compat import PY2, basestring, unicode, buffer, int_types
import os, re, json
from decimal import Decimal, InvalidOperation
from datetime import datetime, date, time, timedelta
from uuid import uuid4, UUID
import pony
from pony.utils import is_utf8, decorator, throw, localbase, deprecated
from pony.converting import str2date, str2time, str2datetime, str2timedelta
from pony.orm.ormtypes import LongStr, LongUnicode, RawSQLType, TrackedValue, Json
class DBException(Exception):
def __init__(exc, original_exc, *args):
args = args or getattr(original_exc, 'args', ())
Exception.__init__(exc, *args)
exc.original_exc = original_exc
# Exception inheritance layout of DBAPI 2.0-compatible provider:
#
# Exception
# Warning
# Error
# InterfaceError
# DatabaseError
# DataError
# OperationalError
# IntegrityError
# InternalError
# ProgrammingError
# NotSupportedError
class Warning(DBException): pass
class Error(DBException): pass
class InterfaceError(Error): pass
class DatabaseError(Error): pass
class DataError(DatabaseError): pass
class OperationalError(DatabaseError): pass
class IntegrityError(DatabaseError): pass
class InternalError(DatabaseError): pass
class ProgrammingError(DatabaseError): pass
class NotSupportedError(DatabaseError): pass
@decorator
def wrap_dbapi_exceptions(func, provider, *args, **kwargs):
dbapi_module = provider.dbapi_module
try: return func(provider, *args, **kwargs)
except dbapi_module.NotSupportedError as e: raise NotSupportedError(e)
except dbapi_module.ProgrammingError as e: raise ProgrammingError(e)
except dbapi_module.InternalError as e: raise InternalError(e)
except dbapi_module.IntegrityError as e: raise IntegrityError(e)
except dbapi_module.OperationalError as e: raise OperationalError(e)
except dbapi_module.DataError as e: raise DataError(e)
except dbapi_module.DatabaseError as e: raise DatabaseError(e)
except dbapi_module.InterfaceError as e:
if e.args == (0, '') and getattr(dbapi_module, '__name__', None) == 'MySQLdb':
throw(InterfaceError, e, 'MySQL server misconfiguration')
raise InterfaceError(e)
except dbapi_module.Error as e: raise Error(e)
except dbapi_module.Warning as e: raise Warning(e)
def unexpected_args(attr, args):
throw(TypeError,
'Unexpected positional argument%s for attribute %s: %r'
% ((args > 1 and 's' or ''), attr, ', '.join(repr(arg) for arg in args)))
version_re = re.compile(r'[0-9.]+')
def get_version_tuple(s):
m = version_re.match(s)
if m is not None:
components = m.group(0).split('.')
return tuple(int(component) for component in components)
return None
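# Illustrative behaviour: get_version_tuple('9.6.2-beta') -> (9, 6, 2), while a
# string that does not start with a digit or a dot returns None.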
class DBAPIProvider(object):
paramstyle = 'qmark'
quote_char = '"'
max_params_count = 200
max_name_len = 128
table_if_not_exists_syntax = True
index_if_not_exists_syntax = True
max_time_precision = default_time_precision = 6
uint64_support = False
select_for_update_nowait_syntax = True
# SQLite and PostgreSQL do not limit varchar max length.
varchar_default_max_len = None
dialect = None
dbapi_module = None
dbschema_cls = None
translator_cls = None
sqlbuilder_cls = None
name_before_table = 'schema_name'
default_schema_name = None
fk_types = { 'SERIAL' : 'INTEGER', 'BIGSERIAL' : 'BIGINT' }
def __init__(provider, *args, **kwargs):
pool_mockup = kwargs.pop('pony_pool_mockup', None)
if pool_mockup: provider.pool = pool_mockup
else: provider.pool = provider.get_pool(*args, **kwargs)
connection = provider.connect()
provider.inspect_connection(connection)
provider.release(connection)
@wrap_dbapi_exceptions
def inspect_connection(provider, connection):
pass
def normalize_name(provider, name):
return name[:provider.max_name_len]
def get_default_entity_table_name(provider, entity):
return provider.normalize_name(entity.__name__)
def get_default_m2m_table_name(provider, attr, reverse):
if attr.symmetric:
assert reverse is attr
name = attr.entity.__name__ + '_' + attr.name
else:
name = attr.entity.__name__ + '_' + reverse.entity.__name__
return provider.normalize_name(name)
def get_default_column_names(provider, attr, reverse_pk_columns=None):
normalize = provider.normalize_name
if reverse_pk_columns is None:
return [ normalize(attr.name) ]
elif len(reverse_pk_columns) == 1:
return [ normalize(attr.name) ]
else:
prefix = attr.name + '_'
return [ normalize(prefix + column) for column in reverse_pk_columns ]
def get_default_m2m_column_names(provider, entity):
normalize = provider.normalize_name
columns = entity._get_pk_columns_()
if len(columns) == 1:
return [ normalize(entity.__name__.lower()) ]
else:
prefix = entity.__name__.lower() + '_'
return [ normalize(prefix + column) for column in columns ]
def get_default_index_name(provider, table_name, column_names, is_pk=False, is_unique=False, m2m=False):
if is_pk: index_name = 'pk_%s' % table_name
else:
if is_unique: template = 'unq_%(tname)s__%(cnames)s'
elif m2m: template = 'idx_%(tname)s'
else: template = 'idx_%(tname)s__%(cnames)s'
index_name = template % dict(tname=table_name,
cnames='_'.join(name for name in column_names))
return provider.normalize_name(index_name.lower())
def get_default_fk_name(provider, child_table_name, parent_table_name, child_column_names):
fk_name = 'fk_%s__%s' % (child_table_name, '__'.join(child_column_names))
return provider.normalize_name(fk_name.lower())
def split_table_name(provider, table_name):
if isinstance(table_name, basestring): return provider.default_schema_name, table_name
if not table_name: throw(TypeError, 'Invalid table name: %r' % table_name)
if len(table_name) != 2:
size = len(table_name)
throw(TypeError, '%s qualified table name must have two components: '
'%s and table_name. Got %d component%s: %s'
% (provider.dialect, provider.name_before_table,
size, 's' if size != 1 else '', table_name))
return table_name[0], table_name[1]
def quote_name(provider, name):
quote_char = provider.quote_char
if isinstance(name, basestring):
name = name.replace(quote_char, quote_char+quote_char)
return quote_char + name + quote_char
return '.'.join(provider.quote_name(item) for item in name)
def normalize_vars(provider, vars, vartypes):
pass
def ast2sql(provider, ast):
builder = provider.sqlbuilder_cls(provider, ast)
return builder.sql, builder.adapter
def should_reconnect(provider, exc):
return False
@wrap_dbapi_exceptions
def connect(provider):
return provider.pool.connect()
@wrap_dbapi_exceptions
def set_transaction_mode(provider, connection, cache):
pass
@wrap_dbapi_exceptions
def commit(provider, connection, cache=None):
core = pony.orm.core
if core.debug: core.log_orm('COMMIT')
connection.commit()
if cache is not None: cache.in_transaction = False
@wrap_dbapi_exceptions
def rollback(provider, connection, cache=None):
core = pony.orm.core
if core.debug: core.log_orm('ROLLBACK')
connection.rollback()
if cache is not None: cache.in_transaction = False
@wrap_dbapi_exceptions
def release(provider, connection, cache=None):
core = pony.orm.core
if cache is not None and cache.db_session is not None and cache.db_session.ddl:
provider.drop(connection, cache)
else:
if core.debug: core.log_orm('RELEASE CONNECTION')
provider.pool.release(connection)
@wrap_dbapi_exceptions
def drop(provider, connection, cache=None):
core = pony.orm.core
if core.debug: core.log_orm('CLOSE CONNECTION')
provider.pool.drop(connection)
if cache is not None: cache.in_transaction = False
@wrap_dbapi_exceptions
def disconnect(provider):
core = pony.orm.core
if core.debug: core.log_orm('DISCONNECT')
provider.pool.disconnect()
@wrap_dbapi_exceptions
def execute(provider, cursor, sql, arguments=None, returning_id=False):
if type(arguments) is list:
assert arguments and not returning_id
cursor.executemany(sql, arguments)
else:
if arguments is None: cursor.execute(sql)
else: cursor.execute(sql, arguments)
if returning_id: return cursor.lastrowid
converter_classes = []
def _get_converter_type_by_py_type(provider, py_type):
if isinstance(py_type, type):
for t, converter_cls in provider.converter_classes:
if issubclass(py_type, t): return converter_cls
if isinstance(py_type, RawSQLType):
return Converter # for cases like select(raw_sql(...) for x in X)
throw(TypeError, 'No database converter found for type %s' % py_type)
def get_converter_by_py_type(provider, py_type):
converter_cls = provider._get_converter_type_by_py_type(py_type)
return converter_cls(provider, py_type)
def get_converter_by_attr(provider, attr):
py_type = attr.py_type
converter_cls = provider._get_converter_type_by_py_type(py_type)
return converter_cls(provider, py_type, attr)
def get_pool(provider, *args, **kwargs):
return Pool(provider.dbapi_module, *args, **kwargs)
def table_exists(provider, connection, table_name, case_sensitive=True):
throw(NotImplementedError)
def index_exists(provider, connection, table_name, index_name, case_sensitive=True):
throw(NotImplementedError)
def fk_exists(provider, connection, table_name, fk_name, case_sensitive=True):
throw(NotImplementedError)
def table_has_data(provider, connection, table_name):
table_name = provider.quote_name(table_name)
cursor = connection.cursor()
cursor.execute('SELECT 1 FROM %s LIMIT 1' % table_name)
return cursor.fetchone() is not None
def disable_fk_checks(provider, connection):
pass
def enable_fk_checks(provider, connection, prev_state):
pass
def drop_table(provider, connection, table_name):
table_name = provider.quote_name(table_name)
cursor = connection.cursor()
sql = 'DROP TABLE %s' % table_name
cursor.execute(sql)
class Pool(localbase):
forked_connections = []
def __init__(pool, dbapi_module, *args, **kwargs): # called separately in each thread
pool.dbapi_module = dbapi_module
pool.args = args
pool.kwargs = kwargs
pool.con = pool.pid = None
def connect(pool):
pid = os.getpid()
if pool.con is not None and pool.pid != pid:
pool.forked_connections.append((pool.con, pool.pid))
pool.con = pool.pid = None
core = pony.orm.core
if pool.con is None:
if core.debug: core.log_orm('GET NEW CONNECTION')
pool._connect()
pool.pid = pid
elif core.debug: core.log_orm('GET CONNECTION FROM THE LOCAL POOL')
return pool.con
def _connect(pool):
pool.con = pool.dbapi_module.connect(*pool.args, **pool.kwargs)
def release(pool, con):
assert con is pool.con
try: con.rollback()
except:
pool.drop(con)
raise
def drop(pool, con):
assert con is pool.con, (con, pool.con)
pool.con = None
con.close()
def disconnect(pool):
con = pool.con
pool.con = None
if con is not None: con.close()
class Converter(object):
EQ = 'EQ'
NE = 'NE'
optimistic = True
def __deepcopy__(converter, memo):
return converter # Converter instances are "immutable"
def __init__(converter, provider, py_type, attr=None):
converter.provider = provider
converter.py_type = py_type
converter.attr = attr
if attr is None: return
kwargs = attr.kwargs.copy()
converter.init(kwargs)
for option in kwargs: throw(TypeError, 'Attribute %s has unknown option %r' % (attr, option))
def init(converter, kwargs):
attr = converter.attr
if attr and attr.args: unexpected_args(attr, attr.args)
def validate(converter, val):
return val
def py2sql(converter, val):
return val
def sql2py(converter, val):
return val
def val2dbval(self, val, obj=None):
return val
def dbval2val(self, dbval, obj=None):
return dbval
def dbvals_equal(self, x, y):
return x == y
def get_sql_type(converter, attr=None):
if attr is not None and attr.sql_type is not None:
return attr.sql_type
attr = converter.attr
if attr.sql_type is not None:
assert len(attr.columns) == 1
return converter.get_fk_type(attr.sql_type)
if attr is not None and attr.reverse and not attr.is_collection:
i = attr.converters.index(converter)
rentity = attr.reverse.entity
rpk_converters = rentity._pk_converters_
assert rpk_converters is not None and len(attr.converters) == len(rpk_converters)
rconverter = rpk_converters[i]
return rconverter.sql_type()
return converter.sql_type()
def get_fk_type(converter, sql_type):
fk_types = converter.provider.fk_types
if sql_type.isupper(): return fk_types.get(sql_type, sql_type)
sql_type = sql_type.upper()
return fk_types.get(sql_type, sql_type).lower()
class NoneConverter(Converter): # used for raw_sql() parameters only
def __init__(converter, provider, py_type, attr=None):
if attr is not None: throw(TypeError, 'Attribute %s has invalid type NoneType' % attr)
Converter.__init__(converter, provider, py_type)
def get_sql_type(converter, attr=None):
assert False
def get_fk_type(converter, sql_type):
assert False
class BoolConverter(Converter):
def validate(converter, val):
return bool(val)
def sql2py(converter, val):
return bool(val)
def sql_type(converter):
return "BOOLEAN"
class StrConverter(Converter):
def __init__(converter, provider, py_type, attr=None):
converter.max_len = None
converter.db_encoding = None
Converter.__init__(converter, provider, py_type, attr)
def init(converter, kwargs):
attr = converter.attr
if not attr.args: max_len = None
elif len(attr.args) > 1: unexpected_args(attr, attr.args[1:])
else: max_len = attr.args[0]
if issubclass(attr.py_type, (LongStr, LongUnicode)):
if max_len is not None: throw(TypeError, 'Max length is not supported for CLOBs')
elif max_len is None: max_len = converter.provider.varchar_default_max_len
elif not isinstance(max_len, int_types):
throw(TypeError, 'Max length argument must be int. Got: %r' % max_len)
converter.max_len = max_len
converter.db_encoding = kwargs.pop('db_encoding', None)
converter.autostrip = kwargs.pop('autostrip', True)
def validate(converter, val):
if PY2 and isinstance(val, str): val = val.decode('ascii')
elif not isinstance(val, unicode): throw(TypeError,
'Value type for attribute %s must be %s. Got: %r' % (converter.attr, unicode.__name__, type(val)))
if converter.autostrip: val = val.strip()
max_len = converter.max_len
val_len = len(val)
if max_len and val_len > max_len:
throw(ValueError, 'Value for attribute %s is too long. Max length is %d, value length is %d'
% (converter.attr, max_len, val_len))
return val
def sql_type(converter):
if converter.max_len:
return 'VARCHAR(%d)' % converter.max_len
return 'TEXT'
class IntConverter(Converter):
signed_types = {None: 'INTEGER', 8: 'TINYINT', 16: 'SMALLINT', 24: 'MEDIUMINT', 32: 'INTEGER', 64: 'BIGINT'}
unsigned_types = None
def init(converter, kwargs):
Converter.init(converter, kwargs)
attr = converter.attr
min_val = kwargs.pop('min', None)
if min_val is not None and not isinstance(min_val, int_types):
throw(TypeError, "'min' argument for attribute %s must be int. Got: %r" % (attr, min_val))
max_val = kwargs.pop('max', None)
if max_val is not None and not isinstance(max_val, int_types):
throw(TypeError, "'max' argument for attribute %s must be int. Got: %r" % (attr, max_val))
size = kwargs.pop('size', None)
if size is None:
if attr.py_type.__name__ == 'long':
deprecated(9, "Attribute %s: 'long' attribute type is deprecated. "
"Please use 'int' type with size=64 option instead" % attr)
attr.py_type = int
size = 64
elif attr.py_type.__name__ == 'long': throw(TypeError,
"Attribute %s: 'size' option cannot be used with long type. Please use int type instead" % attr)
elif not isinstance(size, int_types):
throw(TypeError, "'size' option for attribute %s must be of int type. Got: %r" % (attr, size))
elif size not in (8, 16, 24, 32, 64):
throw(TypeError, "incorrect value of 'size' option for attribute %s. "
"Should be 8, 16, 24, 32 or 64. Got: %d" % (attr, size))
unsigned = kwargs.pop('unsigned', False)
if unsigned is not None and not isinstance(unsigned, bool):
throw(TypeError, "'unsigned' option for attribute %s must be of bool type. Got: %r" % (attr, unsigned))
if size == 64 and unsigned and not converter.provider.uint64_support: throw(TypeError,
'Attribute %s: %s provider does not support unsigned bigint type' % (attr, converter.provider.dialect))
if unsigned is not None and size is None: size = 32
lowest = highest = None
if size:
highest = 2 ** size - 1 if unsigned else 2 ** (size - 1) - 1
lowest = 0 if unsigned else -(2 ** (size - 1))
if highest is not None and max_val is not None and max_val > highest:
throw(ValueError, "'max' argument should be less or equal to %d because of size=%d and unsigned=%s. "
"Got: %d" % (highest, size, max_val, unsigned))
if lowest is not None and min_val is not None and min_val < lowest:
throw(ValueError, "'min' argument should be greater or equal to %d because of size=%d and unsigned=%s. "
"Got: %d" % (lowest, size, min_val, unsigned))
converter.min_val = min_val or lowest
converter.max_val = max_val or highest
converter.size = size
converter.unsigned = unsigned
def validate(converter, val):
if isinstance(val, int_types): pass
elif isinstance(val, basestring):
try: val = int(val)
except ValueError: throw(ValueError,
'Value type for attribute %s must be int. Got string %r' % (converter.attr, val))
else: throw(TypeError, 'Value type for attribute %s must be int. Got: %r' % (converter.attr, type(val)))
if converter.min_val and val < converter.min_val:
throw(ValueError, 'Value %r of attr %s is less than the minimum allowed value %r'
% (val, converter.attr, converter.min_val))
if converter.max_val and val > converter.max_val:
throw(ValueError, 'Value %r of attr %s is greater than the maximum allowed value %r'
% (val, converter.attr, converter.max_val))
return val
def sql2py(converter, val):
return int(val)
def sql_type(converter):
if not converter.unsigned:
return converter.signed_types.get(converter.size)
if converter.unsigned_types is None:
return converter.signed_types.get(converter.size) + ' UNSIGNED'
return converter.unsigned_types.get(converter.size)
class RealConverter(Converter):
# The tolerance is necessary for Oracle, because it has a different representation of float numbers.
# For other databases the default tolerance is set because the precision can be lost during
# Python -> JavaScript -> Python conversion
default_tolerance = 1e-14
def init(converter, kwargs):
Converter.init(converter, kwargs)
min_val = kwargs.pop('min', None)
if min_val is not None:
try: min_val = float(min_val)
except ValueError:
throw(TypeError, "Invalid value for 'min' argument for attribute %s: %r" % (converter.attr, min_val))
max_val = kwargs.pop('max', None)
if max_val is not None:
try: max_val = float(max_val)
except ValueError:
throw(TypeError, "Invalid value for 'max' argument for attribute %s: %r" % (converter.attr, max_val))
converter.min_val = min_val
converter.max_val = max_val
converter.tolerance = kwargs.pop('tolerance', converter.default_tolerance)
def validate(converter, val):
try: val = float(val)
except ValueError:
throw(TypeError, 'Invalid value for attribute %s: %r' % (converter.attr, val))
if converter.min_val and val < converter.min_val:
throw(ValueError, 'Value %r of attr %s is less than the minimum allowed value %r'
% (val, converter.attr, converter.min_val))
if converter.max_val and val > converter.max_val:
throw(ValueError, 'Value %r of attr %s is greater than the maximum allowed value %r'
% (val, converter.attr, converter.max_val))
return val
def dbvals_equal(converter, x, y):
tolerance = converter.tolerance
if tolerance is None or x is None or y is None: return x == y
denominator = max(abs(x), abs(y))
if not denominator: return True
diff = abs(x-y) / denominator
return diff <= tolerance
def sql2py(converter, val):
return float(val)
def sql_type(converter):
return 'REAL'
class DecimalConverter(Converter):
def __init__(converter, provider, py_type, attr=None):
converter.exp = None # for the case when attr is None
Converter.__init__(converter, provider, py_type, attr)
def init(converter, kwargs):
attr = converter.attr
args = attr.args
if len(args) > 2: throw(TypeError, 'Too many positional parameters for Decimal '
'(expected: precision and scale), got: %s' % args)
if args: precision = args[0]
else: precision = kwargs.pop('precision', 12)
if not isinstance(precision, int_types):
throw(TypeError, "'precision' positional argument for attribute %s must be int. Got: %r" % (attr, precision))
if precision <= 0: throw(TypeError,
"'precision' positional argument for attribute %s must be positive. Got: %r" % (attr, precision))
if len(args) == 2: scale = args[1]
else: scale = kwargs.pop('scale', 2)
if not isinstance(scale, int_types):
throw(TypeError, "'scale' positional argument for attribute %s must be int. Got: %r" % (attr, scale))
if scale <= 0: throw(TypeError,
"'scale' positional argument for attribute %s must be positive. Got: %r" % (attr, scale))
if scale > precision: throw(ValueError, "'scale' must be less or equal 'precision'")
converter.precision = precision
converter.scale = scale
converter.exp = Decimal(10) ** -scale
min_val = kwargs.pop('min', None)
if min_val is not None:
try: min_val = Decimal(min_val)
except TypeError: throw(TypeError,
"Invalid value for 'min' argument for attribute %s: %r" % (attr, min_val))
max_val = kwargs.pop('max', None)
if max_val is not None:
try: max_val = Decimal(max_val)
except TypeError: throw(TypeError,
"Invalid value for 'max' argument for attribute %s: %r" % (attr, max_val))
converter.min_val = min_val
converter.max_val = max_val
def validate(converter, val):
if isinstance(val, float):
s = str(val)
if float(s) != val: s = repr(val)
val = Decimal(s)
try: val = Decimal(val)
except InvalidOperation as exc:
throw(TypeError, 'Invalid value for attribute %s: %r' % (converter.attr, val))
if converter.min_val is not None and val < converter.min_val:
throw(ValueError, 'Value %r of attr %s is less than the minimum allowed value %r'
% (val, converter.attr, converter.min_val))
if converter.max_val is not None and val > converter.max_val:
throw(ValueError, 'Value %r of attr %s is greater than the maximum allowed value %r'
% (val, converter.attr, converter.max_val))
return val
def sql2py(converter, val):
return Decimal(val)
def sql_type(converter):
return 'DECIMAL(%d, %d)' % (converter.precision, converter.scale)
class BlobConverter(Converter):
def validate(converter, val):
if isinstance(val, buffer): return val
if isinstance(val, str): return buffer(val)
throw(TypeError, "Attribute %r: expected type is 'buffer'. Got: %r" % (converter.attr, type(val)))
def sql2py(converter, val):
if not isinstance(val, buffer): val = buffer(val)
return val
def sql_type(converter):
return 'BLOB'
class DateConverter(Converter):
def validate(converter, val):
if isinstance(val, datetime): return val.date()
if isinstance(val, date): return val
if isinstance(val, basestring): return str2date(val)
throw(TypeError, "Attribute %r: expected type is 'date'. Got: %r" % (converter.attr, val))
def sql2py(converter, val):
if not isinstance(val, date): throw(ValueError,
'Value of unexpected type received from database: instead of date got %s' % type(val))
return val
def sql_type(converter):
return 'DATE'
class ConverterWithMicroseconds(Converter):
def __init__(converter, provider, py_type, attr=None):
converter.precision = None # for the case when attr is None
Converter.__init__(converter, provider, py_type, attr)
def init(converter, kwargs):
attr = converter.attr
args = attr.args
if len(args) > 1: throw(TypeError, 'Too many positional parameters for attribute %s. '
'Expected: precision, got: %r' % (attr, args))
provider = attr.entity._database_.provider
if args:
precision = args[0]
if 'precision' in kwargs: throw(TypeError,
'Precision for attribute %s has both positional and keyword value' % attr)
else: precision = kwargs.pop('precision', provider.default_time_precision)
if not isinstance(precision, int) or not 0 <= precision <= 6: throw(ValueError,
'Precision value of attribute %s must be between 0 and 6. Got: %r' % (attr, precision))
if precision > provider.max_time_precision: throw(ValueError,
'Precision value (%d) of attribute %s exceeds max datetime precision (%d) of %s %s'
% (precision, attr, provider.max_time_precision, provider.dialect, provider.server_version))
converter.precision = precision
def round_microseconds_to_precision(converter, microseconds, precision):
# returns None if no change is required
if not precision: result = 0
elif precision < 6:
rounding = 10 ** (6-precision)
result = (microseconds // rounding) * rounding
else: return None
return result if result != microseconds else None
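    # Worked example: precision=2 truncates 123456 microseconds to 120000, while
    # precision=6 (or an already-matching value) returns None, i.e. "keep as is".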
def sql_type(converter):
attr = converter.attr
precision = converter.precision
if not attr or precision == attr.entity._database_.provider.default_time_precision:
return converter.sql_type_name
return converter.sql_type_name + '(%d)' % precision
class TimeConverter(ConverterWithMicroseconds):
sql_type_name = 'TIME'
def validate(converter, val):
if isinstance(val, time): pass
elif isinstance(val, basestring): val = str2time(val)
else: throw(TypeError, "Attribute %r: expected type is 'time'. Got: %r" % (converter.attr, val))
mcs = converter.round_microseconds_to_precision(val.microsecond, converter.precision)
if mcs is not None: val = val.replace(microsecond=mcs)
return val
def sql2py(converter, val):
if not isinstance(val, time): throw(ValueError,
'Value of unexpected type received from database: instead of time got %s' % type(val))
return val
class TimedeltaConverter(ConverterWithMicroseconds):
sql_type_name = 'INTERVAL'
def validate(converter, val):
if isinstance(val, timedelta): pass
elif isinstance(val, basestring): val = str2timedelta(val)
else: throw(TypeError, "Attribute %r: expected type is 'timedelta'. Got: %r" % (converter.attr, val))
mcs = converter.round_microseconds_to_precision(val.microseconds, converter.precision)
if mcs is not None: val = timedelta(val.days, val.seconds, mcs)
return val
def sql2py(converter, val):
if not isinstance(val, timedelta): throw(ValueError,
'Value of unexpected type received from database: instead of timedelta got %s' % type(val))
return val
class DatetimeConverter(ConverterWithMicroseconds):
sql_type_name = 'DATETIME'
def validate(converter, val):
if isinstance(val, datetime): pass
elif isinstance(val, basestring): val = str2datetime(val)
else: throw(TypeError, "Attribute %r: expected type is 'datetime'. Got: %r" % (converter.attr, val))
mcs = converter.round_microseconds_to_precision(val.microsecond, converter.precision)
if mcs is not None: val = val.replace(microsecond=mcs)
return val
def sql2py(converter, val):
if not isinstance(val, datetime): throw(ValueError,
'Value of unexpected type received from database: instead of datetime got %s' % type(val))
return val
class UuidConverter(Converter):
def __init__(converter, provider, py_type, attr=None):
if attr is not None and attr.auto:
attr.auto = False
if not attr.default: attr.default = uuid4
Converter.__init__(converter, provider, py_type, attr)
def validate(converter, val):
if isinstance(val, UUID): return val
if isinstance(val, buffer): return UUID(bytes=val)
if isinstance(val, basestring):
if len(val) == 16: return UUID(bytes=val)
return UUID(hex=val)
if isinstance(val, int): return UUID(int=val)
if converter.attr is not None:
throw(ValueError, 'Value type of attribute %s must be UUID. Got: %r'
% (converter.attr, type(val)))
else: throw(ValueError, 'Expected UUID value, got: %r' % type(val))
def py2sql(converter, val):
return buffer(val.bytes)
sql2py = validate
def sql_type(converter):
return "UUID"
class JsonConverter(Converter):
json_kwargs = {}
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Json):
return obj.wrapped
return json.JSONEncoder.default(self, obj)
def val2dbval(self, val, obj=None):
return json.dumps(val, cls=self.JsonEncoder, **self.json_kwargs)
def dbval2val(self, dbval, obj=None):
if isinstance(dbval, (int, bool, float, type(None))):
return dbval
val = json.loads(dbval)
if obj is None:
return val
return TrackedValue.make(obj, self.attr, val)
def dbvals_equal(self, x, y):
if isinstance(x, basestring): x = json.loads(x)
if isinstance(y, basestring): y = json.loads(y)
return x == y
def sql_type(self):
return "JSON"
|
Ahmad31/Web_Flask_Cassandra
|
flask/lib/python2.7/site-packages/pony/orm/dbapiprovider.py
|
Python
|
apache-2.0
| 33,878 | 0.009947 |
#!/usr/bin/python3
# Copyright (C) 2015 Bitquant Research Laboratories (Asia) Limited
# Released under the Simplified BSD License
import my_path
import time
import zmq.green as zmq
import pprint
import algobroker
import msgpack
class Dispatcher(algobroker.Broker):
def __init__(self):
algobroker.Broker.__init__(self, "dispatcher")
# send work
self.sms_sender = self.socket(zmq.PUSH)
self.sms_sender.connect(algobroker.ports['data']['broker_plivo'])
self.bitmex_sender = self.socket(zmq.PUSH)
self.bitmex_sender.connect(algobroker.ports['data']['broker_bitmex'])
self.web_sender = self.socket(zmq.PUSH)
self.web_sender.connect(algobroker.ports['data']['broker_web'])
def process_data(self, data):
if (data['cmd'] == "log"):
self.warning(pprint.pformat(data))
elif (data['cmd'] == 'alert' and
data['type'] == 'sms'):
self.debug("sending sms")
self.debug(pprint.pformat(data))
self.sms_sender.send(msgpack.packb(data))
elif (data['cmd'] == 'alert' and
data['type'] == 'web'):
self.debug("sending web")
self.debug(pprint.pformat(data))
self.web_sender.send(msgpack.packb(data))
elif (data.get('broker', None) == 'bitmex'):
self.debug("sending bitmex")
self.debug(pprint.pformat(data))
self.bitmex_sender.send(msgpack.packb(data))
else:
self.error("unknown action")
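# Routing summary for process_data above (messages arrive as msgpack-encoded
# dicts; the shapes shown are inferred from the checks and are illustrative):
#   {"cmd": "log", ...}                    -> logged as a warning
#   {"cmd": "alert", "type": "sms", ...}   -> forwarded to broker_plivo
#   {"cmd": "alert", "type": "web", ...}   -> forwarded to broker_web
#   {"broker": "bitmex", ...}              -> forwarded to broker_bitmex
#   anything else                          -> "unknown action" error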
if __name__ == "__main__":
dispatcher = Dispatcher()
dispatcher.run()
|
joequant/algobroker
|
algobroker/dispatcher.py
|
Python
|
bsd-2-clause
| 1,618 | 0.000618 |
# -*- coding: utf-8 -*-
import re
import json
import traceback
import sys
import time
import datetime
import random
# This block works around errors caused by Chinese (non-ASCII) text encoding.
reload(sys)
sys.setdefaultencoding("utf8")
from datetime import date
from scrapy.selector import Selector
from dateutil.relativedelta import relativedelta
if __name__ == '__main__':
import sys
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from base_crawler import BaseCrawler
from crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
class Crawler(BaseCrawler):
"""
kwargs contains:
'tel': str,
'pin_pwd': str,
'id_card': str,
'full_name': unicode,
'sms_code': str,
'captcha_code': str
Error levels:
0: success
1: wrong account or password
2: verification code error
9: other error
"""
def __init__(self, **kwargs):
"""
Initialize the crawler.
"""
super(Crawler, self).__init__(**kwargs)
self.pin_pwd_error_times = 0
self.info_res = ''
def need_parameters(self, **kwargs):
return ['pin_pwd']
def get_verify_type(self, **kwargs):
return 'SMS'
def login(self, **kwargs):
ProvinceID = '07'
code, key = login_unity(self, ProvinceID, **kwargs)
if code != 0:
return code, key
cookie_url = 'http://nm.189.cn/selfservice/service/userLogin'
cookie_data = {
"number" : kwargs['tel'],
"intLoginType":"4",
"areaCode":"0471",
"isBusinessCustType":"N",
"identifyType":"B",
"userLoginType":"4",
"password":"",
"randomPass":"",
"noCheck":"N",
"isSSOLogin":"Y",
"sRand":"SSOLogin"
}
code, key, resp = self.post(cookie_url, data=json.dumps(cookie_data))
if code != 0:
return code, key
personal_info_url = 'http://www.189.cn/dqmh/userCenter/userInfo.do?method=editUserInfo_new&fastcode=10000557&cityCode=nm'
for retry in xrange(self.max_retry):
code, key, tel_info_res = self.get(personal_info_url)
if code != 0:
return code, key
if u'真实姓名' in tel_info_res.text:
self.info_res = tel_info_res.text
return 0, "success"
else:
pass
else:
self.log('crawler', "request_error", tel_info_res)
return 9, "website_busy_error"
def send_verify_request(self, **kwargs):
"""
Request that an SMS code be sent, download the captcha image, or issue both requests at once.
return
status_key: str, status-code key; see status_code
level: int, error level
message: unicode, detailed error message
image_str: str, base64 string of the captcha image; empty when using SMS
"""
send_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMS"
send_sms_data = {
"phone": kwargs['tel']
}
code, key, resp = self.post(send_sms_url, data=json.dumps(send_sms_data))
if code != 0:
return code, key, ""
if resp.text:
try:
resp_json_response = resp.json()
except:
error = traceback.format_exc()
self.log('crawler', "Not json file : {}, resp:{}".format(error, resp.history), resp)
return 9, 'website_busy_error', ''
if resp_json_response.get('flag', '') == '0':
return 0, "success", ""
elif resp_json_response.get('flag', '') == '2':
self.log('crawler', "send_sms_error", resp)
return 9, "send_sms_error", ''
else:
self.log('crawler', "unknown_error", resp)
return 9, "unknown_error", ''
else:
self.log('crawler', "send_sms_error", resp)
return 9, "send_sms_error", ''
def verify(self, **kwargs):
"""
        Perform the secondary (SMS) verification.
        return
        status_key: str, status-code key, see status_code
        level: int, error level
        message: unicode, detailed error message
"""
check_sms_url = "http://nm.189.cn/selfservice/bill/xdQuerySMSCheck"
check_sms_data = {
'code': kwargs['sms_code']
}
code, key, resp = self.post(check_sms_url, data=json.dumps(check_sms_data))
if code != 0:
return code, key
try:
resp_json_response = resp.json()
except:
error = traceback.format_exc()
self.log('crawler', "json_error : %s" % error, resp)
return 9, 'json_error'
if resp_json_response.get('flag', '') == '0':
self.log('crawler', "verify_error", resp)
return 2, "verify_error"
        # If the detailed call records are returned directly, treat it as success.
elif resp_json_response.get('flag', '') == '1' or 'resultNum' in resp.text or 'items' in resp.text:
return 0, "success"
else:
self.log('crawler', "unknown_error", resp)
return 9, "unknown_error"
def crawl_info(self, **kwargs):
"""
        Crawl the account information.
        return
        status_key: str, status-code key, see status_code
        level: int, error level
        message: unicode, detailed error message
        info: dict, account information, see the account-info format
"""
user_info = {}
selector = Selector(text=self.info_res)
try:
full_name = selector.xpath('//input[@name="realName"]/@value').extract()
user_info['full_name'] = full_name[0] if full_name else ''
id_card = selector.xpath('//input[@name="certificateNumber"]/@value').extract()
user_info['id_card'] = id_card[0] if id_card else ''
address = re.findall(u'id="address".*?;">(.*?)</textarea>', self.info_res)
user_info['address'] = address[0] if address else ''
user_info['open_date'] = ""
user_info['is_realname_register'] = True
except:
error = traceback.format_exc()
self.log('crawler', "html_error : %s" % error, '')
return 9, "html_error", {}
return 0, "success", user_info
def random_sleep(self, tm, modulus=3):
time.sleep(random.uniform(tm / modulus / 1.5, 1.5 * tm / modulus))
def crawl_call_log(self, **kwargs):
"""
        Crawl the detailed call records.
        return
        status_key: str, status-code key, see status_code
        level: int, error level
        message: unicode, detailed error message
        call_log: list, call records, see the call-record format
"""
call_log = []
crawl_num = 0
call_log_url = "http://nm.189.cn/selfservice/bill/xdQuery"
today = date.today()
missing_list = []
pos_missing = []
search_month = [x for x in range(0, -6, -1)]
for each_month in search_month:
query_date = today + relativedelta(months=each_month)
search_month = "%d%02d" % (query_date.year, query_date.month)
call_log_data = {
"billingCycle": "{}{}".format(query_date.year, str(query_date.month).zfill(2)),
'accNbr': kwargs['tel'],
'accNbrType': '4',
'areaCode': '0478',
'pageNo': -1,
'pageRecords': -1,
'prodSpecId': '378',
'qtype': '0',
'isYWlQuery': 'N',
}
header = {
'Referer': 'http://nm.189.cn/selfservice/bill/xd',
'Host': 'nm.189.cn',
'Content-Type': 'application/json'
}
start_time = time.time()
end_time = start_time + 10
aid_time_dict = dict()
retry_times = self.max_retry
log_for_retry = []
while 1:
log_for_retry.append((1, retry_times))
retry_times -= 1
code, key, resp = self.post(call_log_url, data=json.dumps(call_log_data), headers=header)
if code:
missing_flag = True
elif 'POR-2102' in resp.text:
                    # No query results; there is no data for this month
missing_flag = False
else:
flag = True
break
now_time = time.time()
if retry_times >= 0:
aid_time_dict.update({retry_times: time.time()})
elif now_time < end_time:
loop_time = aid_time_dict.get(0, time.time())
left_time = end_time - loop_time
self.random_sleep(left_time)
else:
flag = False
if missing_flag:
missing_list.append(search_month)
else:
pos_missing.append(search_month)
break
            self.log('crawler', '{} retry record {}'.format(search_month, log_for_retry), '')
if not flag:
continue
# for retry in range(self.max_retry):
# code, key, resp = self.post(call_log_url, data=json.dumps(call_log_data), headers=header)
# if code != 0:
# missing_flag = True
            #     # No query results; there is no data for this month
# elif 'POR-2102' in resp.text:
# missing_flag = False
# else:
# break
# else:
# if missing_flag:
# missing_list.append(search_month)
# else:
            #         self.log('crawler', 'No detailed call records were found', resp)
# pos_missing.append(search_month)
# continue
try:
resp_json_response = resp.json()
except:
error = traceback.format_exc()
self.log('crawler', 'html_error : %s' % error, resp)
missing_list.append(search_month)
continue
if resp_json_response.get('resultCode', '') == 'POR-0000':
status_key, status_level, message, log_data = self.call_log_get(resp.text, search_month)
if status_level != 0:
crawl_num += 1
self.log('crawler', message, resp)
missing_list.append(search_month)
continue
else:
call_log.extend(log_data)
else:
self.log('crawler', 'html_error', resp)
missing_list.append(search_month)
if crawl_num > 0:
return 9, 'crawl_error', call_log, missing_list, pos_missing
if len(missing_list+pos_missing) == 6:
return 9, 'website_busy_error', call_log, missing_list, pos_missing
return 0, "success", call_log, missing_list, pos_missing
def call_log_get(self, response, search_month):
"""
        | `update_time`   | string | update timestamp                  |
        | `call_cost`     | string | call cost                         |
        | `call_time`     | string | call start time                   |
        | `call_method`   | string | call direction (calling, called)  |
        | `call_type`     | string | call type (local, long-distance)  |
        | `call_from`     | string | location of this phone            |
        | `call_to`       | string | location of the other party       |
        | `call_duration` | string | call duration                     |
"""
try:
json_logs = json.loads(response)
except:
error = traceback.format_exc()
return 'json_error', 9, 'json_error %s' % error, []
if json_logs.get('resultCode', '') == 'POR-0000':
records = []
for item in json_logs.get('items', []):
data = {}
try:
data['month'] = search_month
data['call_cost'] = item.get('fee', '')
                    # The following lines convert the date/time strings into a timestamp
temp = '{} {}'.format(item.get('converseDate', ''), item.get('converseTime', ''))
call_time = re.findall('\d{2}', temp)
call_time_change = call_time[0] + call_time[1] + '-' + call_time[2] + '-' + call_time[3] + ' ' + \
call_time[4] + ':' + call_time[5] + ':' + call_time[6]
timeArray = time.strptime(call_time_change, "%Y-%m-%d %H:%M:%S")
call_time_timeStamp = str(int(time.mktime(timeArray)))
data['call_time'] = call_time_timeStamp
data['call_method'] = item.get('callType', '')
data['call_type'] = item.get('converseType', '')
# data['call_from'] = item.get('converseAddr', '')
raw_call_from = item.get('converseAddr', '').strip()
call_from, error = self.formatarea(raw_call_from)
if call_from:
data['call_from'] = call_from
else:
# self.log("crawler", "{} {}".format(error, raw_call_from), "")
data['call_from'] = raw_call_from
data['call_to'] = item.get('callArea', '')
data['call_tel'] = item.get('callingNbr', '')
                    # The following lines convert the duration into seconds
durations = item.get('converseDuration', '').split("'")
duration = int(durations[0]) * 3600 + int(durations[1]) * 60 + int(durations[2])
data['call_duration'] = str(duration)
records.append(data)
except:
error = traceback.format_exc()
return 'html_error', 9, 'html_error %s' % error, []
return 'success', 0, 'success', records
else:
return 'html_error', 9, 'html_error', []
def crawl_phone_bill(self, **kwargs):
phone_bill = list()
missing_list = []
month_bill_url = 'http://nm.189.cn/selfservice/bill/khzdQuery'
header = {
'Referer': 'http://nm.189.cn/selfservice/bill/khzd-mini?fastcode=10000542&cityCode=nm',
'Host': 'nm.189.cn',
'Content-Type': 'application/json'
}
for month in self.__monthly_period(6, '%Y%m'):
post_data = {
'accNbr': kwargs['tel'],
'accNbrType': '4',
'areaCode': '0478',
'billingCycle': month,
'prodSpecId': '378',
'prodSpecName': '',
'smsCode': '',
}
for retry in xrange(self.max_retry):
code, key, resp = self.post(month_bill_url, headers=header, data=json.dumps(post_data))
if code != 0:
continue
else:
break
else:
missing_list.append(month)
continue
key, level, message, result = self.phone_bill_get(month, resp)
if level != 0 or result['bill_amount'] == '' or result['bill_amount'] == '0.00':
missing_list.append(month)
continue
phone_bill.append(result)
if len(missing_list) == 6:
return 9, 'website_busy_error', phone_bill, missing_list
today = date.today()
today_month = "%d%02d" % (today.year, today.month)
if today_month in missing_list:
missing_list.remove(today_month)
return 0, 'success', phone_bill, missing_list
def phone_bill_get(self, month, resp):
month_bill = {
'bill_month': month,
'bill_amount': '',
'bill_package': '',
'bill_ext_calls': '',
'bill_ext_data': '',
'bill_ext_sms': '',
'bill_zengzhifei': '',
'bill_daishoufei': '',
'bill_qita': ''
}
try:
bill = json.loads(resp.text)
except:
error = traceback.format_exc()
self.log('crawler', 'html_error'+error, resp)
return 'html_error', 9, 'html_error'+error, {}
        if bill['resultSet'] is None:
self.log('website', 'website_busy_error', resp)
return 'website_busy_error', 9, 'website_busy_error', {}
        bill_amounts = re.findall(u'费用合计:([\d.]+)元', resp.text)  # matches "total charges: <amount> yuan"
if bill_amounts:
month_bill['bill_amount'] = bill_amounts[0]
bill_package = re.findall(u"套餐费</span><span class='pricebills'>([\d.]+)</span>", resp.text)
if bill_package:
month_bill['bill_package'] = bill_package[0]
        return 'success', 0, 'success', month_bill
def __monthly_period(self, length=6, strf='%Y%m'):
current_time = datetime.datetime.now()
for month_offset in range(0, length):
yield (current_time - relativedelta(months=month_offset + 1)).strftime(strf)
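# Illustrative note, not part of the original source: __monthly_period(6, '%Y%m') yields
# the six billing cycles preceding the current month, newest first. For a run in August
# 2016 (a hypothetical date) it would yield '201607', '201606', ..., '201602', and
# crawl_phone_bill() posts one khzdQuery request per cycle.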
if __name__ == '__main__':
c = Crawler()
USER_ID = "15335686893"
# USER_ID = "15335686896"
USER_PASSWORD = "135126"
USER_FULL_NAME = "薛胜英"
USER_ID_CARD = "152801198002090347"
c.self_test(tel=USER_ID,pin_pwd=USER_PASSWORD)
|
Svolcano/python_exercise
|
dianhua/worker/crawler/china_telecom/neimenggu/main.py
|
Python
|
mit
| 17,659 | 0.003293 |
# -*- coding: utf-8 -*-
import sys
import os
from os.path import dirname
# Set the directory for using the modules in the same project such as eshop.
PROJECT_PATH = dirname(os.path.abspath(os.path.dirname(__file__)))
ESHOP_PATH = os.path.join(PROJECT_PATH, 'eshop/')
sys.path.append(PROJECT_PATH)
|
mikuyves/fashion-finder
|
flickr/settings.py
|
Python
|
gpl-3.0
| 298 | 0 |
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "http://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Anime", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Series Animadas", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
if 'Novedades' in item.title:
patron_cat = '<div class="activos"><h3>(.+?)<\/h2><\/a><\/div>'
patron = '<a href="(.+?)"><h2><span>(.+?)<\/span>'
else:
patron_cat = '<li><a href=.+?>'
patron_cat += str(item.title)
patron_cat += '<\/a><div>(.+?)<\/div><\/li>'
patron = "<a href='(.+?)'>(.+?)<\/a>"
data = scrapertools.find_single_match(data, patron_cat)
matches = scrapertools.find_multiple_matches(data, patron)
for link, name in matches:
if "Novedades" in item.title:
url = link
title = name.capitalize()
else:
url = host + link
title = name
if ":" in title:
cad = title.split(":")
show = cad[0]
else:
if "(" in title:
cad = title.split("(")
if "Super" in title:
show = cad[1]
show = show.replace(")", "")
else:
show = cad[0]
else:
show = title
if "&" in show:
cad = title.split("xy")
show = cad[0]
            context1 = [renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=show, action="episodios", show=show,
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<div class="pagina">(.+?)<\/div><div id="fade".+?>'
data = scrapertools.find_single_match(data, patron)
patron_caps = "<a href='(.+?)'>Capitulo: (.+?) - (.+?)<\/a>"
matches = scrapertools.find_multiple_matches(data, patron_caps)
show = scrapertools.find_single_match(data, '<span>Titulo.+?<\/span>(.+?)<br><span>')
scrapedthumbnail = scrapertools.find_single_match(data, "<img src='(.+?)'.+?>")
scrapedplot = scrapertools.find_single_match(data, '<span>Descripcion.+?<\/span>(.+?)<br>')
i = 0
temp = 0
for link, cap, name in matches:
if int(cap) == 1:
temp = temp + 1
if int(cap) < 10:
cap = "0" + cap
season = temp
episode = int(cap)
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
date = name
title = "%sx%s %s (%s)" % (season, str(episode).zfill(2), "Episodio %s" % episode, date)
# title = str(temp)+"x"+cap+" "+name
url = host + "/" + link
if "NO DISPONIBLE" not in name:
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, thumbnail=scrapedthumbnail,
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_vid = scrapertools.find_single_match(data1, '<div class="videos">(.+?)<\/div><div .+?>')
# name = scrapertools.find_single_match(data,'<span>Titulo.+?<\/span>([^<]+)<br>')
scrapedplot = scrapertools.find_single_match(data, '<br><span>Descrip.+?<\/span>([^<]+)<br>')
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
if "netu" == server:
continue
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info()
itemlist = []
    # Look for the video on the item's server ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
        # ...if it is not found there, search across all available servers
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist
|
pitunti/alfaPitunti
|
plugin.video.alfa/channels/anitoonstv.py
|
Python
|
gpl-3.0
| 6,699 | 0.005526 |
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models, transaction
from django.test import SimpleTestCase, TestCase
from .models import BooleanModel, FksToBooleans, NullBooleanModel
class BooleanFieldTests(TestCase):
def _test_get_prep_value(self, f):
self.assertEqual(f.get_prep_value(True), True)
self.assertEqual(f.get_prep_value('1'), True)
self.assertEqual(f.get_prep_value(1), True)
self.assertEqual(f.get_prep_value(False), False)
self.assertEqual(f.get_prep_value('0'), False)
self.assertEqual(f.get_prep_value(0), False)
self.assertEqual(f.get_prep_value(None), None)
def _test_to_python(self, f):
self.assertIs(f.to_python(1), True)
self.assertIs(f.to_python(0), False)
def test_booleanfield_get_prep_value(self):
self._test_get_prep_value(models.BooleanField())
def test_nullbooleanfield_get_prep_value(self):
self._test_get_prep_value(models.NullBooleanField())
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.NullBooleanField())
def test_booleanfield_choices_blank(self):
"""
BooleanField with choices and defaults doesn't generate a formfield
with the blank option (#9640, #10549).
"""
choices = [(1, 'Si'), (2, 'No')]
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_return_type(self):
b = BooleanModel.objects.create(bfield=True)
b.refresh_from_db()
self.assertEqual(b.bfield, True)
b2 = BooleanModel.objects.create(bfield=False)
b2.refresh_from_db()
self.assertEqual(b2.bfield, False)
b3 = NullBooleanModel.objects.create(nbfield=True)
b3.refresh_from_db()
self.assertEqual(b3.nbfield, True)
b4 = NullBooleanModel.objects.create(nbfield=False)
b4.refresh_from_db()
self.assertEqual(b4.nbfield, False)
# When an extra clause exists, the boolean conversions are applied with
# an offset (#13293).
b5 = BooleanModel.objects.all().extra(select={'string_col': 'string'})[0]
self.assertNotIsInstance(b5.pk, bool)
def test_select_related(self):
"""
Boolean fields retrieved via select_related() should return booleans.
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# select_related('fk_field_name')
ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
self.assertEqual(ma.bf.bfield, True)
self.assertEqual(ma.nbf.nbfield, True)
# select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
self.assertEqual(mb.bf.bfield, True)
self.assertEqual(mb.nbf.nbfield, True)
self.assertEqual(mc.bf.bfield, False)
self.assertEqual(mc.nbf.nbfield, False)
def test_null_default(self):
"""
A BooleanField defaults to None, which isn't a valid value (#15124).
"""
boolean_field = BooleanModel._meta.get_field('bfield')
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with transaction.atomic():
with self.assertRaises(IntegrityError):
b.save()
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ValidationTest(SimpleTestCase):
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
with self.assertRaises(ValidationError):
f.clean(None, None)
def test_nullbooleanfield_blank(self):
"""
NullBooleanField shouldn't throw a validation error when given a value
of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
nullboolean.full_clean()
|
tbeadle/django
|
tests/model_fields/test_booleanfield.py
|
Python
|
bsd-3-clause
| 4,416 | 0.000226 |
# -*- coding: utf-8 -*-
"""Defines mixing class.
You can use it for inherit from Class Base Views, it was
developed by Timothée Peignier https://gist.github.com/cyberdelia/1231560
"""
import random
from django.contrib.auth.decorators import login_required
from django.utils.cache import patch_response_headers
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page, never_cache
from django.views.decorators.csrf import csrf_exempt
class NeverCacheMixin(object):
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(NeverCacheMixin, self).dispatch(*args, **kwargs)
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CSRFExemptMixin(object):
@method_decorator(csrf_exempt)
def dispatch(self, *args, **kwargs):
return super(CSRFExemptMixin, self).dispatch(*args, **kwargs)
class CacheMixin(object):
cache_timeout = 60
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
return cache_page(self.get_cache_timeout())(super(CacheMixin, self).dispatch)(*args, **kwargs)
class CacheControlMixin(object):
cache_timeout = 60
def get_cache_timeout(self):
return self.cache_timeout
def dispatch(self, *args, **kwargs):
response = super(CacheControlMixin, self).dispatch(*args, **kwargs)
patch_response_headers(response, self.get_cache_timeout())
return response
class JitterCacheMixin(CacheControlMixin):
cache_range = [40, 80]
def get_cache_range(self):
return self.cache_range
def get_cache_timeout(self):
return random.randint(*self.get_cache_range())
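# Illustrative usage, not part of the original module. A hypothetical class-based view
# combining the mixins; they are listed before the view class so their dispatch()
# wrappers are applied:
#
#     from django.views.generic import ListView
#
#     class DashboardView(LoginRequiredMixin, NeverCacheMixin, ListView):
#         model = Dashboard  # placeholder model
#
# LoginRequiredMixin redirects anonymous users to the login page and NeverCacheMixin
# marks the response as uncacheable.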
|
dairdr/voteapp
|
voteapp/apps/vote/mixing.py
|
Python
|
mit
| 1,724 | 0.020894 |
# Copyright (c) 2014 RainMachine, Green Electronics LLC
# All rights reserved.
# Authors: Nicu Pavel <npavel@mini-box.com>
# Codrin Juravle <codrin.juravle@mini-box.com>
from datetime import datetime, timedelta, tzinfo
from math import sin, cos, asin, acos, sqrt
import time, calendar
import ctypes,os, fcntl, errno
from RMUtilsFramework.rmLogging import log
ZERO = timedelta(0)
Y2K38_MAX_YEAR = 2037
Y2K38_MAX_TIMESTAMP = 2147483647
# For monotonic time
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long)
]
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
utc_t0 = datetime(1970, 1, 1, tzinfo=utc)
def rmYMDToTimestamp(year, month, day):
if year > Y2K38_MAX_YEAR: #Y2K38
year = Y2K38_MAX_YEAR
try:
return int(datetime(year, month, day).strftime("%s"))
except ValueError:
return int(time.mktime(datetime(year, month, day).timetuple())) # Windows platform doesn't have strftime(%s)
def rmYMDFromTimestamp(timestamp):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
d = datetime.fromtimestamp(timestamp)
return d.year, d.month, d.day
def rmTimestampToDate(timestamp):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
return datetime.fromtimestamp(timestamp)
def rmTimestampToDateAsString(timestamp, format = None):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
if format:
return datetime.fromtimestamp(timestamp).strftime(format)
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmCurrentTimestampToDateAsString(format = None):
timestamp = int(time.time())
if format:
return datetime.fromtimestamp(timestamp).strftime(format)
return datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmTimestampToUtcDateAsString(timestamp, format = None):
if timestamp > Y2K38_MAX_TIMESTAMP: #Y2K38
timestamp = Y2K38_MAX_TIMESTAMP
if format:
return datetime.utcfromtimestamp(timestamp).strftime(format)
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def rmTimestampFromDateAsString(dateString, format):
return int(datetime.strptime(dateString, format).strftime("%s"))
# Converts a date string in UTC format to a local timestamp (ex: 2019-05-20T12:00:00Z)
def rmTimestampFromUTCDateAsString(dateString, format):
dt = datetime.strptime(dateString, format)
return int((dt - datetime.utcfromtimestamp(0)).total_seconds())
def rmTimestampFromDateAsStringWithOffset(dateString):
# format in form of 2015-04-24T08:00:00-04:00 converted to UTC timestamp
if dateString is None:
return None
try:
sign = int(dateString[19:20] + '1')
(hour, minute) = [int(s) for s in dateString[20:].split(':')]
offset = sign * (hour * 60 * 60 + minute * 60)
except:
return None
try:
start_time = datetime.strptime(dateString[:19], "%Y-%m-%dT%H:%M:%S")
timestamp = int(calendar.timegm(start_time.timetuple())) - offset
except:
return None
return timestamp
def rmTimestampToYearMonthDay(timestamp):
d = datetime.fromtimestamp(timestamp)
return d.year, d.month, d.day
def rmNowToYearMonthDay():
d = datetime.now()
return d.year, d.month, d.day
def rmNormalizeTimestamp(timestamp):
return int(datetime.fromtimestamp(timestamp).strftime('%s'))
def rmTimestampToDayOfYear(timestamp):
if timestamp is None:
timestamp = rmCurrentDayTimestamp()
d = datetime.fromtimestamp(timestamp).timetuple()
return d.tm_yday
def rmNowDateTime():
return datetime.now()
def rmCurrentTimestamp():
return int(time.time())
def rmCurrentDayTimestamp():
return rmGetStartOfDay(int(time.time()))
def rmCurrentMinuteTimestamp():
timestamp = int(time.time())
return timestamp - (timestamp % 60)
def rmGetStartOfDay(timestamp):
tuple = datetime.fromtimestamp(timestamp).timetuple()
return int(datetime(tuple.tm_year, tuple.tm_mon, tuple.tm_mday).strftime("%s"))
def rmGetStartOfDayUtc(timestamp):
tuple = datetime.utcfromtimestamp(timestamp).timetuple()
dt = datetime(tuple.tm_year, tuple.tm_mon, tuple.tm_mday, tzinfo=utc)
return int((dt-utc_t0).total_seconds())
def rmTimestampIsLeapYear(timestamp):
d = datetime.fromtimestamp(timestamp)
#try:
# datetime(d.year, 2, 29)
# return True
#except ValueError:
# return False
if d.year % 400 == 0:
return True
elif d.year % 100 == 0:
return False
elif d.year % 4 == 0:
return True
return False
def rmConvertDateStringToFormat(dateString, inputFormat, outputFormat):
return datetime.strptime(dateString, inputFormat).strftime(outputFormat)
def rmDayRange(startDayTimestamp, numDays):
d = datetime.fromtimestamp(startDayTimestamp)
if numDays >=0:
dateList = [int(time.mktime( (d + timedelta(days=x)).timetuple() )) for x in range(0, numDays)]
else:
numDays = -numDays
dateList = [int(time.mktime( (d - timedelta(days=x)).timetuple() )) for x in range(0, numDays)]
return dateList
def rmDeltaDayFromTimestamp(startDayTimeStamp, deltaDays):
d = datetime.fromtimestamp(startDayTimeStamp)
if deltaDays < 0:
d = d - timedelta(days=-deltaDays)
else:
d = d + timedelta(days=deltaDays)
return int(time.mktime(d.timetuple()))
def rmGetNumberOfDaysBetweenTimestamps(startTimestamp, endTimestamp):
d1 = datetime.fromtimestamp(startTimestamp)
d2 = datetime.fromtimestamp(endTimestamp)
delta = d2-d1
return delta.days
# Sunrise and sunset for specific location and elevation
def computeSuntransitAndDayLenghtForDayTs(ts, lat, lon, elevation):
ts = rmGetStartOfDayUtc(ts)
n = julianDayFromTimestamp(ts)
J = __computeMeanSolarNoon(n, lon)
M = __computeSolarMeanAnomay(J)
C = __equationOfTheCenter(M)
L = __computeEclipticLongitude(M, C)
Jtr = computeSolarTransit(J, M, L)
delta = __computeSinSunDeclination(L)
w0 = computeHourAngle(lat, delta, elevation)
return Jtr, w0
def rmGetSunsetTimestampForDayTimestamp(ts, lat, lon, elevation):
Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
Jset = Jtr+w0/360
tsJset = julianDayToUTC(Jset)
return tsJset
def rmGetSunriseTimestampForDayTimestamp(ts, lat, lon, elevation):
if lat is None or lon is None:
log.debug("Latitude or longitude is not set. Returning same timestamp")
return ts
Jtr, w0 = computeSuntransitAndDayLenghtForDayTs(ts, lat, -lon, elevation)
Jrise = Jtr-w0/360
tsJrise = julianDayToUTC(Jrise)
return tsJrise
def julianDayFromTimestamp(ts):
ts = rmGetStartOfDayUtc(ts) + 12*3600
JD = float(ts)/86400 + 2440587.5
return JD - 2451545.0 + 0.0008
def julianDayToUTC(JD):
return (JD - 2440587.5)*86400
def __cosa(degree):
radian = degree/180*3.14159265359
return cos(radian)
def __sina(degree):
radian = degree/180*3.14159265359
return sin(radian)
def __acosa(x):
if abs(x) > 1:
return 180. if x< 0 else 0.
radian = acos(x)
return radian/3.14159265359*180.
def __asina(x):
if abs(x) > 1:
return -90. if x< 0 else 90.
radian = asin(x)
return radian/(3.14159265359)*180.
def __computeMeanSolarNoon(jd, wlon):
J = wlon/360 + jd
return J
def __computeSolarMeanAnomay(solarNoon): #degrees
return (357.5291 + 0.98560028*solarNoon)%360
def __equationOfTheCenter(solarMeanAnomaly): # constant from sine
M = solarMeanAnomaly
return 1.9148*__sina(M) + 0.0200*__sina(2*M) + 0.0003*__sina(3*M)
def __computeEclipticLongitude(solarMeanAnomaly, eqCenter): #degrees (it adds a sum a sines)
L = (solarMeanAnomaly + eqCenter + 180 + 102.9372) % 360
return L
def computeSolarTransit(meanSolarNoon, solarMeanAnomaly, eclipticLongitude): #substract sinuses from 12 am
Jtr = 2451545.0 + meanSolarNoon + (0.0053*__sina(solarMeanAnomaly) - 0.0069*__sina(2*eclipticLongitude))
return Jtr
def __computeSinSunDeclination(L):
delta = __sina(L)*__sina(23.439 )
return delta
def computeHourAngle(nlat, sdelta, elevation):
if elevation < 0:
elevation = 0
elevCoef = -2.076*sqrt(elevation)/60
cosw0 = (__sina(-0.83+elevCoef) - __sina(nlat)*sdelta)/ ( sqrt(1-sdelta*sdelta) * __cosa(nlat))
return __acosa(cosw0)
def rmNTPFetch(server = "pool.ntp.org", withRequestDrift = False):
import struct
from socket import socket, AF_INET, SOCK_DGRAM
requestPacket = '\x1b' + 47 * '\0'
startTime = time.time()
try:
sock = socket(AF_INET, SOCK_DGRAM)
sock.settimeout(5)
except Exception, e:
log.error("NTPFetch: Can't create socket")
return None
try:
sock.sendto(requestPacket, (server, 123))
data, ip = sock.recvfrom(1024)
except Exception, e:
#log.error("NTPFetch: Error receiving data: %s" % e)
return None
try:
if data:
timestamp = struct.unpack('!12I', data)[10]
timestamp -= 2208988800L # = date in sec since epoch
# http://stackoverflow.com/questions/1599060/how-can-i-get-an-accurate-utc-time-with-python
if withRequestDrift:
reqTime = time.time() - startTime
timestamp += reqTime / 2
return timestamp
except:
log.error("NTPFetch: Conversion failed.")
return None
def getAlarmElapsedRealTime():
### DEPRECATED: This method was used on Android to get the UP_TIME (replaced by monotonicTime())
elapsedTime = -1
try:
alarmFile = open("/dev/alarm", 'r')
if alarmFile:
t = timespec()
# ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME) = 0x40086134
result = fcntl.ioctl(alarmFile.fileno(), 0x40086134, t)
if result == 0:
elapsedTime = t.tv_sec
alarmFile.close()
except Exception, e:
log.error(e)
return elapsedTime
class rmMonotonicTime:
CLOCK_MONOTONIC_RAW = 4 # see <linux/time.h>
def __init__(self, fallback = True):
self.fallback = fallback
self.clock_gettime = None
self.get = None
self.monotonicInit()
def monotonicInit(self):
try:
from RMOSGlue.rmOSPlatform import RMOSPlatform
if RMOSPlatform().AUTODETECTED == RMOSPlatform.ANDROID:
librt = ctypes.CDLL('libc.so', use_errno=True)
log.info("Initialised Android monotonic clock")
elif RMOSPlatform().AUTODETECTED == RMOSPlatform.OPENWRT:
librt = ctypes.CDLL('librt.so.0', use_errno=True)
log.info("Initialised OpenWRT monotonic clock")
else:
librt = ctypes.CDLL('librt.so.1', use_errno=True)
log.info("Initialised generic monotonic clock")
self.clock_gettime = librt.clock_gettime
self.clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]
self.get = self.monotonicTime
except Exception, e:
self.get = self.monotonicFallback
log.error("Cannot initialise monotonicClock will use fallback time.time() method !")
def monotonicFallback(self, asSeconds = True):
if asSeconds:
return int(time.time())
return time.time()
def monotonicTime(self, asSeconds = True):
t = timespec()
if self.clock_gettime(rmMonotonicTime.CLOCK_MONOTONIC_RAW , ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
if self.fallback:
log.info("Monotonic Clock Error ! Reverting to time.time() fallback")
return self.monotonicFallback(asSeconds)
else:
raise OSError(errno_, os.strerror(errno_))
if asSeconds:
return t.tv_sec
return t.tv_sec + t.tv_nsec * 1e-9
#-----------------------------------------------------------------------------------------------
#
#
#
globalMonotonicTime = rmMonotonicTime()
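# Illustrative usage, not part of the original module. The sunrise/sunset helpers take a
# timestamp anywhere in the desired day plus latitude/longitude in degrees and elevation
# in metres, and return UTC timestamps; the coordinates below are placeholders:
#
#     ts = rmCurrentDayTimestamp()
#     sunrise = rmGetSunriseTimestampForDayTimestamp(ts, 45.0, -75.0, 100)
#     sunset = rmGetSunsetTimestampForDayTimestamp(ts, 45.0, -75.0, 100)
#     uptime = globalMonotonicTime.get()  # monotonic seconds, falling back to time.time()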
|
sprinkler/rainmachine-developer-resources
|
sdk-parsers/RMUtilsFramework/rmTimeUtils.py
|
Python
|
gpl-3.0
| 12,413 | 0.01007 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'BOM for working process',
'version': '0.1',
'category': '',
'description': """
Add extra information for manage BOM as a work BOM
""",
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'mrp',
],
'init_xml': [],
'demo': [],
'data': [
'security/ir.model.access.csv',
'bom_views.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
Micronaet/micronaet-production
|
working_bom/__openerp__.py
|
Python
|
agpl-3.0
| 1,492 | 0.00067 |
"""
WSGI config for opendai_lleida_web project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin_web.settings-production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
open-dai/bcn-lleida-opendai-pilots
|
web-geo-server/admin_web/wsgi.py
|
Python
|
lgpl-3.0
| 1,160 | 0.001724 |
from .data_asset import DataAsset
from .file_data_asset import FileDataAsset
|
great-expectations/great_expectations
|
great_expectations/data_asset/__init__.py
|
Python
|
apache-2.0
| 77 | 0 |
import connect_tests
import string_utils_tests
|
doubleO8/versionone-sdk-spoon
|
versio9/tests/__init__.py
|
Python
|
bsd-3-clause
| 49 | 0 |
#!/usr/bin/env python
# encoding:utf-8
"""
@software: PyCharm
@file: video_db.py
@time: 2016/8/4 16:56
"""
import sqlite3
class Create_DB():
def __init__(self):
self.conn = sqlite3.connect('video.db')
self.cn = self.conn.cursor()
def create_table(self, table):
        # Create a table; `table` is the CREATE TABLE statement to execute
self.cn.execute(table)
def insert_db(self):
        # Insert data
pass
def select_db(self):
        # Query data
pass
if __name__ == '__main__':
pass
|
bjweiqm/Sele
|
school/pachong/video_db.py
|
Python
|
gpl-2.0
| 534 | 0.002 |
"""Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
from test.test_support import run_unittest, TESTFN, EnvironmentVarGuard
from test.test_support import captured_output
import __builtin__
import os
import sys
import re
import encodings
import subprocess
import sysconfig
from copy import copy
# Need to make sure to not import 'site' if someone specified ``-S`` at the
# command-line. Detect this by just making sure 'site' has not been imported
# already.
if "site" in sys.modules:
import site
else:
raise unittest.SkipTest("importation of site.py suppressed")
if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
"""Tests for helper functions.
The setting of the encoding (set using sys.setdefaultencoding) used by
the Unicode implementation is not tested.
"""
def setUp(self):
"""Save a copy of sys.path"""
self.sys_path = sys.path[:]
self.old_base = site.USER_BASE
self.old_site = site.USER_SITE
self.old_prefixes = site.PREFIXES
self.old_vars = copy(sysconfig._CONFIG_VARS)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.PREFIXES = self.old_prefixes
sysconfig._CONFIG_VARS = self.old_vars
def test_makepath(self):
# Test makepath() have an absolute path for its first return value
# and a case-normalized version of the absolute path for its
# second value.
path_parts = ("Beginning", "End")
original_dir = os.path.join(*path_parts)
abs_dir, norm_dir = site.makepath(*path_parts)
self.assertEqual(os.path.abspath(original_dir), abs_dir)
if original_dir == os.path.normcase(original_dir):
self.assertEqual(abs_dir, norm_dir)
else:
self.assertEqual(os.path.normcase(abs_dir), norm_dir)
def test_init_pathinfo(self):
dir_set = site._init_pathinfo()
for entry in [site.makepath(path)[1] for path in sys.path
if path and os.path.isdir(path)]:
self.assertIn(entry, dir_set,
"%s from sys.path not found in set returned "
"by _init_pathinfo(): %s" % (entry, dir_set))
def pth_file_tests(self, pth_file):
"""Contain common code for testing results of reading a .pth file"""
self.assertIn(pth_file.imported, sys.modules,
"%s not in sys.modules" % pth_file.imported)
self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
self.assertFalse(os.path.exists(pth_file.bad_dir_path))
def test_addpackage(self):
# Make sure addpackage() imports if the line starts with 'import',
# adds directories to sys.path for any line in the file that is not a
# comment or import that is a valid directory name for where the .pth
# file resides; invalid directories are not added
pth_file = PthFile()
pth_file.cleanup(prep=True) # to make sure that nothing is
# pre-existing that shouldn't be
try:
pth_file.create()
site.addpackage(pth_file.base_dir, pth_file.filename, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
# Create a .pth file and return its (abspath, basename).
pth_dir = os.path.abspath(pth_dir)
pth_basename = pth_name + '.pth'
pth_fn = os.path.join(pth_dir, pth_basename)
pth_file = open(pth_fn, 'w')
self.addCleanup(lambda: os.remove(pth_fn))
pth_file.write(contents)
pth_file.close()
return pth_dir, pth_basename
def test_addpackage_import_bad_syntax(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
with captured_output("stderr") as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegexpMatches(err_out.getvalue(), "line 1")
self.assertRegexpMatches(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: the previous two should be independent checks so that the
# order doesn't matter. The next three could be a single check
# but my regex foo isn't good enough to write it.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), r'import bad\)syntax')
self.assertRegexpMatches(err_out.getvalue(), 'SyntaxError')
def test_addpackage_import_bad_exec(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
with captured_output("stderr") as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegexpMatches(err_out.getvalue(), "line 2")
self.assertRegexpMatches(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), 'ImportError')
@unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
"error for file paths containing null characters")
def test_addpackage_import_bad_pth_file(self):
# Issue 5258
pth_dir, pth_fn = self.make_pth("abc\x00def\n")
with captured_output("stderr") as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegexpMatches(err_out.getvalue(), "line 1")
self.assertRegexpMatches(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegexpMatches(err_out.getvalue(), 'Traceback')
self.assertRegexpMatches(err_out.getvalue(), 'TypeError')
def test_addsitedir(self):
# Same tests for test_addpackage since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
@unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
"user-site (site.ENABLE_USER_SITE)")
def test_s_option(self):
usersite = site.USER_SITE
self.assertIn(usersite, sys.path)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 1, "%r is not in sys.path (sys.exit returned %r)"
% (usersite, rc))
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
def test_getuserbase(self):
site.USER_BASE = None
user_base = site.getuserbase()
# the call sets site.USER_BASE
self.assertEqual(site.USER_BASE, user_base)
# let's set PYTHONUSERBASE and see if it uses it
site.USER_BASE = None
import sysconfig
sysconfig._CONFIG_VARS = None
with EnvironmentVarGuard() as environ:
environ['PYTHONUSERBASE'] = 'xoxo'
self.assertTrue(site.getuserbase().startswith('xoxo'),
site.getuserbase())
def test_getusersitepackages(self):
site.USER_SITE = None
site.USER_BASE = None
user_site = site.getusersitepackages()
# the call sets USER_BASE *and* USER_SITE
self.assertEqual(site.USER_SITE, user_site)
self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
def test_getsitepackages(self):
site.PREFIXES = ['xoxo']
dirs = site.getsitepackages()
if sys.platform in ('os2emx', 'riscos'):
self.assertEqual(len(dirs), 1)
wanted = os.path.join('xoxo', 'Lib', 'site-packages')
self.assertEqual(dirs[0], wanted)
elif (sys.platform == "darwin" and
sysconfig.get_config_var("PYTHONFRAMEWORK")):
# OS X framework builds
site.PREFIXES = ['Python.framework']
dirs = site.getsitepackages()
self.assertEqual(len(dirs), 3)
wanted = os.path.join('/Library',
sysconfig.get_config_var("PYTHONFRAMEWORK"),
sys.version[:3],
'site-packages')
self.assertEqual(dirs[2], wanted)
elif os.sep == '/':
            # OS X non-framework builds, Linux, FreeBSD, etc.
self.assertEqual(len(dirs), 2)
wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
'site-packages')
self.assertEqual(dirs[0], wanted)
wanted = os.path.join('xoxo', 'lib', 'site-python')
self.assertEqual(dirs[1], wanted)
else:
# other platforms
self.assertEqual(len(dirs), 2)
self.assertEqual(dirs[0], 'xoxo')
wanted = os.path.join('xoxo', 'lib', 'site-packages')
self.assertEqual(dirs[1], wanted)
class PthFile(object):
"""Helper class for handling testing of .pth files"""
def __init__(self, filename_base=TESTFN, imported="time",
good_dirname="__testdir__", bad_dirname="__bad"):
"""Initialize instance variables"""
self.filename = filename_base + ".pth"
self.base_dir = os.path.abspath('')
self.file_path = os.path.join(self.base_dir, self.filename)
self.imported = imported
self.good_dirname = good_dirname
self.bad_dirname = bad_dirname
self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
def create(self):
"""Create a .pth file with a comment, blank lines, an ``import
<self.imported>``, a line with self.good_dirname, and a line with
self.bad_dirname.
Creation of the directory for self.good_dir_path (based off of
self.good_dirname) is also performed.
Make sure to call self.cleanup() to undo anything done by this method.
"""
FILE = open(self.file_path, 'w')
try:
print>>FILE, "#import @bad module name"
print>>FILE, "\n"
print>>FILE, "import %s" % self.imported
print>>FILE, self.good_dirname
print>>FILE, self.bad_dirname
finally:
FILE.close()
os.mkdir(self.good_dir_path)
def cleanup(self, prep=False):
"""Make sure that the .pth file is deleted, self.imported is not in
sys.modules, and that both self.good_dirname and self.bad_dirname are
not existing directories."""
if os.path.exists(self.file_path):
os.remove(self.file_path)
if prep:
self.imported_module = sys.modules.get(self.imported)
if self.imported_module:
del sys.modules[self.imported]
else:
if self.imported_module:
sys.modules[self.imported] = self.imported_module
if os.path.exists(self.good_dir_path):
os.rmdir(self.good_dir_path)
if os.path.exists(self.bad_dir_path):
os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
"""Test side-effects from importing 'site'."""
def setUp(self):
"""Make a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
def test_abs__file__(self):
# Make sure all imported modules have their __file__ attribute
# as an absolute path.
# Handled by abs__file__()
site.abs__file__()
for module in (sys, os, __builtin__):
try:
self.assertTrue(os.path.isabs(module.__file__), repr(module))
except AttributeError:
continue
# We could try everything in sys.modules; however, when regrtest.py
# runs something like test_frozen before test_site, then we will
# be testing things loaded *after* test_site did path normalization
def test_no_duplicate_paths(self):
# No duplicate paths should exist in sys.path
# Handled by removeduppaths()
site.removeduppaths()
seen_paths = set()
for path in sys.path:
self.assertNotIn(path, seen_paths)
seen_paths.add(path)
@unittest.skip('test not implemented')
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
# XXX: implement
pass
def test_setting_quit(self):
# 'quit' and 'exit' should be injected into __builtin__
self.assertTrue(hasattr(__builtin__, "quit"))
self.assertTrue(hasattr(__builtin__, "exit"))
def test_setting_copyright(self):
# 'copyright' and 'credits' should be in __builtin__
self.assertTrue(hasattr(__builtin__, "copyright"))
self.assertTrue(hasattr(__builtin__, "credits"))
def test_setting_help(self):
# 'help' should be set in __builtin__
self.assertTrue(hasattr(__builtin__, "help"))
def test_aliasing_mbcs(self):
if sys.platform == "win32":
import locale
if locale.getdefaultlocale()[1].startswith('cp'):
for value in encodings.aliases.aliases.itervalues():
if value == "mbcs":
break
else:
self.fail("did not alias mbcs")
def test_setdefaultencoding_removed(self):
# Make sure sys.setdefaultencoding is gone
self.assertTrue(not hasattr(sys, "setdefaultencoding"))
def test_sitecustomize_executed(self):
# If sitecustomize is available, it should have been imported.
if "sitecustomize" not in sys.modules:
try:
import sitecustomize
except ImportError:
pass
else:
self.fail("sitecustomize not imported automatically")
def test_main():
run_unittest(HelperFunctionsTests, ImportSideEffectTests)
if __name__ == "__main__":
test_main()
|
j5shi/Thruster
|
pylibs/test/test_site.py
|
Python
|
gpl-2.0
| 15,991 | 0.001688 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import factory.fuzzy
from education_group.ddd.domain._campus import Campus
class CampusFactory(factory.Factory):
class Meta:
model = Campus
abstract = False
name = factory.Sequence(lambda n: 'Campus %02d' % n)
university_name = factory.Sequence(lambda n: 'University %02d' % n)
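# Illustrative usage, not part of the original module: because this is a plain
# factory.Factory (not a DjangoModelFactory), calling it builds a Campus value object
# without touching the database:
#
#     campus = CampusFactory()                        # e.g. name='Campus 00'
#     named = CampusFactory(name='Louvain-la-Neuve')  # override a field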
|
uclouvain/OSIS-Louvain
|
education_group/tests/ddd/factories/domain/campus.py
|
Python
|
agpl-3.0
| 1,589 | 0.00063 |
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
## test with python setup.py develop
setup(
name='ipyreload',
packages=['ipyreload'],
version= 1.2,
description='ipython productivity tools',
long_description=readme(),
url="https://github.com/wolfiex/ipython-dev-reload",
keywords= 'ipython reload'.split(' '),
author='Dan Ellis',
author_email='daniel.ellis.research@gmail.com',
license='MIT',
zip_safe=False)
|
wolfiex/ipython-dev-reload
|
setup.py
|
Python
|
mit
| 529 | 0.009452 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# url_read.py file is part of slpkg.
# Copyright 2014-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# Slpkg is a user-friendly package manager for Slackware installations
# https://gitlab.com/dslackw/slpkg
# Slpkg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib3
from slpkg.__metadata__ import MetaData as _meta_
class URL:
"""Urls reading class
"""
def __init__(self, link):
self.link = link
self.meta = _meta_
self.red = _meta_.color["RED"]
self.endc = _meta_.color["ENDC"]
if self.meta.http_proxy:
self.http = urllib3.ProxyManager(self.meta.http_proxy)
else:
self.http = urllib3.PoolManager()
def reading(self):
"""Open url and read
"""
try:
f = self.http.request('GET', self.link)
return f.data.decode("utf-8", "ignore")
except urllib3.exceptions.NewConnectionError:
print(f"\n{self.red}Can't read the file '{self.link.split('/')[-1]}'{self.endc}")
return " "
|
dslackw/slpkg
|
slpkg/url_read.py
|
Python
|
gpl-3.0
| 1,692 | 0.000591 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is based on chromium/chromium/master/tools/clang/scripts/update.py.
It is used on Windows platforms to copy the correct msdia*.dll to the
clang folder, as a "gclient hook".
"""
import os
import shutil
import stat
import sys
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
LLVM_BUILD_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', 'third_party',
'llvm-build', 'Release+Asserts'))
def GetDiaDll():
"""Get the location of msdia*.dll for the platform."""
# Bump after VC updates.
DIA_DLL = {
'2013': 'msdia120.dll',
'2015': 'msdia140.dll',
'2017': 'msdia140.dll',
'2019': 'msdia140.dll',
}
# Don't let vs_toolchain overwrite our environment.
environ_bak = os.environ
sys.path.append(os.path.join(THIS_DIR, '..', '..', 'build'))
import vs_toolchain
win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
msvs_version = vs_toolchain.GetVisualStudioVersion()
if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1'))):
dia_path = os.path.join(win_sdk_dir, '..', 'DIA SDK', 'bin', 'amd64')
else:
if 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
vs_path = os.environ['GYP_MSVS_OVERRIDE_PATH']
else:
vs_path = vs_toolchain.DetectVisualStudioPath()
dia_path = os.path.join(vs_path, 'DIA SDK', 'bin', 'amd64')
os.environ = environ_bak
return os.path.join(dia_path, DIA_DLL[msvs_version])
def CopyFile(src, dst):
"""Copy a file from src to dst."""
print("Copying %s to %s" % (str(src), str(dst)))
shutil.copy(src, dst)
def CopyDiaDllTo(target_dir):
# This script always wants to use the 64-bit msdia*.dll.
dia_dll = GetDiaDll()
CopyFile(dia_dll, target_dir)
def main():
CopyDiaDllTo(os.path.join(LLVM_BUILD_DIR, 'bin'))
return 0
if __name__ == '__main__':
sys.exit(main())
|
chinmaygarde/flutter_engine
|
tools/dia_dll.py
|
Python
|
bsd-3-clause
| 2,094 | 0.011939 |
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
# pylint: disable=abstract-method
import django.contrib.auth as auth
import social.apps.django_app.utils as social_utils
from django.conf import settings
from django.contrib.auth import login
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from edx_oauth2_provider.constants import SCOPE_VALUE_DICT
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.views.base import TokenView as DOTAccessTokenView
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from provider import constants
from provider.oauth2.views import AccessTokenView as DOPAccessTokenView
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.djangoapps.oauth_dispatch import adapters
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeBase(APIView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token.
"""
@method_decorator(csrf_exempt)
@method_decorator(social_utils.strategy("social:complete"))
def dispatch(self, *args, **kwargs):
return super(AccessTokenExchangeBase, self).dispatch(*args, **kwargs)
def get(self, request, _backend): # pylint: disable=arguments-differ
"""
Pass through GET requests without the _backend
"""
return super(AccessTokenExchangeBase, self).get(request)
def post(self, request, _backend): # pylint: disable=arguments-differ
"""
Handle POST requests to get a first-party access token.
"""
form = AccessTokenExchangeForm(request=request, oauth2_adapter=self.oauth2_adapter, data=request.POST) # pylint: disable=no-member
if not form.is_valid():
return self.error_response(form.errors) # pylint: disable=no-member
user = form.cleaned_data["user"]
scope = form.cleaned_data["scope"]
client = form.cleaned_data["client"]
return self.exchange_access_token(request, user, scope, client)
def exchange_access_token(self, request, user, scope, client):
"""
Exchange third party credentials for an edx access token, and return a
serialized access token response.
"""
if constants.SINGLE_ACCESS_TOKEN:
edx_access_token = self.get_access_token(request, user, scope, client) # pylint: disable=no-member
else:
edx_access_token = self.create_access_token(request, user, scope, client)
return self.access_token_response(edx_access_token) # pylint: disable=no-member
class DOPAccessTokenExchangeView(AccessTokenExchangeBase, DOPAccessTokenView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token. Uses django-oauth2-provider (DOP) to manage access
tokens.
"""
oauth2_adapter = adapters.DOPAdapter()
class DOTAccessTokenExchangeView(AccessTokenExchangeBase, DOTAccessTokenView):
"""
View for token exchange from 3rd party OAuth access token to 1st party
OAuth access token. Uses django-oauth-toolkit (DOT) to manage access
tokens.
"""
oauth2_adapter = adapters.DOTAdapter()
def get(self, request, _backend):
return Response(status=400, data={
'error': 'invalid_request',
'error_description': 'Only POST requests allowed.',
})
def get_access_token(self, request, user, scope, client):
"""
TODO: MA-2122: Reusing access tokens is not yet supported for DOT.
Just return a new access token.
"""
return self.create_access_token(request, user, scope, client)
def create_access_token(self, request, user, scope, client):
"""
Create and return a new access token.
"""
        # The setting is expressed in days; oauthlib expects ``expires_in`` in seconds.
        _seconds_per_day = 24 * 60 * 60
        token_generator = BearerToken(
            expires_in=settings.OAUTH_EXPIRE_PUBLIC_CLIENT_DAYS * _seconds_per_day,
            request_validator=oauth2_settings.OAUTH2_VALIDATOR_CLASS(),
        )
self._populate_create_access_token_request(request, user, scope, client)
return token_generator.create_token(request, refresh_token=True)
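        # With refresh_token=True, oauthlib's BearerToken typically returns a plain dict
        # along the lines of:
        #   {"access_token": ..., "expires_in": ..., "token_type": "Bearer",
        #    "scope": ..., "refresh_token": ...}
        # The exact keys depend on the installed oauthlib version (an assumption here,
        # not verified against this codebase).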
def access_token_response(self, token):
"""
Wrap an access token in an appropriate response
"""
return Response(data=token)
def _populate_create_access_token_request(self, request, user, scope, client):
"""
django-oauth-toolkit expects certain non-standard attributes to
be present on the request object. This function modifies the
request object to match these expectations
"""
request.user = user
request.scopes = [SCOPE_VALUE_DICT[scope]]
request.client = client
request.state = None
request.refresh_token = None
request.extra_credentials = None
request.grant_type = client.authorization_grant_type
def error_response(self, form_errors):
"""
Return an error response consisting of the errors in the form
"""
return Response(status=400, data=form_errors)
class LoginWithAccessTokenView(APIView):
"""
View for exchanging an access token for session cookies
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
permission_classes = (permissions.IsAuthenticated,)
@staticmethod
def _get_path_of_arbitrary_backend_for_user(user):
"""
Return the path to the first found authentication backend that recognizes the given user.
"""
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = auth.load_backend(backend_path)
if backend.get_user(user.id):
return backend_path
@method_decorator(csrf_exempt)
def post(self, request):
"""
Handler for the POST method to this view.
"""
# The django login method stores the user's id in request.session[SESSION_KEY] and the
# path to the user's authentication backend in request.session[BACKEND_SESSION_KEY].
# The login method assumes the backend path had been previously stored in request.user.backend
# in the 'authenticate' call. However, not all authentication providers do so.
# So we explicitly populate the request.user.backend field here.
if not hasattr(request.user, 'backend'):
request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
login(request, request.user) # login generates and stores the user's cookies in the session
return HttpResponse(status=204) # cookies stored in the session are returned with the response
|
fintech-circle/edx-platform
|
openedx/core/djangoapps/auth_exchange/views.py
|
Python
|
agpl-3.0
| 7,236 | 0.002349 |
import android
class SMSPoolMember:
def __init__(self, query):
self.droid = android.Android()
        self.query = str(query).strip()
    def wifiConnected(self):
        # Android reports this placeholder SSID when no Wi-Fi network is associated.
        no_ssid = "<unknown ssid>"
        return self.droid.wifiGetConnectionInfo().result["ssid"] != no_ssid
    def dataConnected(self):
        # A cell id of -1 means the device currently has no cell registration.
        return self.droid.getCellLocation().result["cid"] > -1
def sendResponse(self):
if self.query == "connection":
return "pool:" + str(self.wifiConnected() or self.dataConnected())
else:
return "pool: None"
|
wallarelvo/eneza-server
|
smsserver/poolmember.py
|
Python
|
apache-2.0
| 584 | 0.001712 |
import caffe
import numpy as np
import pdb
class MyLossLayer(caffe.Layer):
"""Layer of Efficient Siamese loss function."""
def setup(self, bottom, top):
self.margin = 10
        print('*********************** SETTING UP')
pass
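    # How the pairs are built in forward(), under its hard-coded assumption of
    # batch=1, level=5, dis=9 (45 images per forward pass):
    #   - the pass is split into `dis` consecutive blocks of `level` images each;
    #   - within every block, all ordered pairs (i, j) with i < j are formed, and
    #     self.dis stores bottom[0].data[i] - bottom[0].data[j];
    #   - the hinge loss max(0, margin - (score_i - score_j)) then pushes the earlier
    #     image in each block to score at least `margin` higher than any later one.
    #     E.g. with margin=10, scores 12 and 5 give loss 3; scores 20 and 5 give loss 0.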
def forward(self, bottom, top):
"""The parameters here have the same meaning as data_layer"""
self.Num = 0
batch = 1
level = 5
dis = 9
SepSize = batch*level
self.dis = []
# for the first
for k in range(dis):
for i in range(SepSize*k,SepSize*(k+1)-batch):
for j in range(SepSize*k + int((i-SepSize*k)/batch+1)*batch,SepSize*(k+1)):
self.dis.append(bottom[0].data[i]-bottom[0].data[j])
self.Num +=1
self.dis = np.asarray(self.dis)
self.loss = np.maximum(0,self.margin-self.dis) # Efficient Siamese forward pass of hinge loss
top[0].data[...] = np.sum(self.loss)/bottom[0].num
def backward(self, top, propagate_down, bottom):
"""The parameters here have the same meaning as data_layer"""
batch=1
index = 0
level = 5
dis = 9
SepSize = batch*level
self.ref= np.zeros(bottom[0].num,dtype=np.float32)
for k in range(dis):
for i in range(SepSize*k,SepSize*(k+1)-batch):
for j in range(SepSize*k + int((i-SepSize*k)/batch+1)*batch,SepSize*(k+1)):
if self.loss[index]>0:
self.ref[i] += -1
self.ref[j] += +1
index +=1
# Efficient Siamese backward pass
bottom[0].diff[...]= np.reshape(self.ref,(bottom[0].num,1))/bottom[0].num
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
top[0].reshape(1)
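# The layer is wired into a network through Caffe's generic Python layer; a hypothetical
# prototxt fragment (layer and blob names are placeholders, not taken from this repo):
#
#   layer {
#     name: "rank_loss"
#     type: "Python"
#     bottom: "score"
#     python_param { module: "netloss_tid2013" layer: "MyLossLayer" }
#     loss_weight: 1
#   }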
|
xialeiliu/RankIQA
|
src/MyLossLayer/netloss_tid2013.py
|
Python
|
mit
| 1,910 | 0.013613 |
#!/usr/bin/env python3
import sys
if len(sys.argv) != 2:
raise SystemExit('Incorrect usage. Use: ' + sys.argv[0] + ' <image.img>')
image_filename = sys.argv[1]
content = bytearray()
with open(image_filename, 'rb') as f:
content = bytearray(f.read())
# Pad with zero bytes up to the next 512-byte boundary; the outer modulo keeps an
# already-aligned image unchanged instead of appending a full extra sector.
bytes_to_append = (512 - len(content) % 512) % 512
content.extend(b'\x00' * bytes_to_append)
with open(image_filename, 'wb') as f:
f.write(content)
print('Successfully aligned to sector')
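# Example invocation (hypothetical image name):
#   ./align-to-full-sector.py boot.img
# Afterwards the file size is a multiple of 512 bytes, e.g. a 467-byte image grows to 512.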
|
qwercik/brainfuckOS
|
utils/align-to-full-sector.py
|
Python
|
mit
| 467 | 0 |